Spaces:
Build error
Build error
Upload folder using huggingface_hub
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +13 -0
- .gitignore +152 -0
- GroundingDINO/.asset/COCO.png +0 -0
- GroundingDINO/.asset/GD_GLIGEN.png +3 -0
- GroundingDINO/.asset/GD_SD.png +3 -0
- GroundingDINO/.asset/ODinW.png +0 -0
- GroundingDINO/.asset/arch.png +0 -0
- GroundingDINO/.asset/cats.png +0 -0
- GroundingDINO/.asset/grounding_dino_logo.png +0 -0
- GroundingDINO/.asset/hero_figure.png +3 -0
- GroundingDINO/.asset/model_explan1.PNG +0 -0
- GroundingDINO/.asset/model_explan2.PNG +0 -0
- GroundingDINO/.gitignore +146 -0
- GroundingDINO/LICENSE +201 -0
- GroundingDINO/README.md +306 -0
- GroundingDINO/build/lib.linux-x86_64-cpython-310/groundingdino/_C.cpython-310-x86_64-linux-gnu.so +3 -0
- GroundingDINO/build/temp.linux-x86_64-cpython-310/content/drive/My Drive/AI/matte/Matte-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.o +3 -0
- GroundingDINO/build/temp.linux-x86_64-cpython-310/content/drive/My Drive/AI/matte/Matte-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cuda.o +0 -0
- GroundingDINO/build/temp.linux-x86_64-cpython-310/content/drive/My Drive/AI/matte/Matte-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/cuda_version.o +0 -0
- GroundingDINO/build/temp.linux-x86_64-cpython-310/content/drive/My Drive/AI/matte/Matte-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/vision.o +3 -0
- GroundingDINO/demo/create_coco_dataset.py +83 -0
- GroundingDINO/demo/gradio_app.py +125 -0
- GroundingDINO/demo/image_editing_with_groundingdino_gligen.ipynb +0 -0
- GroundingDINO/demo/image_editing_with_groundingdino_stablediffusion.ipynb +0 -0
- GroundingDINO/demo/inference_on_a_image.py +172 -0
- GroundingDINO/groundingdino.egg-info/PKG-INFO +209 -0
- GroundingDINO/groundingdino.egg-info/SOURCES.txt +45 -0
- GroundingDINO/groundingdino.egg-info/dependency_links.txt +1 -0
- GroundingDINO/groundingdino.egg-info/requires.txt +10 -0
- GroundingDINO/groundingdino.egg-info/top_level.txt +1 -0
- GroundingDINO/groundingdino/_C.cpython-310-x86_64-linux-gnu.so +3 -0
- GroundingDINO/groundingdino/__init__.py +0 -0
- GroundingDINO/groundingdino/__pycache__/__init__.cpython-310.pyc +0 -0
- GroundingDINO/groundingdino/config/GroundingDINO_SwinB_cfg.py +43 -0
- GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py +43 -0
- GroundingDINO/groundingdino/config/__init__.py +0 -0
- GroundingDINO/groundingdino/datasets/__init__.py +0 -0
- GroundingDINO/groundingdino/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
- GroundingDINO/groundingdino/datasets/__pycache__/transforms.cpython-310.pyc +0 -0
- GroundingDINO/groundingdino/datasets/transforms.py +311 -0
- GroundingDINO/groundingdino/models/GroundingDINO/__init__.py +15 -0
- GroundingDINO/groundingdino/models/GroundingDINO/__pycache__/__init__.cpython-310.pyc +0 -0
- GroundingDINO/groundingdino/models/GroundingDINO/__pycache__/bertwarper.cpython-310.pyc +0 -0
- GroundingDINO/groundingdino/models/GroundingDINO/__pycache__/fuse_modules.cpython-310.pyc +0 -0
- GroundingDINO/groundingdino/models/GroundingDINO/__pycache__/groundingdino.cpython-310.pyc +0 -0
- GroundingDINO/groundingdino/models/GroundingDINO/__pycache__/ms_deform_attn.cpython-310.pyc +0 -0
- GroundingDINO/groundingdino/models/GroundingDINO/__pycache__/transformer.cpython-310.pyc +0 -0
- GroundingDINO/groundingdino/models/GroundingDINO/__pycache__/transformer_vanilla.cpython-310.pyc +0 -0
- GroundingDINO/groundingdino/models/GroundingDINO/__pycache__/utils.cpython-310.pyc +0 -0
- GroundingDINO/groundingdino/models/GroundingDINO/backbone/__init__.py +1 -0
.gitattributes
CHANGED
|
@@ -32,3 +32,16 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 32 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 33 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 33 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
GroundingDINO/.asset/GD_GLIGEN.png filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
GroundingDINO/.asset/GD_SD.png filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
GroundingDINO/.asset/hero_figure.png filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
GroundingDINO/build/lib.linux-x86_64-cpython-310/groundingdino/_C.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
GroundingDINO/build/temp.linux-x86_64-cpython-310/content/drive/My[[:space:]]Drive/AI/matte/Matte-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.o filter=lfs diff=lfs merge=lfs -text
|
| 40 |
+
GroundingDINO/build/temp.linux-x86_64-cpython-310/content/drive/My[[:space:]]Drive/AI/matte/Matte-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/vision.o filter=lfs diff=lfs merge=lfs -text
|
| 41 |
+
GroundingDINO/groundingdino/_C.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 42 |
+
Matte-Anything/figs/demo1.png filter=lfs diff=lfs merge=lfs -text
|
| 43 |
+
Matte-Anything/figs/demo2.png filter=lfs diff=lfs merge=lfs -text
|
| 44 |
+
Matte-Anything/figs/web_ui.gif filter=lfs diff=lfs merge=lfs -text
|
| 45 |
+
figs/demo1.png filter=lfs diff=lfs merge=lfs -text
|
| 46 |
+
figs/demo2.png filter=lfs diff=lfs merge=lfs -text
|
| 47 |
+
figs/web_ui.gif filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*.sh
|
| 2 |
+
change_submit.py
|
| 3 |
+
cluster_submit.yaml
|
| 4 |
+
|
| 5 |
+
# Byte-compiled / optimized / DLL files
|
| 6 |
+
__pycache__/
|
| 7 |
+
*.py[cod]
|
| 8 |
+
*$py.class
|
| 9 |
+
|
| 10 |
+
# C extensions
|
| 11 |
+
*.so
|
| 12 |
+
|
| 13 |
+
# Distribution / packaging
|
| 14 |
+
work_dirs
|
| 15 |
+
test
|
| 16 |
+
val
|
| 17 |
+
ckpts
|
| 18 |
+
data
|
| 19 |
+
.Python
|
| 20 |
+
build/
|
| 21 |
+
ckpts/
|
| 22 |
+
data/
|
| 23 |
+
ckpts
|
| 24 |
+
data
|
| 25 |
+
test/
|
| 26 |
+
val/
|
| 27 |
+
work_dirs/
|
| 28 |
+
develop-eggs/
|
| 29 |
+
dist/
|
| 30 |
+
downloads/
|
| 31 |
+
eggs/
|
| 32 |
+
.eggs/
|
| 33 |
+
lib/
|
| 34 |
+
lib64/
|
| 35 |
+
parts/
|
| 36 |
+
sdist/
|
| 37 |
+
var/
|
| 38 |
+
wheels/
|
| 39 |
+
pip-wheel-metadata/
|
| 40 |
+
share/python-wheels/
|
| 41 |
+
*.egg-info/
|
| 42 |
+
.installed.cfg
|
| 43 |
+
*.egg
|
| 44 |
+
MANIFEST
|
| 45 |
+
pretrained/
|
| 46 |
+
|
| 47 |
+
# PyInstaller
|
| 48 |
+
# Usually these files are written by a python script from a template
|
| 49 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 50 |
+
*.manifest
|
| 51 |
+
*.spec
|
| 52 |
+
|
| 53 |
+
# Installer logs
|
| 54 |
+
pip-log.txt
|
| 55 |
+
pip-delete-this-directory.txt
|
| 56 |
+
|
| 57 |
+
# Unit test / coverage reports
|
| 58 |
+
htmlcov/
|
| 59 |
+
.tox/
|
| 60 |
+
.nox/
|
| 61 |
+
.coverage
|
| 62 |
+
.coverage.*
|
| 63 |
+
.cache
|
| 64 |
+
nosetests.xml
|
| 65 |
+
coverage.xml
|
| 66 |
+
*.cover
|
| 67 |
+
*.py,cover
|
| 68 |
+
.hypothesis/
|
| 69 |
+
.pytest_cache/
|
| 70 |
+
|
| 71 |
+
# Translations
|
| 72 |
+
*.mo
|
| 73 |
+
*.pot
|
| 74 |
+
|
| 75 |
+
# Django stuff:
|
| 76 |
+
*.log
|
| 77 |
+
local_settings.py
|
| 78 |
+
db.sqlite3
|
| 79 |
+
db.sqlite3-journal
|
| 80 |
+
|
| 81 |
+
# Flask stuff:
|
| 82 |
+
instance/
|
| 83 |
+
.webassets-cache
|
| 84 |
+
|
| 85 |
+
# Scrapy stuff:
|
| 86 |
+
.scrapy
|
| 87 |
+
|
| 88 |
+
# Sphinx documentation
|
| 89 |
+
docs/_build/
|
| 90 |
+
|
| 91 |
+
# PyBuilder
|
| 92 |
+
target/
|
| 93 |
+
|
| 94 |
+
# Jupyter Notebook
|
| 95 |
+
.ipynb_checkpoints
|
| 96 |
+
|
| 97 |
+
# IPython
|
| 98 |
+
profile_default/
|
| 99 |
+
ipython_config.py
|
| 100 |
+
|
| 101 |
+
# pyenv
|
| 102 |
+
.python-version
|
| 103 |
+
|
| 104 |
+
# pipenv
|
| 105 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 106 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 107 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 108 |
+
# install all needed dependencies.
|
| 109 |
+
#Pipfile.lock
|
| 110 |
+
|
| 111 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
| 112 |
+
__pypackages__/
|
| 113 |
+
|
| 114 |
+
# Celery stuff
|
| 115 |
+
celerybeat-schedule
|
| 116 |
+
celerybeat.pid
|
| 117 |
+
|
| 118 |
+
# SageMath parsed files
|
| 119 |
+
*.sage.py
|
| 120 |
+
|
| 121 |
+
# Environments
|
| 122 |
+
.env
|
| 123 |
+
.venv
|
| 124 |
+
env/
|
| 125 |
+
venv/
|
| 126 |
+
ENV/
|
| 127 |
+
env.bak/
|
| 128 |
+
venv.bak/
|
| 129 |
+
|
| 130 |
+
# Spyder project settings
|
| 131 |
+
.spyderproject
|
| 132 |
+
.spyproject
|
| 133 |
+
|
| 134 |
+
# Rope project settings
|
| 135 |
+
.ropeproject
|
| 136 |
+
|
| 137 |
+
# mkdocs documentation
|
| 138 |
+
/site
|
| 139 |
+
|
| 140 |
+
# mypy
|
| 141 |
+
.mypy_cache/
|
| 142 |
+
.dmypy.json
|
| 143 |
+
dmypy.json
|
| 144 |
+
|
| 145 |
+
# Pyre type checker
|
| 146 |
+
.pyre/
|
| 147 |
+
|
| 148 |
+
cluster.sh
|
| 149 |
+
|
| 150 |
+
# GroundingDINO
|
| 151 |
+
GroundingDINO/
|
| 152 |
+
testcode/
|
GroundingDINO/.asset/COCO.png
ADDED
|
GroundingDINO/.asset/GD_GLIGEN.png
ADDED
|
Git LFS Details
|
GroundingDINO/.asset/GD_SD.png
ADDED
|
Git LFS Details
|
GroundingDINO/.asset/ODinW.png
ADDED
|
GroundingDINO/.asset/arch.png
ADDED
|
GroundingDINO/.asset/cats.png
ADDED
|
GroundingDINO/.asset/grounding_dino_logo.png
ADDED
|
GroundingDINO/.asset/hero_figure.png
ADDED
|
Git LFS Details
|
GroundingDINO/.asset/model_explan1.PNG
ADDED
|
|
GroundingDINO/.asset/model_explan2.PNG
ADDED
|
|
GroundingDINO/.gitignore
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# IDE
|
| 2 |
+
.idea/
|
| 3 |
+
.vscode/
|
| 4 |
+
|
| 5 |
+
# Byte-compiled / optimized / DLL files
|
| 6 |
+
__pycache__/
|
| 7 |
+
*.py[cod]
|
| 8 |
+
*$py.class
|
| 9 |
+
|
| 10 |
+
# C extensions
|
| 11 |
+
*.so
|
| 12 |
+
|
| 13 |
+
# Distribution / packaging
|
| 14 |
+
.Python
|
| 15 |
+
build/
|
| 16 |
+
develop-eggs/
|
| 17 |
+
dist/
|
| 18 |
+
downloads/
|
| 19 |
+
eggs/
|
| 20 |
+
.eggs/
|
| 21 |
+
lib/
|
| 22 |
+
lib64/
|
| 23 |
+
parts/
|
| 24 |
+
sdist/
|
| 25 |
+
var/
|
| 26 |
+
wheels/
|
| 27 |
+
pip-wheel-metadata/
|
| 28 |
+
share/python-wheels/
|
| 29 |
+
*.egg-info/
|
| 30 |
+
.installed.cfg
|
| 31 |
+
*.egg
|
| 32 |
+
MANIFEST
|
| 33 |
+
|
| 34 |
+
# PyInstaller
|
| 35 |
+
# Usually these files are written by a python script from a template
|
| 36 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 37 |
+
*.manifest
|
| 38 |
+
*.spec
|
| 39 |
+
|
| 40 |
+
# Installer logs
|
| 41 |
+
pip-log.txt
|
| 42 |
+
pip-delete-this-directory.txt
|
| 43 |
+
|
| 44 |
+
# Unit test / coverage reports
|
| 45 |
+
htmlcov/
|
| 46 |
+
.tox/
|
| 47 |
+
.nox/
|
| 48 |
+
.coverage
|
| 49 |
+
.coverage.*
|
| 50 |
+
.cache
|
| 51 |
+
nosetests.xml
|
| 52 |
+
coverage.xml
|
| 53 |
+
*.cover
|
| 54 |
+
*.py,cover
|
| 55 |
+
.hypothesis/
|
| 56 |
+
.pytest_cache/
|
| 57 |
+
|
| 58 |
+
# Translations
|
| 59 |
+
*.mo
|
| 60 |
+
*.pot
|
| 61 |
+
|
| 62 |
+
# Django stuff:
|
| 63 |
+
*.log
|
| 64 |
+
local_settings.py
|
| 65 |
+
db.sqlite3
|
| 66 |
+
db.sqlite3-journal
|
| 67 |
+
|
| 68 |
+
# Flask stuff:
|
| 69 |
+
instance/
|
| 70 |
+
.webassets-cache
|
| 71 |
+
|
| 72 |
+
# Scrapy stuff:
|
| 73 |
+
.scrapy
|
| 74 |
+
|
| 75 |
+
# Sphinx documentation
|
| 76 |
+
docs/_build/
|
| 77 |
+
|
| 78 |
+
# PyBuilder
|
| 79 |
+
target/
|
| 80 |
+
|
| 81 |
+
# Jupyter Notebook
|
| 82 |
+
.ipynb_checkpoints
|
| 83 |
+
|
| 84 |
+
# IPython
|
| 85 |
+
profile_default/
|
| 86 |
+
ipython_config.py
|
| 87 |
+
|
| 88 |
+
# pyenv
|
| 89 |
+
.python-version
|
| 90 |
+
|
| 91 |
+
# pipenv
|
| 92 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 93 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 94 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 95 |
+
# install all needed dependencies.
|
| 96 |
+
#Pipfile.lock
|
| 97 |
+
|
| 98 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
| 99 |
+
__pypackages__/
|
| 100 |
+
|
| 101 |
+
# Celery stuff
|
| 102 |
+
celerybeat-schedule
|
| 103 |
+
celerybeat.pid
|
| 104 |
+
|
| 105 |
+
# SageMath parsed files
|
| 106 |
+
*.sage.py
|
| 107 |
+
|
| 108 |
+
# Environments
|
| 109 |
+
.env
|
| 110 |
+
.venv
|
| 111 |
+
env/
|
| 112 |
+
venv/
|
| 113 |
+
ENV/
|
| 114 |
+
env.bak/
|
| 115 |
+
venv.bak/
|
| 116 |
+
|
| 117 |
+
# Spyder project settings
|
| 118 |
+
.spyderproject
|
| 119 |
+
.spyproject
|
| 120 |
+
|
| 121 |
+
# Rope project settings
|
| 122 |
+
.ropeproject
|
| 123 |
+
|
| 124 |
+
# mkdocs documentation
|
| 125 |
+
/site
|
| 126 |
+
|
| 127 |
+
# mypy
|
| 128 |
+
.mypy_cache/
|
| 129 |
+
.dmypy.json
|
| 130 |
+
dmypy.json
|
| 131 |
+
|
| 132 |
+
# Pyre type checker
|
| 133 |
+
.pyre/
|
| 134 |
+
|
| 135 |
+
# vscode
|
| 136 |
+
.vscode/
|
| 137 |
+
output/
|
| 138 |
+
outputs/
|
| 139 |
+
subs/
|
| 140 |
+
logs/
|
| 141 |
+
|
| 142 |
+
grounding/config/configs
|
| 143 |
+
grounding/version.py
|
| 144 |
+
|
| 145 |
+
vis/
|
| 146 |
+
tmp/
|
GroundingDINO/LICENSE
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Apache License
|
| 2 |
+
Version 2.0, January 2004
|
| 3 |
+
http://www.apache.org/licenses/
|
| 4 |
+
|
| 5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 6 |
+
|
| 7 |
+
1. Definitions.
|
| 8 |
+
|
| 9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 11 |
+
|
| 12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 13 |
+
the copyright owner that is granting the License.
|
| 14 |
+
|
| 15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 16 |
+
other entities that control, are controlled by, or are under common
|
| 17 |
+
control with that entity. For the purposes of this definition,
|
| 18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 19 |
+
direction or management of such entity, whether by contract or
|
| 20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 22 |
+
|
| 23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 24 |
+
exercising permissions granted by this License.
|
| 25 |
+
|
| 26 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 27 |
+
including but not limited to software source code, documentation
|
| 28 |
+
source, and configuration files.
|
| 29 |
+
|
| 30 |
+
"Object" form shall mean any form resulting from mechanical
|
| 31 |
+
transformation or translation of a Source form, including but
|
| 32 |
+
not limited to compiled object code, generated documentation,
|
| 33 |
+
and conversions to other media types.
|
| 34 |
+
|
| 35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 36 |
+
Object form, made available under the License, as indicated by a
|
| 37 |
+
copyright notice that is included in or attached to the work
|
| 38 |
+
(an example is provided in the Appendix below).
|
| 39 |
+
|
| 40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 41 |
+
form, that is based on (or derived from) the Work and for which the
|
| 42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 44 |
+
of this License, Derivative Works shall not include works that remain
|
| 45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 46 |
+
the Work and Derivative Works thereof.
|
| 47 |
+
|
| 48 |
+
"Contribution" shall mean any work of authorship, including
|
| 49 |
+
the original version of the Work and any modifications or additions
|
| 50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 54 |
+
means any form of electronic, verbal, or written communication sent
|
| 55 |
+
to the Licensor or its representatives, including but not limited to
|
| 56 |
+
communication on electronic mailing lists, source code control systems,
|
| 57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 59 |
+
excluding communication that is conspicuously marked or otherwise
|
| 60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 61 |
+
|
| 62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 64 |
+
subsequently incorporated within the Work.
|
| 65 |
+
|
| 66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 71 |
+
Work and such Derivative Works in Source or Object form.
|
| 72 |
+
|
| 73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 76 |
+
(except as stated in this section) patent license to make, have made,
|
| 77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 78 |
+
where such license applies only to those patent claims licensable
|
| 79 |
+
by such Contributor that are necessarily infringed by their
|
| 80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 82 |
+
institute patent litigation against any entity (including a
|
| 83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 84 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 85 |
+
or contributory patent infringement, then any patent licenses
|
| 86 |
+
granted to You under this License for that Work shall terminate
|
| 87 |
+
as of the date such litigation is filed.
|
| 88 |
+
|
| 89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 90 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 91 |
+
modifications, and in Source or Object form, provided that You
|
| 92 |
+
meet the following conditions:
|
| 93 |
+
|
| 94 |
+
(a) You must give any other recipients of the Work or
|
| 95 |
+
Derivative Works a copy of this License; and
|
| 96 |
+
|
| 97 |
+
(b) You must cause any modified files to carry prominent notices
|
| 98 |
+
stating that You changed the files; and
|
| 99 |
+
|
| 100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 101 |
+
that You distribute, all copyright, patent, trademark, and
|
| 102 |
+
attribution notices from the Source form of the Work,
|
| 103 |
+
excluding those notices that do not pertain to any part of
|
| 104 |
+
the Derivative Works; and
|
| 105 |
+
|
| 106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 107 |
+
distribution, then any Derivative Works that You distribute must
|
| 108 |
+
include a readable copy of the attribution notices contained
|
| 109 |
+
within such NOTICE file, excluding those notices that do not
|
| 110 |
+
pertain to any part of the Derivative Works, in at least one
|
| 111 |
+
of the following places: within a NOTICE text file distributed
|
| 112 |
+
as part of the Derivative Works; within the Source form or
|
| 113 |
+
documentation, if provided along with the Derivative Works; or,
|
| 114 |
+
within a display generated by the Derivative Works, if and
|
| 115 |
+
wherever such third-party notices normally appear. The contents
|
| 116 |
+
of the NOTICE file are for informational purposes only and
|
| 117 |
+
do not modify the License. You may add Your own attribution
|
| 118 |
+
notices within Derivative Works that You distribute, alongside
|
| 119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 120 |
+
that such additional attribution notices cannot be construed
|
| 121 |
+
as modifying the License.
|
| 122 |
+
|
| 123 |
+
You may add Your own copyright statement to Your modifications and
|
| 124 |
+
may provide additional or different license terms and conditions
|
| 125 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 126 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 127 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 128 |
+
the conditions stated in this License.
|
| 129 |
+
|
| 130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 132 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 133 |
+
this License, without any additional terms or conditions.
|
| 134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 135 |
+
the terms of any separate license agreement you may have executed
|
| 136 |
+
with Licensor regarding such Contributions.
|
| 137 |
+
|
| 138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 140 |
+
except as required for reasonable and customary use in describing the
|
| 141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 142 |
+
|
| 143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 144 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 147 |
+
implied, including, without limitation, any warranties or conditions
|
| 148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 150 |
+
appropriateness of using or redistributing the Work and assume any
|
| 151 |
+
risks associated with Your exercise of permissions under this License.
|
| 152 |
+
|
| 153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 154 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 155 |
+
unless required by applicable law (such as deliberate and grossly
|
| 156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 157 |
+
liable to You for damages, including any direct, indirect, special,
|
| 158 |
+
incidental, or consequential damages of any character arising as a
|
| 159 |
+
result of this License or out of the use or inability to use the
|
| 160 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 161 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 162 |
+
other commercial damages or losses), even if such Contributor
|
| 163 |
+
has been advised of the possibility of such damages.
|
| 164 |
+
|
| 165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 168 |
+
or other liability obligations and/or rights consistent with this
|
| 169 |
+
License. However, in accepting such obligations, You may act only
|
| 170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 171 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 172 |
+
defend, and hold each Contributor harmless for any liability
|
| 173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 174 |
+
of your accepting any such warranty or additional liability.
|
| 175 |
+
|
| 176 |
+
END OF TERMS AND CONDITIONS
|
| 177 |
+
|
| 178 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 179 |
+
|
| 180 |
+
To apply the Apache License to your work, attach the following
|
| 181 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 182 |
+
replaced with your own identifying information. (Don't include
|
| 183 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 184 |
+
comment syntax for the file format. We also recommend that a
|
| 185 |
+
file or class name and description of purpose be included on the
|
| 186 |
+
same "printed page" as the copyright notice for easier
|
| 187 |
+
identification within third-party archives.
|
| 188 |
+
|
| 189 |
+
Copyright 2020 - present, Facebook, Inc
|
| 190 |
+
|
| 191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 192 |
+
you may not use this file except in compliance with the License.
|
| 193 |
+
You may obtain a copy of the License at
|
| 194 |
+
|
| 195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 196 |
+
|
| 197 |
+
Unless required by applicable law or agreed to in writing, software
|
| 198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 200 |
+
See the License for the specific language governing permissions and
|
| 201 |
+
limitations under the License.
|
GroundingDINO/README.md
ADDED
|
@@ -0,0 +1,306 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<div align="center">
|
| 2 |
+
<img src="./.asset/grounding_dino_logo.png" width="30%">
|
| 3 |
+
</div>
|
| 4 |
+
|
| 5 |
+
# :sauropod: Grounding DINO
|
| 6 |
+
|
| 7 |
+
[](https://paperswithcode.com/sota/zero-shot-object-detection-on-mscoco?p=grounding-dino-marrying-dino-with-grounded) [](https://paperswithcode.com/sota/zero-shot-object-detection-on-odinw?p=grounding-dino-marrying-dino-with-grounded) \
|
| 8 |
+
[](https://paperswithcode.com/sota/object-detection-on-coco-minival?p=grounding-dino-marrying-dino-with-grounded) [](https://paperswithcode.com/sota/object-detection-on-coco?p=grounding-dino-marrying-dino-with-grounded)
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
**[IDEA-CVR, IDEA-Research](https://github.com/IDEA-Research)**
|
| 12 |
+
|
| 13 |
+
[Shilong Liu](http://www.lsl.zone/), [Zhaoyang Zeng](https://scholar.google.com/citations?user=U_cvvUwAAAAJ&hl=zh-CN&oi=ao), [Tianhe Ren](https://rentainhe.github.io/), [Feng Li](https://scholar.google.com/citations?user=ybRe9GcAAAAJ&hl=zh-CN), [Hao Zhang](https://scholar.google.com/citations?user=B8hPxMQAAAAJ&hl=zh-CN), [Jie Yang](https://github.com/yangjie-cv), [Chunyuan Li](https://scholar.google.com/citations?user=Zd7WmXUAAAAJ&hl=zh-CN&oi=ao), [Jianwei Yang](https://jwyang.github.io/), [Hang Su](https://scholar.google.com/citations?hl=en&user=dxN1_X0AAAAJ&view_op=list_works&sortby=pubdate), [Jun Zhu](https://scholar.google.com/citations?hl=en&user=axsP38wAAAAJ), [Lei Zhang](https://www.leizhang.org/)<sup>:email:</sup>.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
[[`Paper`](https://arxiv.org/abs/2303.05499)] [[`Demo`](https://huggingface.co/spaces/ShilongLiu/Grounding_DINO_demo)] [[`BibTex`](#black_nib-citation)]
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
PyTorch implementation and pretrained models for Grounding DINO. For details, see the paper **[Grounding DINO: Marrying DINO with Grounded Pre-Training for Open-Set Object Detection](https://arxiv.org/abs/2303.05499)**.
|
| 20 |
+
|
| 21 |
+
## :sun_with_face: Helpful Tutorial
|
| 22 |
+
|
| 23 |
+
- :grapes: [[Read our arXiv Paper](https://arxiv.org/abs/2303.05499)]
|
| 24 |
+
- :apple: [[Watch our simple introduction video on YouTube](https://youtu.be/wxWDt5UiwY8)]
|
| 25 |
+
- :rose: [[Try the Colab Demo](https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/zero-shot-object-detection-with-grounding-dino.ipynb)]
|
| 26 |
+
- :sunflower: [[Try our Official Huggingface Demo](https://huggingface.co/spaces/ShilongLiu/Grounding_DINO_demo)]
|
| 27 |
+
- :maple_leaf: [[Watch the Step by Step Tutorial about GroundingDINO by Roboflow AI](https://youtu.be/cMa77r3YrDk)]
|
| 28 |
+
- :mushroom: [[GroundingDINO: Automated Dataset Annotation and Evaluation by Roboflow AI](https://youtu.be/C4NqaRBz_Kw)]
|
| 29 |
+
- :hibiscus: [[Accelerate Image Annotation with SAM and GroundingDINO by Roboflow AI](https://youtu.be/oEQYStnF2l8)]
|
| 30 |
+
|
| 31 |
+
<!-- Grounding DINO Methods |
|
| 32 |
+
[](https://arxiv.org/abs/2303.05499)
|
| 33 |
+
[](https://youtu.be/wxWDt5UiwY8) -->
|
| 34 |
+
|
| 35 |
+
<!-- Grounding DINO Demos |
|
| 36 |
+
[](https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/zero-shot-object-detection-with-grounding-dino.ipynb) -->
|
| 37 |
+
<!-- [](https://youtu.be/cMa77r3YrDk)
|
| 38 |
+
[](https://huggingface.co/spaces/ShilongLiu/Grounding_DINO_demo)
|
| 39 |
+
[](https://youtu.be/oEQYStnF2l8)
|
| 40 |
+
[](https://youtu.be/C4NqaRBz_Kw) -->
|
| 41 |
+
|
| 42 |
+
## :sparkles: Highlight Projects
|
| 43 |
+
|
| 44 |
+
- [DetGPT: Detect What You Need via Reasoning](https://github.com/OptimalScale/DetGPT)
|
| 45 |
+
- [Grounded-SAM: Marrying Grounding DINO with Segment Anything](https://github.com/IDEA-Research/Grounded-Segment-Anything)
|
| 46 |
+
- [Grounding DINO with Stable Diffusion](demo/image_editing_with_groundingdino_stablediffusion.ipynb)
|
| 47 |
+
- [Grounding DINO with GLIGEN for Controllable Image Editing](demo/image_editing_with_groundingdino_gligen.ipynb)
|
| 48 |
+
- [OpenSeeD: A Simple and Strong Openset Segmentation Model](https://github.com/IDEA-Research/OpenSeeD)
|
| 49 |
+
- [SEEM: Segment Everything Everywhere All at Once](https://github.com/UX-Decoder/Segment-Everything-Everywhere-All-At-Once)
|
| 50 |
+
- [X-GPT: Conversational Visual Agent supported by X-Decoder](https://github.com/microsoft/X-Decoder/tree/xgpt)
|
| 51 |
+
- [GLIGEN: Open-Set Grounded Text-to-Image Generation](https://github.com/gligen/GLIGEN)
|
| 52 |
+
- [LLaVA: Large Language and Vision Assistant](https://github.com/haotian-liu/LLaVA)
|
| 53 |
+
|
| 54 |
+
<!-- Extensions | [Grounding DINO with Segment Anything](https://github.com/IDEA-Research/Grounded-Segment-Anything); [Grounding DINO with Stable Diffusion](demo/image_editing_with_groundingdino_stablediffusion.ipynb); [Grounding DINO with GLIGEN](demo/image_editing_with_groundingdino_gligen.ipynb) -->
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
<!-- Official PyTorch implementation of [Grounding DINO](https://arxiv.org/abs/2303.05499), a stronger open-set object detector. Code is available now! -->
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
## :bulb: Highlight
|
| 62 |
+
|
| 63 |
+
- **Open-Set Detection.** Detect **everything** with language!
|
| 64 |
+
- **High Performancce.** COCO zero-shot **52.5 AP** (training without COCO data!). COCO fine-tune **63.0 AP**.
|
| 65 |
+
- **Flexible.** Collaboration with Stable Diffusion for Image Editting.
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
## :fire: News
|
| 71 |
+
- **`2023/04/15`**: Refer to [CV in the Wild Readings](https://github.com/Computer-Vision-in-the-Wild/CVinW_Readings) for those who are interested in open-set recognition!
|
| 72 |
+
- **`2023/04/08`**: We release [demos](demo/image_editing_with_groundingdino_gligen.ipynb) to combine [Grounding DINO](https://arxiv.org/abs/2303.05499) with [GLIGEN](https://github.com/gligen/GLIGEN) for more controllable image editings.
|
| 73 |
+
- **`2023/04/08`**: We release [demos](demo/image_editing_with_groundingdino_stablediffusion.ipynb) to combine [Grounding DINO](https://arxiv.org/abs/2303.05499) with [Stable Diffusion](https://github.com/Stability-AI/StableDiffusion) for image editings.
|
| 74 |
+
- **`2023/04/06`**: We build a new demo by marrying GroundingDINO with [Segment-Anything](https://github.com/facebookresearch/segment-anything) named **[Grounded-Segment-Anything](https://github.com/IDEA-Research/Grounded-Segment-Anything)** aims to support segmentation in GroundingDINO.
|
| 75 |
+
- **`2023/03/28`**: A YouTube [video](https://youtu.be/cMa77r3YrDk) about Grounding DINO and basic object detection prompt engineering. [[SkalskiP](https://github.com/SkalskiP)]
|
| 76 |
+
- **`2023/03/28`**: Add a [demo](https://huggingface.co/spaces/ShilongLiu/Grounding_DINO_demo) on Hugging Face Space!
|
| 77 |
+
- **`2023/03/27`**: Support CPU-only mode. Now the model can run on machines without GPUs.
|
| 78 |
+
- **`2023/03/25`**: A [demo](https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/zero-shot-object-detection-with-grounding-dino.ipynb) for Grounding DINO is available at Colab. [[SkalskiP](https://github.com/SkalskiP)]
|
| 79 |
+
- **`2023/03/22`**: Code is available Now!
|
| 80 |
+
|
| 81 |
+
<details open>
|
| 82 |
+
<summary><font size="4">
|
| 83 |
+
Description
|
| 84 |
+
</font></summary>
|
| 85 |
+
<a href="https://arxiv.org/abs/2303.05499">Paper</a> introduction.
|
| 86 |
+
<img src=".asset/hero_figure.png" alt="ODinW" width="100%">
|
| 87 |
+
Marrying <a href="https://github.com/IDEA-Research/GroundingDINO">Grounding DINO</a> and <a href="https://github.com/gligen/GLIGEN">GLIGEN</a>
|
| 88 |
+
<img src="https://huggingface.co/ShilongLiu/GroundingDINO/resolve/main/GD_GLIGEN.png" alt="gd_gligen" width="100%">
|
| 89 |
+
</details>
|
| 90 |
+
|
| 91 |
+
## :star: Explanations/Tips for Grounding DINO Inputs and Outputs
|
| 92 |
+
- Grounding DINO accepts an `(image, text)` pair as inputs.
|
| 93 |
+
- It outputs `900` (by default) object boxes. Each box has similarity scores across all input words. (as shown in Figures below.)
|
| 94 |
+
- We defaultly choose the boxes whose highest similarities are higher than a `box_threshold`.
|
| 95 |
+
- We extract the words whose similarities are higher than the `text_threshold` as predicted labels.
|
| 96 |
+
- If you want to obtain objects of specific phrases, like the `dogs` in the sentence `two dogs with a stick.`, you can select the boxes with highest text similarities with `dogs` as final outputs.
|
| 97 |
+
- Note that each word can be split to **more than one** tokens with different tokenlizers. The number of words in a sentence may not equal to the number of text tokens.
|
| 98 |
+
- We suggest separating different category names with `.` for Grounding DINO.
|
| 99 |
+

|
| 100 |
+

|
| 101 |
+
|
| 102 |
+
## :label: TODO
|
| 103 |
+
|
| 104 |
+
- [x] Release inference code and demo.
|
| 105 |
+
- [x] Release checkpoints.
|
| 106 |
+
- [x] Grounding DINO with Stable Diffusion and GLIGEN demos.
|
| 107 |
+
- [ ] Release training codes.
|
| 108 |
+
|
| 109 |
+
## :hammer_and_wrench: Install
|
| 110 |
+
|
| 111 |
+
**Note:**
|
| 112 |
+
|
| 113 |
+
If you have a CUDA environment, please make sure the environment variable `CUDA_HOME` is set. It will be compiled under CPU-only mode if no CUDA available.
|
| 114 |
+
|
| 115 |
+
**Installation:**
|
| 116 |
+
|
| 117 |
+
Clone the GroundingDINO repository from GitHub.
|
| 118 |
+
|
| 119 |
+
```bash
|
| 120 |
+
git clone https://github.com/IDEA-Research/GroundingDINO.git
|
| 121 |
+
```
|
| 122 |
+
|
| 123 |
+
Change the current directory to the GroundingDINO folder.
|
| 124 |
+
|
| 125 |
+
```bash
|
| 126 |
+
cd GroundingDINO/
|
| 127 |
+
```
|
| 128 |
+
|
| 129 |
+
Install the required dependencies in the current directory.
|
| 130 |
+
|
| 131 |
+
```bash
|
| 132 |
+
pip3 install -q -e .
|
| 133 |
+
```
|
| 134 |
+
Create a new directory called "weights" to store the model weights.
|
| 135 |
+
|
| 136 |
+
```bash
|
| 137 |
+
mkdir weights
|
| 138 |
+
```
|
| 139 |
+
|
| 140 |
+
Change the current directory to the "weights" folder.
|
| 141 |
+
|
| 142 |
+
```bash
|
| 143 |
+
cd weights
|
| 144 |
+
```
|
| 145 |
+
|
| 146 |
+
Download the model weights file.
|
| 147 |
+
|
| 148 |
+
```bash
|
| 149 |
+
wget -q https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth
|
| 150 |
+
```
|
| 151 |
+
|
| 152 |
+
## :arrow_forward: Demo
|
| 153 |
+
Check your GPU ID (only if you're using a GPU)
|
| 154 |
+
|
| 155 |
+
```bash
|
| 156 |
+
nvidia-smi
|
| 157 |
+
```
|
| 158 |
+
Replace `{GPU ID}`, `image_you_want_to_detect.jpg`, and `"dir you want to save the output"` with appropriate values in the following command
|
| 159 |
+
```bash
|
| 160 |
+
CUDA_VISIBLE_DEVICES={GPU ID} python demo/inference_on_a_image.py \
|
| 161 |
+
-c /GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py \
|
| 162 |
+
-p /GroundingDINO/weights/groundingdino_swint_ogc.pth \
|
| 163 |
+
-i image_you_want_to_detect.jpg \
|
| 164 |
+
-o "dir you want to save the output" \
|
| 165 |
+
-t "chair"
|
| 166 |
+
[--cpu-only] # open it for cpu mode
|
| 167 |
+
```
|
| 168 |
+
See the `demo/inference_on_a_image.py` for more details.
|
| 169 |
+
|
| 170 |
+
**Running with Python:**
|
| 171 |
+
|
| 172 |
+
```python
|
| 173 |
+
from groundingdino.util.inference import load_model, load_image, predict, annotate
|
| 174 |
+
import cv2
|
| 175 |
+
|
| 176 |
+
model = load_model("groundingdino/config/GroundingDINO_SwinT_OGC.py", "weights/groundingdino_swint_ogc.pth")
|
| 177 |
+
IMAGE_PATH = "weights/dog-3.jpeg"
|
| 178 |
+
TEXT_PROMPT = "chair . person . dog ."
|
| 179 |
+
BOX_TRESHOLD = 0.35
|
| 180 |
+
TEXT_TRESHOLD = 0.25
|
| 181 |
+
|
| 182 |
+
image_source, image = load_image(IMAGE_PATH)
|
| 183 |
+
|
| 184 |
+
boxes, logits, phrases = predict(
|
| 185 |
+
model=model,
|
| 186 |
+
image=image,
|
| 187 |
+
caption=TEXT_PROMPT,
|
| 188 |
+
box_threshold=BOX_TRESHOLD,
|
| 189 |
+
text_threshold=TEXT_TRESHOLD
|
| 190 |
+
)
|
| 191 |
+
|
| 192 |
+
annotated_frame = annotate(image_source=image_source, boxes=boxes, logits=logits, phrases=phrases)
|
| 193 |
+
cv2.imwrite("annotated_image.jpg", annotated_frame)
|
| 194 |
+
```
|
| 195 |
+
**Web UI**
|
| 196 |
+
|
| 197 |
+
We also provide a demo code to integrate Grounding DINO with Gradio Web UI. See the file `demo/gradio_app.py` for more details.
|
| 198 |
+
|
| 199 |
+
**Notebooks**
|
| 200 |
+
|
| 201 |
+
- We release [demos](demo/image_editing_with_groundingdino_gligen.ipynb) to combine [Grounding DINO](https://arxiv.org/abs/2303.05499) with [GLIGEN](https://github.com/gligen/GLIGEN) for more controllable image editings.
|
| 202 |
+
- We release [demos](demo/image_editing_with_groundingdino_stablediffusion.ipynb) to combine [Grounding DINO](https://arxiv.org/abs/2303.05499) with [Stable Diffusion](https://github.com/Stability-AI/StableDiffusion) for image editings.
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
## :luggage: Checkpoints
|
| 206 |
+
|
| 207 |
+
<!-- insert a table -->
|
| 208 |
+
<table>
|
| 209 |
+
<thead>
|
| 210 |
+
<tr style="text-align: right;">
|
| 211 |
+
<th></th>
|
| 212 |
+
<th>name</th>
|
| 213 |
+
<th>backbone</th>
|
| 214 |
+
<th>Data</th>
|
| 215 |
+
<th>box AP on COCO</th>
|
| 216 |
+
<th>Checkpoint</th>
|
| 217 |
+
<th>Config</th>
|
| 218 |
+
</tr>
|
| 219 |
+
</thead>
|
| 220 |
+
<tbody>
|
| 221 |
+
<tr>
|
| 222 |
+
<th>1</th>
|
| 223 |
+
<td>GroundingDINO-T</td>
|
| 224 |
+
<td>Swin-T</td>
|
| 225 |
+
<td>O365,GoldG,Cap4M</td>
|
| 226 |
+
<td>48.4 (zero-shot) / 57.2 (fine-tune)</td>
|
| 227 |
+
<td><a href="https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth">GitHub link</a> | <a href="https://huggingface.co/ShilongLiu/GroundingDINO/resolve/main/groundingdino_swint_ogc.pth">HF link</a></td>
|
| 228 |
+
<td><a href="https://github.com/IDEA-Research/GroundingDINO/blob/main/groundingdino/config/GroundingDINO_SwinT_OGC.py">link</a></td>
|
| 229 |
+
</tr>
|
| 230 |
+
<tr>
|
| 231 |
+
<th>2</th>
|
| 232 |
+
<td>GroundingDINO-B</td>
|
| 233 |
+
<td>Swin-B</td>
|
| 234 |
+
<td>COCO,O365,GoldG,Cap4M,OpenImage,ODinW-35,RefCOCO</td>
|
| 235 |
+
<td>56.7 </td>
|
| 236 |
+
<td><a href="https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha2/groundingdino_swinb_cogcoor.pth">GitHub link</a> | <a href="https://huggingface.co/ShilongLiu/GroundingDINO/resolve/main/groundingdino_swinb_cogcoor.pth">HF link</a>
|
| 237 |
+
<td><a href="https://github.com/IDEA-Research/GroundingDINO/blob/main/groundingdino/config/GroundingDINO_SwinB.cfg.py">link</a></td>
|
| 238 |
+
</tr>
|
| 239 |
+
</tbody>
|
| 240 |
+
</table>
|
| 241 |
+
|
| 242 |
+
## :medal_military: Results
|
| 243 |
+
|
| 244 |
+
<details open>
|
| 245 |
+
<summary><font size="4">
|
| 246 |
+
COCO Object Detection Results
|
| 247 |
+
</font></summary>
|
| 248 |
+
<img src=".asset/COCO.png" alt="COCO" width="100%">
|
| 249 |
+
</details>
|
| 250 |
+
|
| 251 |
+
<details open>
|
| 252 |
+
<summary><font size="4">
|
| 253 |
+
ODinW Object Detection Results
|
| 254 |
+
</font></summary>
|
| 255 |
+
<img src=".asset/ODinW.png" alt="ODinW" width="100%">
|
| 256 |
+
</details>
|
| 257 |
+
|
| 258 |
+
<details open>
|
| 259 |
+
<summary><font size="4">
|
| 260 |
+
Marrying Grounding DINO with <a href="https://github.com/Stability-AI/StableDiffusion">Stable Diffusion</a> for Image Editing
|
| 261 |
+
</font></summary>
|
| 262 |
+
See our example <a href="https://github.com/IDEA-Research/GroundingDINO/blob/main/demo/image_editing_with_groundingdino_stablediffusion.ipynb">notebook</a> for more details.
|
| 263 |
+
<img src=".asset/GD_SD.png" alt="GD_SD" width="100%">
|
| 264 |
+
</details>
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
<details open>
|
| 268 |
+
<summary><font size="4">
|
| 269 |
+
Marrying Grounding DINO with <a href="https://github.com/gligen/GLIGEN">GLIGEN</a> for more Detailed Image Editing.
|
| 270 |
+
</font></summary>
|
| 271 |
+
See our example <a href="https://github.com/IDEA-Research/GroundingDINO/blob/main/demo/image_editing_with_groundingdino_gligen.ipynb">notebook</a> for more details.
|
| 272 |
+
<img src=".asset/GD_GLIGEN.png" alt="GD_GLIGEN" width="100%">
|
| 273 |
+
</details>
|
| 274 |
+
|
| 275 |
+
## :sauropod: Model: Grounding DINO
|
| 276 |
+
|
| 277 |
+
Includes: a text backbone, an image backbone, a feature enhancer, a language-guided query selection, and a cross-modality decoder.
|
| 278 |
+
|
| 279 |
+

|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
## :hearts: Acknowledgement
|
| 283 |
+
|
| 284 |
+
Our model is related to [DINO](https://github.com/IDEA-Research/DINO) and [GLIP](https://github.com/microsoft/GLIP). Thanks for their great work!
|
| 285 |
+
|
| 286 |
+
We also thank great previous work including DETR, Deformable DETR, SMCA, Conditional DETR, Anchor DETR, Dynamic DETR, DAB-DETR, DN-DETR, etc. More related work are available at [Awesome Detection Transformer](https://github.com/IDEACVR/awesome-detection-transformer). A new toolbox [detrex](https://github.com/IDEA-Research/detrex) is available as well.
|
| 287 |
+
|
| 288 |
+
Thanks [Stable Diffusion](https://github.com/Stability-AI/StableDiffusion) and [GLIGEN](https://github.com/gligen/GLIGEN) for their awesome models.
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
## :black_nib: Citation
|
| 292 |
+
|
| 293 |
+
If you find our work helpful for your research, please consider citing the following BibTeX entry.
|
| 294 |
+
|
| 295 |
+
```bibtex
|
| 296 |
+
@article{liu2023grounding,
|
| 297 |
+
title={Grounding dino: Marrying dino with grounded pre-training for open-set object detection},
|
| 298 |
+
author={Liu, Shilong and Zeng, Zhaoyang and Ren, Tianhe and Li, Feng and Zhang, Hao and Yang, Jie and Li, Chunyuan and Yang, Jianwei and Su, Hang and Zhu, Jun and others},
|
| 299 |
+
journal={arXiv preprint arXiv:2303.05499},
|
| 300 |
+
year={2023}
|
| 301 |
+
}
|
| 302 |
+
```
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
|
GroundingDINO/build/lib.linux-x86_64-cpython-310/groundingdino/_C.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e117adefc73963b8ee089cdf3be41102bc3c56b835195a717a50dbacf407d3bb
|
| 3 |
+
size 13644784
|
GroundingDINO/build/temp.linux-x86_64-cpython-310/content/drive/My Drive/AI/matte/Matte-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.o
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b6e312fc4752b198c0a12e825f2d46be3d51d031be20e1faa63e427299290cf1
|
| 3 |
+
size 4783984
|
GroundingDINO/build/temp.linux-x86_64-cpython-310/content/drive/My Drive/AI/matte/Matte-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cuda.o
ADDED
|
Binary file (934 kB). View file
|
|
|
GroundingDINO/build/temp.linux-x86_64-cpython-310/content/drive/My Drive/AI/matte/Matte-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/cuda_version.o
ADDED
|
Binary file (5.34 kB). View file
|
|
|
GroundingDINO/build/temp.linux-x86_64-cpython-310/content/drive/My Drive/AI/matte/Matte-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/vision.o
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:183c9a28f0c9fb4810d5415bf8027b6773488898b81452a277589aa2607812b6
|
| 3 |
+
size 18513720
|
GroundingDINO/demo/create_coco_dataset.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import typer
|
| 2 |
+
from groundingdino.util.inference import load_model, load_image, predict
|
| 3 |
+
from tqdm import tqdm
|
| 4 |
+
import torchvision
|
| 5 |
+
import torch
|
| 6 |
+
import fiftyone as fo
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def main(
|
| 10 |
+
image_directory: str = 'test_grounding_dino',
|
| 11 |
+
text_prompt: str = 'bus, car',
|
| 12 |
+
box_threshold: float = 0.15,
|
| 13 |
+
text_threshold: float = 0.10,
|
| 14 |
+
export_dataset: bool = False,
|
| 15 |
+
view_dataset: bool = False,
|
| 16 |
+
export_annotated_images: bool = True,
|
| 17 |
+
weights_path : str = "groundingdino_swint_ogc.pth",
|
| 18 |
+
config_path: str = "../../GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py",
|
| 19 |
+
subsample: int = None,
|
| 20 |
+
):
|
| 21 |
+
|
| 22 |
+
model = load_model(config_path, weights_path)
|
| 23 |
+
|
| 24 |
+
dataset = fo.Dataset.from_images_dir(image_directory)
|
| 25 |
+
|
| 26 |
+
samples = []
|
| 27 |
+
|
| 28 |
+
if subsample is not None:
|
| 29 |
+
|
| 30 |
+
if subsample < len(dataset):
|
| 31 |
+
dataset = dataset.take(subsample).clone()
|
| 32 |
+
|
| 33 |
+
for sample in tqdm(dataset):
|
| 34 |
+
|
| 35 |
+
image_source, image = load_image(sample.filepath)
|
| 36 |
+
|
| 37 |
+
boxes, logits, phrases = predict(
|
| 38 |
+
model=model,
|
| 39 |
+
image=image,
|
| 40 |
+
caption=text_prompt,
|
| 41 |
+
box_threshold=box_threshold,
|
| 42 |
+
text_threshold=text_threshold,
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
detections = []
|
| 46 |
+
|
| 47 |
+
for box, logit, phrase in zip(boxes, logits, phrases):
|
| 48 |
+
|
| 49 |
+
rel_box = torchvision.ops.box_convert(box, 'cxcywh', 'xywh')
|
| 50 |
+
|
| 51 |
+
detections.append(
|
| 52 |
+
fo.Detection(
|
| 53 |
+
label=phrase,
|
| 54 |
+
bounding_box=rel_box,
|
| 55 |
+
confidence=logit,
|
| 56 |
+
))
|
| 57 |
+
|
| 58 |
+
# Store detections in a field name of your choice
|
| 59 |
+
sample["detections"] = fo.Detections(detections=detections)
|
| 60 |
+
sample.save()
|
| 61 |
+
|
| 62 |
+
# loads the voxel fiftyone UI ready for viewing the dataset.
|
| 63 |
+
if view_dataset:
|
| 64 |
+
session = fo.launch_app(dataset)
|
| 65 |
+
session.wait()
|
| 66 |
+
|
| 67 |
+
# exports COCO dataset ready for training
|
| 68 |
+
if export_dataset:
|
| 69 |
+
dataset.export(
|
| 70 |
+
'coco_dataset',
|
| 71 |
+
dataset_type=fo.types.COCODetectionDataset,
|
| 72 |
+
)
|
| 73 |
+
|
| 74 |
+
# saves bounding boxes plotted on the input images to disk
|
| 75 |
+
if export_annotated_images:
|
| 76 |
+
dataset.draw_labels(
|
| 77 |
+
'images_with_bounding_boxes',
|
| 78 |
+
label_fields=['detections']
|
| 79 |
+
)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
if __name__ == '__main__':
|
| 83 |
+
typer.run(main)
|
GroundingDINO/demo/gradio_app.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
from functools import partial
|
| 3 |
+
import cv2
|
| 4 |
+
import requests
|
| 5 |
+
import os
|
| 6 |
+
from io import BytesIO
|
| 7 |
+
from PIL import Image
|
| 8 |
+
import numpy as np
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
import warnings
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
|
| 16 |
+
# prepare the environment
|
| 17 |
+
os.system("python setup.py build develop --user")
|
| 18 |
+
os.system("pip install packaging==21.3")
|
| 19 |
+
os.system("pip install gradio")
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
warnings.filterwarnings("ignore")
|
| 23 |
+
|
| 24 |
+
import gradio as gr
|
| 25 |
+
|
| 26 |
+
from groundingdino.models import build_model
|
| 27 |
+
from groundingdino.util.slconfig import SLConfig
|
| 28 |
+
from groundingdino.util.utils import clean_state_dict
|
| 29 |
+
from groundingdino.util.inference import annotate, load_image, predict
|
| 30 |
+
import groundingdino.datasets.transforms as T
|
| 31 |
+
|
| 32 |
+
from huggingface_hub import hf_hub_download
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
# Use this command for evaluate the Grounding DINO model
|
| 37 |
+
config_file = "groundingdino/config/GroundingDINO_SwinT_OGC.py"
|
| 38 |
+
ckpt_repo_id = "ShilongLiu/GroundingDINO"
|
| 39 |
+
ckpt_filenmae = "groundingdino_swint_ogc.pth"
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def load_model_hf(model_config_path, repo_id, filename, device='cpu'):
|
| 43 |
+
args = SLConfig.fromfile(model_config_path)
|
| 44 |
+
model = build_model(args)
|
| 45 |
+
args.device = device
|
| 46 |
+
|
| 47 |
+
cache_file = hf_hub_download(repo_id=repo_id, filename=filename)
|
| 48 |
+
checkpoint = torch.load(cache_file, map_location='cpu')
|
| 49 |
+
log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)
|
| 50 |
+
print("Model loaded from {} \n => {}".format(cache_file, log))
|
| 51 |
+
_ = model.eval()
|
| 52 |
+
return model
|
| 53 |
+
|
| 54 |
+
def image_transform_grounding(init_image):
|
| 55 |
+
transform = T.Compose([
|
| 56 |
+
T.RandomResize([800], max_size=1333),
|
| 57 |
+
T.ToTensor(),
|
| 58 |
+
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
|
| 59 |
+
])
|
| 60 |
+
image, _ = transform(init_image, None) # 3, h, w
|
| 61 |
+
return init_image, image
|
| 62 |
+
|
| 63 |
+
def image_transform_grounding_for_vis(init_image):
|
| 64 |
+
transform = T.Compose([
|
| 65 |
+
T.RandomResize([800], max_size=1333),
|
| 66 |
+
])
|
| 67 |
+
image, _ = transform(init_image, None) # 3, h, w
|
| 68 |
+
return image
|
| 69 |
+
|
| 70 |
+
model = load_model_hf(config_file, ckpt_repo_id, ckpt_filenmae)
|
| 71 |
+
|
| 72 |
+
def run_grounding(input_image, grounding_caption, box_threshold, text_threshold):
|
| 73 |
+
init_image = input_image.convert("RGB")
|
| 74 |
+
original_size = init_image.size
|
| 75 |
+
|
| 76 |
+
_, image_tensor = image_transform_grounding(init_image)
|
| 77 |
+
image_pil: Image = image_transform_grounding_for_vis(init_image)
|
| 78 |
+
|
| 79 |
+
# run grounidng
|
| 80 |
+
boxes, logits, phrases = predict(model, image_tensor, grounding_caption, box_threshold, text_threshold, device='cpu')
|
| 81 |
+
annotated_frame = annotate(image_source=np.asarray(image_pil), boxes=boxes, logits=logits, phrases=phrases)
|
| 82 |
+
image_with_box = Image.fromarray(cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB))
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
return image_with_box
|
| 86 |
+
|
| 87 |
+
if __name__ == "__main__":
|
| 88 |
+
|
| 89 |
+
parser = argparse.ArgumentParser("Grounding DINO demo", add_help=True)
|
| 90 |
+
parser.add_argument("--debug", action="store_true", help="using debug mode")
|
| 91 |
+
parser.add_argument("--share", action="store_true", help="share the app")
|
| 92 |
+
args = parser.parse_args()
|
| 93 |
+
|
| 94 |
+
block = gr.Blocks().queue()
|
| 95 |
+
with block:
|
| 96 |
+
gr.Markdown("# [Grounding DINO](https://github.com/IDEA-Research/GroundingDINO)")
|
| 97 |
+
gr.Markdown("### Open-World Detection with Grounding DINO")
|
| 98 |
+
|
| 99 |
+
with gr.Row():
|
| 100 |
+
with gr.Column():
|
| 101 |
+
input_image = gr.Image(source='upload', type="pil")
|
| 102 |
+
grounding_caption = gr.Textbox(label="Detection Prompt")
|
| 103 |
+
run_button = gr.Button(label="Run")
|
| 104 |
+
with gr.Accordion("Advanced options", open=False):
|
| 105 |
+
box_threshold = gr.Slider(
|
| 106 |
+
label="Box Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.001
|
| 107 |
+
)
|
| 108 |
+
text_threshold = gr.Slider(
|
| 109 |
+
label="Text Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.001
|
| 110 |
+
)
|
| 111 |
+
|
| 112 |
+
with gr.Column():
|
| 113 |
+
gallery = gr.outputs.Image(
|
| 114 |
+
type="pil",
|
| 115 |
+
# label="grounding results"
|
| 116 |
+
).style(full_width=True, full_height=True)
|
| 117 |
+
# gallery = gr.Gallery(label="Generated images", show_label=False).style(
|
| 118 |
+
# grid=[1], height="auto", container=True, full_width=True, full_height=True)
|
| 119 |
+
|
| 120 |
+
run_button.click(fn=run_grounding, inputs=[
|
| 121 |
+
input_image, grounding_caption, box_threshold, text_threshold], outputs=[gallery])
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
block.launch(server_name='0.0.0.0', server_port=7579, debug=args.debug, share=args.share)
|
| 125 |
+
|
GroundingDINO/demo/image_editing_with_groundingdino_gligen.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
GroundingDINO/demo/image_editing_with_groundingdino_stablediffusion.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
GroundingDINO/demo/inference_on_a_image.py
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 8 |
+
|
| 9 |
+
import groundingdino.datasets.transforms as T
|
| 10 |
+
from groundingdino.models import build_model
|
| 11 |
+
from groundingdino.util import box_ops
|
| 12 |
+
from groundingdino.util.slconfig import SLConfig
|
| 13 |
+
from groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def plot_boxes_to_image(image_pil, tgt):
|
| 17 |
+
H, W = tgt["size"]
|
| 18 |
+
boxes = tgt["boxes"]
|
| 19 |
+
labels = tgt["labels"]
|
| 20 |
+
assert len(boxes) == len(labels), "boxes and labels must have same length"
|
| 21 |
+
|
| 22 |
+
draw = ImageDraw.Draw(image_pil)
|
| 23 |
+
mask = Image.new("L", image_pil.size, 0)
|
| 24 |
+
mask_draw = ImageDraw.Draw(mask)
|
| 25 |
+
|
| 26 |
+
# draw boxes and masks
|
| 27 |
+
for box, label in zip(boxes, labels):
|
| 28 |
+
# from 0..1 to 0..W, 0..H
|
| 29 |
+
box = box * torch.Tensor([W, H, W, H])
|
| 30 |
+
# from xywh to xyxy
|
| 31 |
+
box[:2] -= box[2:] / 2
|
| 32 |
+
box[2:] += box[:2]
|
| 33 |
+
# random color
|
| 34 |
+
color = tuple(np.random.randint(0, 255, size=3).tolist())
|
| 35 |
+
# draw
|
| 36 |
+
x0, y0, x1, y1 = box
|
| 37 |
+
x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
|
| 38 |
+
|
| 39 |
+
draw.rectangle([x0, y0, x1, y1], outline=color, width=6)
|
| 40 |
+
# draw.text((x0, y0), str(label), fill=color)
|
| 41 |
+
|
| 42 |
+
font = ImageFont.load_default()
|
| 43 |
+
if hasattr(font, "getbbox"):
|
| 44 |
+
bbox = draw.textbbox((x0, y0), str(label), font)
|
| 45 |
+
else:
|
| 46 |
+
w, h = draw.textsize(str(label), font)
|
| 47 |
+
bbox = (x0, y0, w + x0, y0 + h)
|
| 48 |
+
# bbox = draw.textbbox((x0, y0), str(label))
|
| 49 |
+
draw.rectangle(bbox, fill=color)
|
| 50 |
+
draw.text((x0, y0), str(label), fill="white")
|
| 51 |
+
|
| 52 |
+
mask_draw.rectangle([x0, y0, x1, y1], fill=255, width=6)
|
| 53 |
+
|
| 54 |
+
return image_pil, mask
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def load_image(image_path):
|
| 58 |
+
# load image
|
| 59 |
+
image_pil = Image.open(image_path).convert("RGB") # load image
|
| 60 |
+
|
| 61 |
+
transform = T.Compose(
|
| 62 |
+
[
|
| 63 |
+
T.RandomResize([800], max_size=1333),
|
| 64 |
+
T.ToTensor(),
|
| 65 |
+
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
|
| 66 |
+
]
|
| 67 |
+
)
|
| 68 |
+
image, _ = transform(image_pil, None) # 3, h, w
|
| 69 |
+
return image_pil, image
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def load_model(model_config_path, model_checkpoint_path, cpu_only=False):
|
| 73 |
+
args = SLConfig.fromfile(model_config_path)
|
| 74 |
+
args.device = "cuda" if not cpu_only else "cpu"
|
| 75 |
+
model = build_model(args)
|
| 76 |
+
checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
|
| 77 |
+
load_res = model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
|
| 78 |
+
print(load_res)
|
| 79 |
+
_ = model.eval()
|
| 80 |
+
return model
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def get_grounding_output(model, image, caption, box_threshold, text_threshold, with_logits=True, cpu_only=False):
    """Run the grounding model on one image/caption pair.

    Args:
        model: a loaded GroundingDINO model (see ``load_model``).
        image: preprocessed image tensor of shape (3, H, W).
        caption: free-text prompt; lower-cased and terminated with "." as the
            model expects.
        box_threshold: keep a query only if its best token score exceeds this.
        text_threshold: token-activation cutoff when mapping logits back to
            caption phrases.
        with_logits: if True, append the max score (truncated to 4 chars) to
            each predicted phrase.
        cpu_only: run on CPU instead of CUDA.

    Returns:
        (boxes_filt, pred_phrases): filtered boxes of shape (num_filt, 4)
        and the matching list of phrase strings.
    """
    caption = caption.lower().strip()
    # The model expects the caption to be period-terminated.
    if not caption.endswith("."):
        caption = caption + "."
    device = "cpu" if cpu_only else "cuda"
    model = model.to(device)
    image = image.to(device)
    with torch.no_grad():
        outputs = model(image[None], captions=[caption])
    logits = outputs["pred_logits"].cpu().sigmoid()[0]  # (nq, 256)
    boxes = outputs["pred_boxes"].cpu()[0]  # (nq, 4)

    # Filter queries by their best token score.
    # Fix: removed the dead no-op statements (bare ``logits.shape[0]`` /
    # ``logits_filt.shape[0]``) and the superfluous ``.clone()`` calls --
    # boolean indexing already returns new tensors.
    filt_mask = logits.max(dim=1)[0] > box_threshold
    logits_filt = logits[filt_mask]  # num_filt, 256
    boxes_filt = boxes[filt_mask]  # num_filt, 4

    # Map per-token activations back to phrases in the caption.
    tokenlizer = model.tokenizer
    tokenized = tokenlizer(caption)
    pred_phrases = []
    for logit, box in zip(logits_filt, boxes_filt):
        pred_phrase = get_phrases_from_posmap(logit > text_threshold, tokenized, tokenlizer)
        if with_logits:
            pred_phrases.append(pred_phrase + f"({str(logit.max().item())[:4]})")
        else:
            pred_phrases.append(pred_phrase)

    return boxes_filt, pred_phrases
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
if __name__ == "__main__":

    parser = argparse.ArgumentParser("Grounding DINO example", add_help=True)
    parser.add_argument("--config_file", "-c", type=str, required=True, help="path to config file")
    parser.add_argument(
        "--checkpoint_path", "-p", type=str, required=True, help="path to checkpoint file"
    )
    parser.add_argument("--image_path", "-i", type=str, required=True, help="path to image file")
    parser.add_argument("--text_prompt", "-t", type=str, required=True, help="text prompt")
    # Fix: this option used to be declared with BOTH required=True and a
    # default, which made the default dead code.  Dropping required=True is
    # backward compatible (existing invocations still work) and makes
    # "outputs" a usable default.
    parser.add_argument(
        "--output_dir", "-o", type=str, default="outputs", help="output directory"
    )

    parser.add_argument("--box_threshold", type=float, default=0.3, help="box threshold")
    parser.add_argument("--text_threshold", type=float, default=0.25, help="text threshold")

    parser.add_argument("--cpu-only", action="store_true", help="running on cpu only!, default=False")
    args = parser.parse_args()

    # cfg
    config_file = args.config_file  # path of the model config file
    checkpoint_path = args.checkpoint_path  # path of the model weights
    image_path = args.image_path
    text_prompt = args.text_prompt
    output_dir = args.output_dir
    box_threshold = args.box_threshold
    text_threshold = args.text_threshold

    # make dir
    os.makedirs(output_dir, exist_ok=True)
    # load image
    image_pil, image = load_image(image_path)
    # load model
    model = load_model(config_file, checkpoint_path, cpu_only=args.cpu_only)

    # keep a copy of the raw image next to the prediction for comparison
    image_pil.save(os.path.join(output_dir, "raw_image.jpg"))

    # run model
    boxes_filt, pred_phrases = get_grounding_output(
        model, image, text_prompt, box_threshold, text_threshold, cpu_only=args.cpu_only
    )

    # visualize pred
    size = image_pil.size
    pred_dict = {
        "boxes": boxes_filt,
        "size": [size[1], size[0]],  # H,W (PIL .size is W,H)
        "labels": pred_phrases,
    }
    image_with_box = plot_boxes_to_image(image_pil, pred_dict)[0]
    image_with_box.save(os.path.join(output_dir, "pred.jpg"))
|
GroundingDINO/groundingdino.egg-info/PKG-INFO
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: groundingdino
|
| 3 |
+
Version: 0.1.0
|
| 4 |
+
Summary: open-set object detector
|
| 5 |
+
Home-page: https://github.com/IDEA-Research/GroundingDINO
|
| 6 |
+
Author: International Digital Economy Academy, Shilong Liu
|
| 7 |
+
License: Apache License
|
| 8 |
+
Version 2.0, January 2004
|
| 9 |
+
http://www.apache.org/licenses/
|
| 10 |
+
|
| 11 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 12 |
+
|
| 13 |
+
1. Definitions.
|
| 14 |
+
|
| 15 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 16 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 17 |
+
|
| 18 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 19 |
+
the copyright owner that is granting the License.
|
| 20 |
+
|
| 21 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 22 |
+
other entities that control, are controlled by, or are under common
|
| 23 |
+
control with that entity. For the purposes of this definition,
|
| 24 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 25 |
+
direction or management of such entity, whether by contract or
|
| 26 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 27 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 28 |
+
|
| 29 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 30 |
+
exercising permissions granted by this License.
|
| 31 |
+
|
| 32 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 33 |
+
including but not limited to software source code, documentation
|
| 34 |
+
source, and configuration files.
|
| 35 |
+
|
| 36 |
+
"Object" form shall mean any form resulting from mechanical
|
| 37 |
+
transformation or translation of a Source form, including but
|
| 38 |
+
not limited to compiled object code, generated documentation,
|
| 39 |
+
and conversions to other media types.
|
| 40 |
+
|
| 41 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 42 |
+
Object form, made available under the License, as indicated by a
|
| 43 |
+
copyright notice that is included in or attached to the work
|
| 44 |
+
(an example is provided in the Appendix below).
|
| 45 |
+
|
| 46 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 47 |
+
form, that is based on (or derived from) the Work and for which the
|
| 48 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 49 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 50 |
+
of this License, Derivative Works shall not include works that remain
|
| 51 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 52 |
+
the Work and Derivative Works thereof.
|
| 53 |
+
|
| 54 |
+
"Contribution" shall mean any work of authorship, including
|
| 55 |
+
the original version of the Work and any modifications or additions
|
| 56 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 57 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 58 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 59 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 60 |
+
means any form of electronic, verbal, or written communication sent
|
| 61 |
+
to the Licensor or its representatives, including but not limited to
|
| 62 |
+
communication on electronic mailing lists, source code control systems,
|
| 63 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 64 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 65 |
+
excluding communication that is conspicuously marked or otherwise
|
| 66 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 67 |
+
|
| 68 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 69 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 70 |
+
subsequently incorporated within the Work.
|
| 71 |
+
|
| 72 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 73 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 74 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 75 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 76 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 77 |
+
Work and such Derivative Works in Source or Object form.
|
| 78 |
+
|
| 79 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 80 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 81 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 82 |
+
(except as stated in this section) patent license to make, have made,
|
| 83 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 84 |
+
where such license applies only to those patent claims licensable
|
| 85 |
+
by such Contributor that are necessarily infringed by their
|
| 86 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 87 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 88 |
+
institute patent litigation against any entity (including a
|
| 89 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 90 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 91 |
+
or contributory patent infringement, then any patent licenses
|
| 92 |
+
granted to You under this License for that Work shall terminate
|
| 93 |
+
as of the date such litigation is filed.
|
| 94 |
+
|
| 95 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 96 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 97 |
+
modifications, and in Source or Object form, provided that You
|
| 98 |
+
meet the following conditions:
|
| 99 |
+
|
| 100 |
+
(a) You must give any other recipients of the Work or
|
| 101 |
+
Derivative Works a copy of this License; and
|
| 102 |
+
|
| 103 |
+
(b) You must cause any modified files to carry prominent notices
|
| 104 |
+
stating that You changed the files; and
|
| 105 |
+
|
| 106 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 107 |
+
that You distribute, all copyright, patent, trademark, and
|
| 108 |
+
attribution notices from the Source form of the Work,
|
| 109 |
+
excluding those notices that do not pertain to any part of
|
| 110 |
+
the Derivative Works; and
|
| 111 |
+
|
| 112 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 113 |
+
distribution, then any Derivative Works that You distribute must
|
| 114 |
+
include a readable copy of the attribution notices contained
|
| 115 |
+
within such NOTICE file, excluding those notices that do not
|
| 116 |
+
pertain to any part of the Derivative Works, in at least one
|
| 117 |
+
of the following places: within a NOTICE text file distributed
|
| 118 |
+
as part of the Derivative Works; within the Source form or
|
| 119 |
+
documentation, if provided along with the Derivative Works; or,
|
| 120 |
+
within a display generated by the Derivative Works, if and
|
| 121 |
+
wherever such third-party notices normally appear. The contents
|
| 122 |
+
of the NOTICE file are for informational purposes only and
|
| 123 |
+
do not modify the License. You may add Your own attribution
|
| 124 |
+
notices within Derivative Works that You distribute, alongside
|
| 125 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 126 |
+
that such additional attribution notices cannot be construed
|
| 127 |
+
as modifying the License.
|
| 128 |
+
|
| 129 |
+
You may add Your own copyright statement to Your modifications and
|
| 130 |
+
may provide additional or different license terms and conditions
|
| 131 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 132 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 133 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 134 |
+
the conditions stated in this License.
|
| 135 |
+
|
| 136 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 137 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 138 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 139 |
+
this License, without any additional terms or conditions.
|
| 140 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 141 |
+
the terms of any separate license agreement you may have executed
|
| 142 |
+
with Licensor regarding such Contributions.
|
| 143 |
+
|
| 144 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 145 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 146 |
+
except as required for reasonable and customary use in describing the
|
| 147 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 148 |
+
|
| 149 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 150 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 151 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 152 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 153 |
+
implied, including, without limitation, any warranties or conditions
|
| 154 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 155 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 156 |
+
appropriateness of using or redistributing the Work and assume any
|
| 157 |
+
risks associated with Your exercise of permissions under this License.
|
| 158 |
+
|
| 159 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 160 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 161 |
+
unless required by applicable law (such as deliberate and grossly
|
| 162 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 163 |
+
liable to You for damages, including any direct, indirect, special,
|
| 164 |
+
incidental, or consequential damages of any character arising as a
|
| 165 |
+
result of this License or out of the use or inability to use the
|
| 166 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 167 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 168 |
+
other commercial damages or losses), even if such Contributor
|
| 169 |
+
has been advised of the possibility of such damages.
|
| 170 |
+
|
| 171 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 172 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 173 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 174 |
+
or other liability obligations and/or rights consistent with this
|
| 175 |
+
License. However, in accepting such obligations, You may act only
|
| 176 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 177 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 178 |
+
defend, and hold each Contributor harmless for any liability
|
| 179 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 180 |
+
of your accepting any such warranty or additional liability.
|
| 181 |
+
|
| 182 |
+
END OF TERMS AND CONDITIONS
|
| 183 |
+
|
| 184 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 185 |
+
|
| 186 |
+
To apply the Apache License to your work, attach the following
|
| 187 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 188 |
+
replaced with your own identifying information. (Don't include
|
| 189 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 190 |
+
comment syntax for the file format. We also recommend that a
|
| 191 |
+
file or class name and description of purpose be included on the
|
| 192 |
+
same "printed page" as the copyright notice for easier
|
| 193 |
+
identification within third-party archives.
|
| 194 |
+
|
| 195 |
+
Copyright 2020 - present, Facebook, Inc
|
| 196 |
+
|
| 197 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 198 |
+
you may not use this file except in compliance with the License.
|
| 199 |
+
You may obtain a copy of the License at
|
| 200 |
+
|
| 201 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 202 |
+
|
| 203 |
+
Unless required by applicable law or agreed to in writing, software
|
| 204 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 205 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 206 |
+
See the License for the specific language governing permissions and
|
| 207 |
+
limitations under the License.
|
| 208 |
+
|
| 209 |
+
License-File: LICENSE
|
GroundingDINO/groundingdino.egg-info/SOURCES.txt
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
LICENSE
|
| 2 |
+
README.md
|
| 3 |
+
setup.py
|
| 4 |
+
/content/drive/My Drive/AI/matte/Matte-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/cuda_version.cu
|
| 5 |
+
/content/drive/My Drive/AI/matte/Matte-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/vision.cpp
|
| 6 |
+
/content/drive/My Drive/AI/matte/Matte-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.cpp
|
| 7 |
+
/content/drive/My Drive/AI/matte/Matte-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cuda.cu
|
| 8 |
+
groundingdino/__init__.py
|
| 9 |
+
groundingdino/version.py
|
| 10 |
+
groundingdino.egg-info/PKG-INFO
|
| 11 |
+
groundingdino.egg-info/SOURCES.txt
|
| 12 |
+
groundingdino.egg-info/dependency_links.txt
|
| 13 |
+
groundingdino.egg-info/requires.txt
|
| 14 |
+
groundingdino.egg-info/top_level.txt
|
| 15 |
+
groundingdino/config/GroundingDINO_SwinB_cfg.py
|
| 16 |
+
groundingdino/config/GroundingDINO_SwinT_OGC.py
|
| 17 |
+
groundingdino/config/__init__.py
|
| 18 |
+
groundingdino/datasets/__init__.py
|
| 19 |
+
groundingdino/datasets/transforms.py
|
| 20 |
+
groundingdino/models/__init__.py
|
| 21 |
+
groundingdino/models/registry.py
|
| 22 |
+
groundingdino/models/GroundingDINO/__init__.py
|
| 23 |
+
groundingdino/models/GroundingDINO/bertwarper.py
|
| 24 |
+
groundingdino/models/GroundingDINO/fuse_modules.py
|
| 25 |
+
groundingdino/models/GroundingDINO/groundingdino.py
|
| 26 |
+
groundingdino/models/GroundingDINO/ms_deform_attn.py
|
| 27 |
+
groundingdino/models/GroundingDINO/transformer.py
|
| 28 |
+
groundingdino/models/GroundingDINO/transformer_vanilla.py
|
| 29 |
+
groundingdino/models/GroundingDINO/utils.py
|
| 30 |
+
groundingdino/models/GroundingDINO/backbone/__init__.py
|
| 31 |
+
groundingdino/models/GroundingDINO/backbone/backbone.py
|
| 32 |
+
groundingdino/models/GroundingDINO/backbone/position_encoding.py
|
| 33 |
+
groundingdino/models/GroundingDINO/backbone/swin_transformer.py
|
| 34 |
+
groundingdino/util/__init__.py
|
| 35 |
+
groundingdino/util/box_ops.py
|
| 36 |
+
groundingdino/util/get_tokenlizer.py
|
| 37 |
+
groundingdino/util/inference.py
|
| 38 |
+
groundingdino/util/logger.py
|
| 39 |
+
groundingdino/util/misc.py
|
| 40 |
+
groundingdino/util/slconfig.py
|
| 41 |
+
groundingdino/util/slio.py
|
| 42 |
+
groundingdino/util/time_counter.py
|
| 43 |
+
groundingdino/util/utils.py
|
| 44 |
+
groundingdino/util/visualizer.py
|
| 45 |
+
groundingdino/util/vl_utils.py
|
GroundingDINO/groundingdino.egg-info/dependency_links.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
GroundingDINO/groundingdino.egg-info/requires.txt
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
torch
|
| 2 |
+
torchvision
|
| 3 |
+
transformers
|
| 4 |
+
addict
|
| 5 |
+
yapf
|
| 6 |
+
timm
|
| 7 |
+
numpy
|
| 8 |
+
opencv-python
|
| 9 |
+
supervision==0.6.0
|
| 10 |
+
pycocotools
|
GroundingDINO/groundingdino.egg-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
groundingdino
|
GroundingDINO/groundingdino/_C.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e117adefc73963b8ee089cdf3be41102bc3c56b835195a717a50dbacf407d3bb
|
| 3 |
+
size 13644784
|
GroundingDINO/groundingdino/__init__.py
ADDED
|
File without changes
|
GroundingDINO/groundingdino/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (180 Bytes). View file
|
|
|
GroundingDINO/groundingdino/config/GroundingDINO_SwinB_cfg.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# GroundingDINO model configuration: Swin-B backbone variant.
# These module-level names are read by SLConfig.fromfile(); do not rename.
batch_size = 1
modelname = "groundingdino"
backbone = "swin_B_384_22k"  # Swin-B, 384 px -- presumably ImageNet-22k pretrained; confirm in backbone code
# sine positional encoding and its temperatures
position_embedding = "sine"
pe_temperatureH = 20
pe_temperatureW = 20
return_interm_indices = [1, 2, 3]  # which backbone stages feed the transformer
backbone_freeze_keywords = None
# transformer encoder/decoder dimensions
enc_layers = 6
dec_layers = 6
pre_norm = False
dim_feedforward = 2048
hidden_dim = 256
dropout = 0.0
nheads = 8
num_queries = 900
query_dim = 4
num_patterns = 0
# deformable-attention settings
num_feature_levels = 4
enc_n_points = 4
dec_n_points = 4
# two-stage query selection
two_stage_type = "standard"
two_stage_bbox_embed_share = False
two_stage_class_embed_share = False
transformer_activation = "relu"
dec_pred_bbox_embed_share = True
# dn_* names suggest DN-DETR-style denoising training knobs -- confirm in model code
dn_box_noise_scale = 1.0
dn_label_noise_ratio = 0.5
dn_label_coef = 1.0
dn_bbox_coef = 1.0
embed_init_tgt = True
dn_labelbook_size = 2000
# text branch
max_text_len = 256
text_encoder_type = "bert-base-uncased"
use_text_enhancer = True
use_fusion_layer = True
# gradient checkpointing flags (memory/compute trade-off)
use_checkpoint = True
use_transformer_ckpt = True
use_text_cross_attention = True
text_dropout = 0.0
fusion_dropout = 0.0
fusion_droppath = 0.1
sub_sentence_present = True
|
GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# GroundingDINO model configuration: Swin-T backbone variant.
# These module-level names are read by SLConfig.fromfile(); do not rename.
batch_size = 1
modelname = "groundingdino"
backbone = "swin_T_224_1k"  # Swin-T, 224 px -- presumably ImageNet-1k pretrained; confirm in backbone code
# sine positional encoding and its temperatures
position_embedding = "sine"
pe_temperatureH = 20
pe_temperatureW = 20
return_interm_indices = [1, 2, 3]  # which backbone stages feed the transformer
backbone_freeze_keywords = None
# transformer encoder/decoder dimensions
enc_layers = 6
dec_layers = 6
pre_norm = False
dim_feedforward = 2048
hidden_dim = 256
dropout = 0.0
nheads = 8
num_queries = 900
query_dim = 4
num_patterns = 0
# deformable-attention settings
num_feature_levels = 4
enc_n_points = 4
dec_n_points = 4
# two-stage query selection
two_stage_type = "standard"
two_stage_bbox_embed_share = False
two_stage_class_embed_share = False
transformer_activation = "relu"
dec_pred_bbox_embed_share = True
# dn_* names suggest DN-DETR-style denoising training knobs -- confirm in model code
dn_box_noise_scale = 1.0
dn_label_noise_ratio = 0.5
dn_label_coef = 1.0
dn_bbox_coef = 1.0
embed_init_tgt = True
dn_labelbook_size = 2000
# text branch
max_text_len = 256
text_encoder_type = "bert-base-uncased"
use_text_enhancer = True
use_fusion_layer = True
# gradient checkpointing flags (memory/compute trade-off)
use_checkpoint = True
use_transformer_ckpt = True
use_text_cross_attention = True
text_dropout = 0.0
fusion_dropout = 0.0
fusion_droppath = 0.1
sub_sentence_present = True
|
GroundingDINO/groundingdino/config/__init__.py
ADDED
|
File without changes
|
GroundingDINO/groundingdino/datasets/__init__.py
ADDED
|
File without changes
|
GroundingDINO/groundingdino/datasets/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (189 Bytes). View file
|
|
|
GroundingDINO/groundingdino/datasets/__pycache__/transforms.cpython-310.pyc
ADDED
|
Binary file (10.1 kB). View file
|
|
|
GroundingDINO/groundingdino/datasets/transforms.py
ADDED
|
@@ -0,0 +1,311 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
| 2 |
+
"""
|
| 3 |
+
Transforms and data augmentation for both image + bbox.
|
| 4 |
+
"""
|
| 5 |
+
import os
|
| 6 |
+
import random
|
| 7 |
+
|
| 8 |
+
import PIL
|
| 9 |
+
import torch
|
| 10 |
+
import torchvision.transforms as T
|
| 11 |
+
import torchvision.transforms.functional as F
|
| 12 |
+
|
| 13 |
+
from groundingdino.util.box_ops import box_xyxy_to_cxcywh
|
| 14 |
+
from groundingdino.util.misc import interpolate
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def crop(image, target, region):
    """Crop *image* to *region* = (top, left, height, width) and remap the
    annotations in *target* accordingly, dropping degenerate instances.

    # NOTE(review): boxes are treated as xyxy in absolute pixels here
    # (the [j, i, j, i] shift and [w, h] clamp only make sense for xyxy) --
    # confirm against callers.
    """
    cropped_image = F.crop(image, *region)

    target = target.copy()
    i, j, h, w = region

    # should we do something wrt the original size?
    target["size"] = torch.tensor([h, w])

    # per-instance fields that must be filtered in lockstep with boxes/masks
    fields = ["labels", "area", "iscrowd", "positive_map"]

    if "boxes" in target:
        boxes = target["boxes"]
        max_size = torch.as_tensor([w, h], dtype=torch.float32)
        # shift into crop coordinates, then clamp corners into [0, w] x [0, h]
        cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
        cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
        cropped_boxes = cropped_boxes.clamp(min=0)
        # recompute area from the clamped corner pair
        area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
        target["boxes"] = cropped_boxes.reshape(-1, 4)
        target["area"] = area
        fields.append("boxes")

    if "masks" in target:
        # FIXME should we update the area here if there are no boxes?
        target["masks"] = target["masks"][:, i : i + h, j : j + w]
        fields.append("masks")

    # remove elements for which the boxes or masks that have zero area
    if "boxes" in target or "masks" in target:
        # favor boxes selection when defining which elements to keep
        # this is compatible with previous implementation
        if "boxes" in target:
            cropped_boxes = target["boxes"].reshape(-1, 2, 2)
            keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
        else:
            keep = target["masks"].flatten(1).any(1)

        for field in fields:
            if field in target:
                target[field] = target[field][keep]

        # debug-only path, gated by an environment variable
        if os.environ.get("IPDB_SHILONG_DEBUG", None) == "INFO":
            # for debug and visualization only.
            if "strings_positive" in target:
                target["strings_positive"] = [
                    _i for _i, _j in zip(target["strings_positive"], keep) if _j
                ]

    return cropped_image, target
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def hflip(image, target):
    """Horizontally flip an image and mirror its annotations."""
    flipped_image = F.hflip(image)

    w, h = image.size

    target = target.copy()
    if "boxes" in target:
        # Mirror xyxy boxes: swap x0/x1, negate the x coordinates, then
        # shift by the image width so values stay in [0, w].
        original = target["boxes"]
        mirrored = original[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1])
        mirrored = mirrored + torch.as_tensor([w, 0, w, 0])
        target["boxes"] = mirrored

    if "masks" in target:
        # Flipping the last (width) axis mirrors each mask.
        target["masks"] = target["masks"].flip(-1)

    return flipped_image, target
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def resize(image, target, size, max_size=None):
    """Resize `image` and rescale `target` (boxes, area, masks, size) to match.

    `size` is either a scalar short-side length or an explicit (w, h) pair;
    `max_size`, when given, caps the long side in the scalar case.
    Returns the resized image and an updated shallow copy of `target`
    (or `(image, None)` when `target` is None).
    """
    # size can be min_size (scalar) or (w, h) tuple

    def get_size_with_aspect_ratio(image_size, size, max_size=None):
        # Pick (h, w) so the short side equals `size`, first shrinking `size`
        # if keeping the aspect ratio would push the long side past `max_size`.
        w, h = image_size
        if max_size is not None:
            min_original_size = float(min((w, h)))
            max_original_size = float(max((w, h)))
            if max_original_size / min_original_size * size > max_size:
                size = int(round(max_size * min_original_size / max_original_size))

        # Already at the requested short side: no change needed.
        if (w <= h and w == size) or (h <= w and h == size):
            return (h, w)

        if w < h:
            ow = size
            oh = int(size * h / w)
        else:
            oh = size
            ow = int(size * w / h)

        return (oh, ow)

    def get_size(image_size, size, max_size=None):
        if isinstance(size, (list, tuple)):
            # Explicit (w, h) requested; downstream code expects (h, w), so reverse.
            return size[::-1]
        else:
            return get_size_with_aspect_ratio(image_size, size, max_size)

    size = get_size(image.size, size, max_size)
    rescaled_image = F.resize(image, size)

    if target is None:
        return rescaled_image, None

    # Per-axis scale factors; PIL's .size is (w, h), so ratios come out (w, h) too.
    ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
    ratio_width, ratio_height = ratios

    target = target.copy()
    if "boxes" in target:
        boxes = target["boxes"]
        # xyxy boxes scale per-axis: (x0, y0, x1, y1) * (rw, rh, rw, rh).
        scaled_boxes = boxes * torch.as_tensor(
            [ratio_width, ratio_height, ratio_width, ratio_height]
        )
        target["boxes"] = scaled_boxes

    if "area" in target:
        area = target["area"]
        # Area scales by the product of both axis ratios.
        scaled_area = area * (ratio_width * ratio_height)
        target["area"] = scaled_area

    h, w = size
    target["size"] = torch.tensor([h, w])

    if "masks" in target:
        # Nearest-neighbor keeps mask values crisp; threshold back to boolean.
        target["masks"] = (
            interpolate(target["masks"][:, None].float(), size, mode="nearest")[:, 0] > 0.5
        )

    return rescaled_image, target
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def pad(image, target, padding):
    """Pad `image` on the bottom/right by `padding` = (pad_w, pad_h).

    Because only the bottom-right corner grows, box coordinates remain valid;
    only `target["size"]` and masks need updating. Returns a shallow copy.
    """
    pad_w, pad_h = padding
    # assumes that we only pad on the bottom right corners
    padded_image = F.pad(image, (0, 0, pad_w, pad_h))
    if target is None:
        return padded_image, None
    updated = target.copy()
    # should we do something wrt the original size?
    updated["size"] = torch.tensor(padded_image.size[::-1])
    if "masks" in updated:
        updated["masks"] = torch.nn.functional.pad(updated["masks"], (0, pad_w, 0, pad_h))
    return padded_image, updated
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
class ResizeDebug(object):
    """Deterministically resize every input to a fixed size (debug helper)."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img, target):
        # Delegate directly to the module-level resize; no randomness involved.
        return resize(img, target, self.size)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
class RandomCrop(object):
    """Crop a uniformly random region of fixed `size` from image and target."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img, target):
        # torchvision picks the random (top, left, h, w) window for us.
        top, left, height, width = T.RandomCrop.get_params(img, self.size)
        return crop(img, target, (top, left, height, width))
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
class RandomSizeCrop(object):
    """Crop a random region whose sides are drawn from [min_size, max_size].

    When `respect_boxes` is True, retries (up to a fixed patience) until a
    crop keeps every ground-truth box; the final attempt is returned
    unconditionally so the call always yields a result.
    """

    def __init__(self, min_size: int, max_size: int, respect_boxes: bool = False):
        # respect_boxes: True to keep all boxes,
        #                False to tolerate the box filter dropping some
        self.min_size = min_size
        self.max_size = max_size
        self.respect_boxes = respect_boxes

    # NOTE: the annotation is a string so defining this class does not require
    # PIL to be imported at class-creation time (behaviorally equivalent).
    def __call__(self, img: "PIL.Image.Image", target: dict):
        init_boxes = len(target["boxes"])
        max_patience = 10
        for i in range(max_patience):
            # Sampled extents never exceed the image, so get_params is valid.
            w = random.randint(self.min_size, min(img.width, self.max_size))
            h = random.randint(self.min_size, min(img.height, self.max_size))
            region = T.RandomCrop.get_params(img, [h, w])
            result_img, result_target = crop(img, target, region)
            # Accept when box preservation isn't required, no boxes were lost,
            # or patience is exhausted — the last iteration always returns,
            # so no code is needed after the loop.
            if (
                not self.respect_boxes
                or len(result_target["boxes"]) == init_boxes
                or i == max_patience - 1
            ):
                return result_img, result_target
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
class CenterCrop(object):
    """Crop a centered region of `size` = (height, width) from image and target."""

    def __init__(self, size):
        self.size = size

    def __call__(self, img, target):
        img_w, img_h = img.size
        out_h, out_w = self.size
        # Center the crop window, rounding the offsets to whole pixels.
        top = int(round((img_h - out_h) / 2.0))
        left = int(round((img_w - out_w) / 2.0))
        return crop(img, target, (top, left, out_h, out_w))
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
class RandomHorizontalFlip(object):
    """Horizontally flip image and target with probability `p`."""

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, img, target):
        # Single draw decides: flip when it lands below the threshold,
        # otherwise pass both inputs through untouched.
        return hflip(img, target) if random.random() < self.p else (img, target)
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
class RandomResize(object):
    """Resize to a size drawn uniformly from `sizes`, long side capped by `max_size`."""

    def __init__(self, sizes, max_size=None):
        assert isinstance(sizes, (list, tuple))
        self.sizes = sizes
        self.max_size = max_size

    def __call__(self, img, target=None):
        # Each size entry may be a scalar short side or an explicit (w, h) pair;
        # module-level resize handles both.
        chosen = random.choice(self.sizes)
        return resize(img, target, chosen, self.max_size)
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
class RandomPad(object):
    """Pad bottom/right by independent random amounts drawn from [0, max_pad]."""

    def __init__(self, max_pad):
        self.max_pad = max_pad

    def __call__(self, img, target):
        # Horizontal amount is drawn first, then vertical (order matters for
        # reproducibility of the RNG stream).
        pad_right = random.randint(0, self.max_pad)
        pad_bottom = random.randint(0, self.max_pad)
        return pad(img, target, (pad_right, pad_bottom))
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
class RandomSelect(object):
    """
    Randomly selects between transforms1 and transforms2,
    with probability p for transforms1 and (1 - p) for transforms2
    """

    def __init__(self, transforms1, transforms2, p=0.5):
        self.transforms1 = transforms1
        self.transforms2 = transforms2
        self.p = p

    def __call__(self, img, target):
        # Pick the branch first, then apply it — one RNG draw either way.
        chosen = self.transforms1 if random.random() < self.p else self.transforms2
        return chosen(img, target)
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
class ToTensor(object):
    """Convert the image to a tensor; the target dict passes through untouched."""

    def __call__(self, img, target):
        tensor_img = F.to_tensor(img)
        return tensor_img, target
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
class RandomErasing(object):
    """Adapt torchvision's RandomErasing to the (img, target) transform interface."""

    def __init__(self, *args, **kwargs):
        # All configuration is forwarded verbatim to torchvision.
        self.eraser = T.RandomErasing(*args, **kwargs)

    def __call__(self, img, target):
        # Erasing only alters pixels, so the target needs no update.
        return self.eraser(img), target
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
class Normalize(object):
    """Normalize an image tensor and convert boxes to relative cxcywh coordinates."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, image, target=None):
        normalized = F.normalize(image, mean=self.mean, std=self.std)
        if target is None:
            return normalized, None
        updated = target.copy()
        height, width = normalized.shape[-2:]
        if "boxes" in updated:
            # xyxy -> cxcywh, then divide by image dims to land in [0, 1].
            rel_boxes = box_xyxy_to_cxcywh(updated["boxes"])
            rel_boxes = rel_boxes / torch.tensor([width, height, width, height], dtype=torch.float32)
            updated["boxes"] = rel_boxes
        return normalized, updated
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
class Compose(object):
    """Chain (image, target) transforms, threading both values through each step."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        for transform in self.transforms:
            image, target = transform(image, target)
        return image, target

    def __repr__(self):
        # One transform per indented line, wrapped in "ClassName( ... )".
        pieces = self.__class__.__name__ + "("
        for transform in self.transforms:
            pieces += "\n"
            pieces += "    {0}".format(transform)
        pieces += "\n)"
        return pieces
|
GroundingDINO/groundingdino/models/GroundingDINO/__init__.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ------------------------------------------------------------------------
|
| 2 |
+
# Grounding DINO
|
| 3 |
+
# url: https://github.com/IDEA-Research/GroundingDINO
|
| 4 |
+
# Copyright (c) 2023 IDEA. All Rights Reserved.
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
|
| 6 |
+
# ------------------------------------------------------------------------
|
| 7 |
+
# Conditional DETR
|
| 8 |
+
# Copyright (c) 2021 Microsoft. All Rights Reserved.
|
| 9 |
+
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
|
| 10 |
+
# ------------------------------------------------------------------------
|
| 11 |
+
# Copied from DETR (https://github.com/facebookresearch/detr)
|
| 12 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
| 13 |
+
# ------------------------------------------------------------------------
|
| 14 |
+
|
| 15 |
+
from .groundingdino import build_groundingdino
|
GroundingDINO/groundingdino/models/GroundingDINO/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (258 Bytes). View file
|
|
|
GroundingDINO/groundingdino/models/GroundingDINO/__pycache__/bertwarper.cpython-310.pyc
ADDED
|
Binary file (7.23 kB). View file
|
|
|
GroundingDINO/groundingdino/models/GroundingDINO/__pycache__/fuse_modules.cpython-310.pyc
ADDED
|
Binary file (7.78 kB). View file
|
|
|
GroundingDINO/groundingdino/models/GroundingDINO/__pycache__/groundingdino.cpython-310.pyc
ADDED
|
Binary file (10.6 kB). View file
|
|
|
GroundingDINO/groundingdino/models/GroundingDINO/__pycache__/ms_deform_attn.cpython-310.pyc
ADDED
|
Binary file (11.8 kB). View file
|
|
|
GroundingDINO/groundingdino/models/GroundingDINO/__pycache__/transformer.cpython-310.pyc
ADDED
|
Binary file (19.3 kB). View file
|
|
|
GroundingDINO/groundingdino/models/GroundingDINO/__pycache__/transformer_vanilla.cpython-310.pyc
ADDED
|
Binary file (3.45 kB). View file
|
|
|
GroundingDINO/groundingdino/models/GroundingDINO/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (9.58 kB). View file
|
|
|
GroundingDINO/groundingdino/models/GroundingDINO/backbone/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from .backbone import build_backbone
|