Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/mod_with_constant.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/script-with-bom.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/server.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_archive_util.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_bdist_wheel.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_build_ext.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_build_py.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_core_metadata.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_install_scripts.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_manifest.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_namespaces.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_virtualenv.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_wheel.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/config/__pycache__/__init__.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/config/__pycache__/test_expand.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/config/__pycache__/test_pyprojecttoml.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/config/__pycache__/test_pyprojecttoml_dynamic_deps.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/indexes/test_links_priority/external.html +3 -0
- llava/lib/python3.10/site-packages/setuptools/tests/indexes/test_links_priority/simple/foobar/index.html +4 -0
- llava/lib/python3.10/site-packages/setuptools/tests/integration/__init__.py +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/integration/__pycache__/__init__.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/integration/__pycache__/helpers.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/integration/__pycache__/test_pip_install_sdist.cpython-310.pyc +0 -0
- llava/lib/python3.10/site-packages/setuptools/tests/integration/helpers.py +77 -0
- llava/lib/python3.10/site-packages/setuptools/tests/integration/test_pip_install_sdist.py +223 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/__pycache__/__init__.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/eval/__pycache__/__init__.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/eval/__pycache__/classification.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/eval/__pycache__/coco_metric.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/eval/__pycache__/eval_datasets.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/eval/__pycache__/evaluate.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/eval/__pycache__/imagenet_utils.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/eval/__pycache__/ok_vqa_utils.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/eval/__pycache__/vqa_metric.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/eval/classification.py +147 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/eval/coco_metric.py +22 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/eval/imagenet_utils.py +1007 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/src/__init__.py +0 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/src/__pycache__/__init__.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/src/__pycache__/flamingo.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/src/__pycache__/helpers.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/src/__pycache__/utils.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/src/flamingo.py +198 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/src/flamingo_lm.py +138 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/src/helpers.py +275 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/src/utils.py +31 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/train/__init__.py +1 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/train/__pycache__/__init__.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/train/__pycache__/data.cpython-310.pyc +0 -0
- minigpt2/lib/python3.10/site-packages/open_flamingo/train/__pycache__/distributed.cpython-310.pyc +0 -0
llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/mod_with_constant.cpython-310.pyc
ADDED
|
Binary file (180 Bytes). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/script-with-bom.cpython-310.pyc
ADDED
|
Binary file (174 Bytes). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/server.cpython-310.pyc
ADDED
|
Binary file (3.38 kB). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_archive_util.cpython-310.pyc
ADDED
|
Binary file (1.15 kB). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_bdist_wheel.cpython-310.pyc
ADDED
|
Binary file (20.3 kB). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_build_ext.cpython-310.pyc
ADDED
|
Binary file (9.37 kB). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_build_py.cpython-310.pyc
ADDED
|
Binary file (10.8 kB). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_core_metadata.cpython-310.pyc
ADDED
|
Binary file (14.9 kB). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_install_scripts.cpython-310.pyc
ADDED
|
Binary file (3.66 kB). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_manifest.cpython-310.pyc
ADDED
|
Binary file (15.9 kB). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_namespaces.cpython-310.pyc
ADDED
|
Binary file (3.48 kB). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_virtualenv.cpython-310.pyc
ADDED
|
Binary file (2.78 kB). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/__pycache__/test_wheel.cpython-310.pyc
ADDED
|
Binary file (13.2 kB). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/config/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (157 Bytes). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/config/__pycache__/test_expand.cpython-310.pyc
ADDED
|
Binary file (7.79 kB). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/config/__pycache__/test_pyprojecttoml.cpython-310.pyc
ADDED
|
Binary file (11.1 kB). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/config/__pycache__/test_pyprojecttoml_dynamic_deps.cpython-310.pyc
ADDED
|
Binary file (3.37 kB). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/indexes/test_links_priority/external.html
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<html><body>
|
| 2 |
+
<a href="/foobar-0.1.tar.gz#md5=1__bad_md5___">bad old link</a>
|
| 3 |
+
</body></html>
|
llava/lib/python3.10/site-packages/setuptools/tests/indexes/test_links_priority/simple/foobar/index.html
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<html><body>
|
| 2 |
+
<a href="/foobar-0.1.tar.gz#md5=0_correct_md5">foobar-0.1.tar.gz</a><br/>
|
| 3 |
+
<a href="../../external.html" rel="homepage">external homepage</a><br/>
|
| 4 |
+
</body></html>
|
llava/lib/python3.10/site-packages/setuptools/tests/integration/__init__.py
ADDED
|
File without changes
|
llava/lib/python3.10/site-packages/setuptools/tests/integration/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (162 Bytes). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/integration/__pycache__/helpers.cpython-310.pyc
ADDED
|
Binary file (3.18 kB). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/integration/__pycache__/test_pip_install_sdist.cpython-310.pyc
ADDED
|
Binary file (6.1 kB). View file
|
|
|
llava/lib/python3.10/site-packages/setuptools/tests/integration/helpers.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Reusable functions and classes for different types of integration tests.
|
| 2 |
+
|
| 3 |
+
For example ``Archive`` can be used to check the contents of distribution built
|
| 4 |
+
with setuptools, and ``run`` will always try to be as verbose as possible to
|
| 5 |
+
facilitate debugging.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import os
|
| 9 |
+
import subprocess
|
| 10 |
+
import tarfile
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
from zipfile import ZipFile
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def run(cmd, env=None):
    """Run *cmd* as verbosely as possible.

    Returns the combined stdout+stderr on success; raises
    ``subprocess.CalledProcessError`` on a non-zero exit code.
    ``env`` entries are layered on top of the current environment
    (overwriting instead of discarding it).
    """
    merged_env = dict(os.environ)
    merged_env.update(env or {})
    result = subprocess.run(
        cmd,
        capture_output=True,
        text=True,
        encoding="utf-8",
        env=merged_env,
    )

    combined = result.stdout + "\n" + result.stderr
    # pytest omits stdout/err by default; if the test fails they help debugging
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    print(f"Command: {cmd}\nreturn code: {result.returncode}\n\n{combined}")

    if result.returncode != 0:
        raise subprocess.CalledProcessError(
            result.returncode, cmd, result.stdout, result.stderr
        )
    return combined
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class Archive:
    """Compatibility layer for ZipFile/Info and TarFile/Info"""

    def __init__(self, filename):
        """Open *filename* as a ``.tar.gz`` or ``.zip`` archive.

        Raises ``ValueError`` for any other extension.
        """
        self._filename = filename
        if filename.endswith("tar.gz"):
            self._obj = tarfile.open(filename, "r:gz")
        elif filename.endswith("zip"):
            self._obj = ZipFile(filename)
        else:
            # BUG FIX: the original f-string had no placeholder, so the error
            # never said which file was rejected; include the filename.
            raise ValueError(f"{filename} doesn't seem to be a zip or tar.gz")

    def __iter__(self):
        """Iterate over the archive's ZipInfo/TarInfo members."""
        if hasattr(self._obj, "infolist"):
            return iter(self._obj.infolist())
        return iter(self._obj)

    def get_name(self, zip_or_tar_info):
        """Return the archived path of a ZipInfo or TarInfo member."""
        if hasattr(zip_or_tar_info, "filename"):
            return zip_or_tar_info.filename
        return zip_or_tar_info.name

    def get_content(self, zip_or_tar_info):
        """Return the member's content decoded as UTF-8 text."""
        if hasattr(self._obj, "extractfile"):
            # tarfile: extractfile returns None for non-regular members
            content = self._obj.extractfile(zip_or_tar_info)
            if content is None:
                msg = f"Invalid {zip_or_tar_info.name} in {self._filename}"
                raise ValueError(msg)
            return str(content.read(), "utf-8")
        return str(self._obj.read(zip_or_tar_info), "utf-8")
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def get_sdist_members(sdist_path):
    """Return the set of paths inside the sdist, relative to its root folder."""
    with tarfile.open(sdist_path, "r:gz") as tar:
        members = [Path(name) for name in tar.getnames()]
    # Drop the top-level "<pkg>-<version>/" folder from every entry; the
    # root entry itself becomes "" and is filtered out.
    stripped = ("/".join(member.parts[1:]) for member in members)
    return {entry for entry in stripped if entry}
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def get_wheel_members(wheel_path):
    """Return the set of file names stored inside the wheel archive."""
    # (local renamed from ``zipfile`` to avoid shadowing the stdlib module)
    with ZipFile(wheel_path) as archive:
        return set(archive.namelist())
|
llava/lib/python3.10/site-packages/setuptools/tests/integration/test_pip_install_sdist.py
ADDED
|
@@ -0,0 +1,223 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# https://github.com/python/mypy/issues/16936
|
| 2 |
+
# mypy: disable-error-code="has-type"
|
| 3 |
+
"""Integration tests for setuptools that focus on building packages via pip.
|
| 4 |
+
|
| 5 |
+
The idea behind these tests is not to exhaustively check all the possible
|
| 6 |
+
combinations of packages, operating systems, supporting libraries, etc, but
|
| 7 |
+
rather check a limited number of popular packages and how they interact with
|
| 8 |
+
the exposed public API. This way if any change in API is introduced, we hope to
|
| 9 |
+
identify backward compatibility problems before publishing a release.
|
| 10 |
+
|
| 11 |
+
The number of tested packages is purposefully kept small, to minimise duration
|
| 12 |
+
and the associated maintenance cost (changes in the way these packages define
|
| 13 |
+
their build process may require changes in the tests).
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
import json
|
| 17 |
+
import os
|
| 18 |
+
import shutil
|
| 19 |
+
import sys
|
| 20 |
+
from enum import Enum
|
| 21 |
+
from glob import glob
|
| 22 |
+
from hashlib import md5
|
| 23 |
+
from urllib.request import urlopen
|
| 24 |
+
|
| 25 |
+
import pytest
|
| 26 |
+
from packaging.requirements import Requirement
|
| 27 |
+
|
| 28 |
+
from .helpers import Archive, run
|
| 29 |
+
|
| 30 |
+
pytestmark = pytest.mark.integration
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
(LATEST,) = Enum("v", "LATEST") # type: ignore[misc] # https://github.com/python/mypy/issues/16936
|
| 34 |
+
"""Default version to be checked"""
|
| 35 |
+
# There are positive and negative aspects of checking the latest version of the
|
| 36 |
+
# packages.
|
| 37 |
+
# The main positive aspect is that the latest version might have already
|
| 38 |
+
# removed the use of APIs deprecated in previous releases of setuptools.
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
# Packages to be tested:
|
| 42 |
+
# (Please notice the test environment cannot support EVERY library required for
|
| 43 |
+
# compiling binary extensions. In Ubuntu/Debian nomenclature, we only assume
|
| 44 |
+
# that `build-essential`, `gfortran` and `libopenblas-dev` are installed,
|
| 45 |
+
# due to their relevance to the numerical/scientific programming ecosystem)
|
| 46 |
+
EXAMPLES = [
|
| 47 |
+
("pip", LATEST), # just in case...
|
| 48 |
+
("pytest", LATEST), # uses setuptools_scm
|
| 49 |
+
("mypy", LATEST), # custom build_py + ext_modules
|
| 50 |
+
# --- Popular packages: https://hugovk.github.io/top-pypi-packages/ ---
|
| 51 |
+
("botocore", LATEST),
|
| 52 |
+
("kiwisolver", LATEST), # build_ext
|
| 53 |
+
("brotli", LATEST), # not in the list but used by urllib3
|
| 54 |
+
("pyyaml", LATEST), # cython + custom build_ext + custom distclass
|
| 55 |
+
("charset-normalizer", LATEST), # uses mypyc, used by aiohttp
|
| 56 |
+
("protobuf", LATEST),
|
| 57 |
+
("requests", LATEST),
|
| 58 |
+
("celery", LATEST),
|
| 59 |
+
# When adding packages to this list, make sure they expose a `__version__`
|
| 60 |
+
# attribute, or modify the tests below
|
| 61 |
+
]
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
# Some packages have "optional" dependencies that modify their build behaviour
|
| 65 |
+
# and are not listed in pyproject.toml, others still use `setup_requires`
|
| 66 |
+
EXTRA_BUILD_DEPS = {
|
| 67 |
+
"pyyaml": ("Cython<3.0",), # constraint to avoid errors
|
| 68 |
+
"charset-normalizer": ("mypy>=1.4.1",), # no pyproject.toml available
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
EXTRA_ENV_VARS = {
|
| 72 |
+
"pyyaml": {"PYYAML_FORCE_CYTHON": "1"},
|
| 73 |
+
"charset-normalizer": {"CHARSET_NORMALIZER_USE_MYPYC": "1"},
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
IMPORT_NAME = {
|
| 77 |
+
"pyyaml": "yaml",
|
| 78 |
+
"protobuf": "google.protobuf",
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
VIRTUALENV = (sys.executable, "-m", "virtualenv")
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
# By default, pip will try to build packages in isolation (PEP 517), which
|
| 86 |
+
# means it will download the previous stable version of setuptools.
|
| 87 |
+
# `pip` flags can avoid that (the version of setuptools under test
|
| 88 |
+
# should be the one to be used)
|
| 89 |
+
INSTALL_OPTIONS = (
|
| 90 |
+
"--ignore-installed",
|
| 91 |
+
"--no-build-isolation",
|
| 92 |
+
# Omit "--no-binary :all:" the sdist is supplied directly.
|
| 93 |
+
# Allows dependencies as wheels.
|
| 94 |
+
)
|
| 95 |
+
# The downside of `--no-build-isolation` is that pip will not download build
|
| 96 |
+
# dependencies. The test script will have to also handle that.
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
@pytest.fixture
def venv_python(tmp_path):
    """Create a virtualenv under ``tmp_path`` and return its python executable."""
    run([*VIRTUALENV, str(tmp_path / ".venv")])
    # The interpreter lives under bin/ (POSIX) or Scripts/ (Windows);
    # search both via the glob and let shutil.which resolve it.
    candidate_dirs = (str(p.parent) for p in tmp_path.glob(".venv/*/python*"))
    return shutil.which("python", path=os.pathsep.join(candidate_dirs))
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
@pytest.fixture(autouse=True)
def _prepare(tmp_path, venv_python, monkeypatch):
    """Create the download cache dir and set build env vars; dump debug info
    (temp dir contents + ``pip freeze``) after each test.
    """
    download_path = os.getenv("DOWNLOAD_PATH", str(tmp_path))
    os.makedirs(download_path, exist_ok=True)

    # Environment vars used for building some of the packages
    monkeypatch.setenv("USE_MYPYC", "1")

    yield

    # Let's provide the maximum amount of information possible in the case
    # it is necessary to debug the tests directly from the CI logs.
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    print("Temporary directory:")
    # BUG FIX: the original ``map(print, tmp_path.glob("*"))`` built a lazy
    # iterator that was never consumed, so nothing was ever printed.
    for entry in tmp_path.glob("*"):
        print(entry)
    print("Virtual environment:")
    run([venv_python, "-m", "pip", "freeze"])
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
@pytest.mark.parametrize(("package", "version"), EXAMPLES)
@pytest.mark.uses_network
def test_install_sdist(package, version, tmp_path, venv_python, setuptools_wheel):
    """pip-install the package from its sdist in a fresh venv, then import it."""
    pip_cmd = (venv_python, "-m", "pip")
    sdist_file = retrieve_sdist(package, version, tmp_path)
    requirements = build_deps(package, sdist_file)
    if requirements:
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        print("Dependencies:", requirements)
        run([*pip_cmd, "install", *requirements])

    # The virtualenv simulates PEP 517 isolation; force-reinstalling the
    # freshly built wheel ensures the setuptools under development is used.
    extra_env = EXTRA_ENV_VARS.get(package, {})
    run([*pip_cmd, "install", "--force-reinstall", setuptools_wheel])
    run([*pip_cmd, "install", *INSTALL_OPTIONS, sdist_file], extra_env)

    # Import the installed package to confirm the installation succeeded.
    module = IMPORT_NAME.get(package, package).replace("-", "_")
    script = f"import {module}; print(getattr({module}, '__version__', 0))"
    run([venv_python, "-c", script])
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
# ---- Helper Functions ----
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def retrieve_sdist(package, version, tmp_path):
    """Either use cached sdist file or download it from PyPI"""
    # `pip download` cannot be used due to
    # https://github.com/pypa/pip/issues/1884
    # https://discuss.python.org/t/pep-625-file-name-of-a-source-distribution/4686
    # We have to find the correct distribution file and download it
    download_path = os.getenv("DOWNLOAD_PATH", str(tmp_path))
    dist = retrieve_pypi_sdist_metadata(package, version)

    # Remove old files to prevent cache to grow indefinitely
    for file in glob(os.path.join(download_path, f"{package}*")):
        # BUG FIX: ``file`` is a full path while ``dist["filename"]`` is a
        # bare file name, so the original comparison was always unequal and
        # even the current cached sdist was deleted; compare base names.
        if os.path.basename(file) != dist["filename"]:
            os.unlink(file)

    dist_file = os.path.join(download_path, dist["filename"])
    if not os.path.exists(dist_file):
        download(dist["url"], dist_file, dist["md5_digest"])
    return dist_file
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def retrieve_pypi_sdist_metadata(package, version):
    """Return the JSON metadata entry for the package's sdist file on PyPI.

    Raises ``ValueError`` if the matching release was yanked.
    """
    # https://warehouse.pypa.io/api-reference/json.html
    id_ = package if version is LATEST else f"{package}/{version}"
    with urlopen(f"https://pypi.org/pypi/{id_}/json") as f:
        metadata = json.load(f)

    if metadata["info"]["yanked"]:
        raise ValueError(f"Release for {package} {version} was yanked")

    # BUG FIX: the original rebound ``version`` to the concrete release
    # string *before* testing ``version is LATEST``, so that branch was
    # unreachable. Decide which release listing to use first.
    if version is LATEST:
        release = metadata["releases"][metadata["info"]["version"]]
    else:
        release = metadata["urls"]
    (sdist,) = filter(lambda d: d["packagetype"] == "sdist", release)
    return sdist
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
def download(url, dest, md5_digest):
    """Fetch *url* into *dest*, checking the payload against *md5_digest*."""
    with urlopen(url) as response:
        payload = response.read()

    # Integrity check before anything is written to disk.
    assert md5(payload).hexdigest() == md5_digest

    with open(dest, "wb") as out:
        out.write(payload)

    assert os.path.exists(dest)
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
def build_deps(package, sdist_file):
    """Find out what are the build dependencies for a package.

    "Manually" install them, since pip will not install build
    deps with `--no-build-isolation`.
    """
    # delay importing, since pytest discovery phase may hit this file from a
    # testenv without tomli
    from setuptools.compat.py310 import tomllib

    pyproject = _read_pyproject(Archive(sdist_file))
    declared = tomllib.loads(pyproject).get("build-system", {}).get("requires", [])
    declared = [*declared, *EXTRA_BUILD_DEPS.get(package, [])]
    # Remove setuptools from requirements (and deduplicate)
    by_name = {Requirement(spec).name: spec for spec in declared}
    return [spec for name, spec in by_name.items() if name != "setuptools"]
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def _read_pyproject(archive):
|
| 218 |
+
contents = (
|
| 219 |
+
archive.get_content(member)
|
| 220 |
+
for member in archive
|
| 221 |
+
if os.path.basename(archive.get_name(member)) == "pyproject.toml"
|
| 222 |
+
)
|
| 223 |
+
return next(contents, "")
|
minigpt2/lib/python3.10/site-packages/open_flamingo/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (275 Bytes). View file
|
|
|
minigpt2/lib/python3.10/site-packages/open_flamingo/eval/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (172 Bytes). View file
|
|
|
minigpt2/lib/python3.10/site-packages/open_flamingo/eval/__pycache__/classification.cpython-310.pyc
ADDED
|
Binary file (3.91 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/open_flamingo/eval/__pycache__/coco_metric.cpython-310.pyc
ADDED
|
Binary file (812 Bytes). View file
|
|
|
minigpt2/lib/python3.10/site-packages/open_flamingo/eval/__pycache__/eval_datasets.cpython-310.pyc
ADDED
|
Binary file (3.88 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/open_flamingo/eval/__pycache__/evaluate.cpython-310.pyc
ADDED
|
Binary file (21.7 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/open_flamingo/eval/__pycache__/imagenet_utils.cpython-310.pyc
ADDED
|
Binary file (13.1 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/open_flamingo/eval/__pycache__/ok_vqa_utils.cpython-310.pyc
ADDED
|
Binary file (6.12 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/open_flamingo/eval/__pycache__/vqa_metric.cpython-310.pyc
ADDED
|
Binary file (15.5 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/open_flamingo/eval/classification.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Sequence, Tuple
|
| 2 |
+
import re
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def postprocess_classification_generation(predictions) -> str:
    """Return the text preceding the first "Prompt"/"Completion" marker."""
    # maxsplit passed as a keyword: positional maxsplit for re.split is
    # deprecated and removed in Python 3.13.
    return re.split("Prompt|Completion", predictions, maxsplit=1)[0]
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def compute_classification_accuracy(predictions: Sequence[Dict[str, str]]) -> float:
    """Compute the accuracy of a sequence of predictions."""
    # Case-insensitive comparison of each prediction against its label
    # (both sides lower-cased, as in the original preprocessing step).
    matches = [
        entry["prediction"].lower() == entry["class_label"].lower()
        for entry in predictions
    ]
    return np.mean(matches).item()
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def compute_shifted_logits_and_labels(
    logits: torch.Tensor, encodings, tokenizer, eoc_token_id
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Build shifted logits/labels so that logit n scores label n.

    Input positions (padding, EOC tokens, and the whole prompt prefix up to
    the last eos token) are masked with -100, the value hf losses ignore.

    Returns a tuple:
        shifted_logits: float Tensor of shape [batch_size, seq_len - 1, vocab].
        shifted_labels: integer Tensor of shape [batch_size, seq_len - 1].
    """
    labels = encodings["input_ids"].clone()

    # Padding and end-of-chunk tokens never contribute to the loss.
    labels[labels == tokenizer.pad_token_id] = -100
    labels[labels == eoc_token_id] = -100

    # Mask everything up to (and including) the last eos token of the
    # prefix. The search runs from the right because the first non-padding
    # token is also eos (bos and eos coincide for this tokenizer).
    for row in range(len(labels)):
        reversed_tokens = labels[row].tolist()[::-1]
        prefix_end = -reversed_tokens.index(tokenizer.eos_token_id) - 1
        labels[row, : prefix_end + 1] = -100

    # Shift so that tokens < n predict n; both tensors end up with shape
    # [batch_size, seq_len - 1].
    shifted_logits = logits[..., :-1, :].contiguous()
    shifted_labels = labels[..., 1:].contiguous()

    return shifted_logits, shifted_labels
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def compute_per_sample_probs(
    encodings, tokenizer, logits: torch.Tensor, eoc_token_id
) -> torch.Tensor:
    """Per-sample probability of the target sequence.

    Assumes <eos token> is used to separate inputs from targets in the
    prompt text
    """
    shift_logits, shift_labels = compute_shifted_logits_and_labels(
        logits, encodings, tokenizer, eoc_token_id
    )

    # Positions of unmasked label tokens: first tensor holds batch indices,
    # second holds sequence positions.
    batch_idx, seq_idx = torch.nonzero(shift_labels != -100, as_tuple=True)
    # Token id at each unmasked position, aligned with the index tensors.
    token_ids = shift_labels[batch_idx, seq_idx]

    # Rows of [batch_idx, sequence_position, token_id] for unmasked tokens.
    targets = torch.column_stack([batch_idx, seq_idx, token_ids])
    targets = targets.to(shift_logits.device)

    # Sanity check that every element in batch has at least one unmasked
    # target token
    assert torch.all(
        torch.bincount(targets[:, 0]) != 0
    ), "At least one element in batch has no unmasked target tokens."

    # Softmax over the token dimension turns logits into proper probabilities.
    token_probs = torch.nn.functional.softmax(shift_logits, 2)

    # Sequence probability = product of its unmasked token probabilities.
    sample_probs = torch.ones(len(shift_labels), device=shift_logits.device)
    for b, s, t in targets:
        sample_probs[b] *= token_probs[b, s, t]

    return sample_probs
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def compute_per_sample_loss(encodings, tokenizer, logits, eoc_token_id) -> torch.Tensor:
    """Compute the per-sample classification loss for a batch.

    Assumes <eos token> is used to separate inputs from targets in the
    prompt text. Returns a tensor of shape [batch_size] holding the mean
    cross-entropy over each sample's unmasked target tokens.
    """
    shift_logits, shift_labels = compute_shifted_logits_and_labels(
        logits, encodings, tokenizer, eoc_token_id
    )

    device = shift_logits.device

    # Token-wise cross entropy over the flattened
    # [batch_size * (seq_len - 1), vocab_size] logits; reduction="none"
    # yields one loss value per token position. Most positions carry the
    # -100 ignore label and contribute nothing.
    per_token_loss = torch.nn.functional.cross_entropy(
        shift_logits.view(-1, shift_logits.size(-1)),
        shift_labels.view(-1).to(device),
        reduction="none",
    )

    # Restore the [batch_size, seq_len - 1] layout for per-sample pooling.
    per_token_loss = per_token_loss.view(
        shift_logits.size(0), shift_logits.size(1)
    ).cpu()

    # Binary mask: 1 where the token belongs to the target (label != -100),
    # 0 where it should be ignored.
    keep_mask = (shift_labels != -100).int().cpu()
    per_token_loss = per_token_loss * keep_mask

    # Per-sample mean: sum over the kept tokens, divided by how many
    # target tokens each sample has -> shape [batch_size].
    return per_token_loss.sum(dim=1) / (shift_labels != -100).sum(dim=1).float()
|
minigpt2/lib/python3.10/site-packages/open_flamingo/eval/coco_metric.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pycocoevalcap.eval import COCOEvalCap
|
| 2 |
+
from pycocotools.coco import COCO
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def compute_cider(
    result_path,
    annotations_path="/data/yfcc-tmp/data/mscoco/annotations/captions_train2017.json",
):
    """Score captioning predictions against MSCOCO annotations.

    Loads the ground-truth annotations and the prediction file, runs the
    standard COCO captioning evaluation, and returns the resulting metric
    dictionary (which includes CIDEr among other scores).
    """
    # Ground truth and predictions as COCO API objects.
    ground_truth = COCO(annotations_path)
    predictions = ground_truth.loadRes(result_path)

    evaluator = COCOEvalCap(ground_truth, predictions)
    # Restrict scoring to the images that actually appear in the results.
    evaluator.params["image_id"] = predictions.getImgIds()
    evaluator.evaluate()

    return evaluator.eval
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def postprocess_captioning_generation(predictions):
    """Return the text preceding the first "Output" marker.

    If the marker does not occur, the input string is returned unchanged.
    """
    before_marker, _sep, _rest = predictions.partition("Output")
    return before_marker
|
minigpt2/lib/python3.10/site-packages/open_flamingo/eval/imagenet_utils.py
ADDED
|
@@ -0,0 +1,1007 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# classnames via https://github.com/mlfoundations/wise-ft/blob/master/src/datasets/imagenet_classnames.py#L1
|
| 2 |
+
openai_imagenet_classnames = [
|
| 3 |
+
"tench",
|
| 4 |
+
"goldfish",
|
| 5 |
+
"great white shark",
|
| 6 |
+
"tiger shark",
|
| 7 |
+
"hammerhead shark",
|
| 8 |
+
"electric ray",
|
| 9 |
+
"stingray",
|
| 10 |
+
"rooster",
|
| 11 |
+
"hen",
|
| 12 |
+
"ostrich",
|
| 13 |
+
"brambling",
|
| 14 |
+
"goldfinch",
|
| 15 |
+
"house finch",
|
| 16 |
+
"junco",
|
| 17 |
+
"indigo bunting",
|
| 18 |
+
"American robin",
|
| 19 |
+
"bulbul",
|
| 20 |
+
"jay",
|
| 21 |
+
"magpie",
|
| 22 |
+
"chickadee",
|
| 23 |
+
"American dipper",
|
| 24 |
+
"kite (bird of prey)",
|
| 25 |
+
"bald eagle",
|
| 26 |
+
"vulture",
|
| 27 |
+
"great grey owl",
|
| 28 |
+
"fire salamander",
|
| 29 |
+
"smooth newt",
|
| 30 |
+
"newt",
|
| 31 |
+
"spotted salamander",
|
| 32 |
+
"axolotl",
|
| 33 |
+
"American bullfrog",
|
| 34 |
+
"tree frog",
|
| 35 |
+
"tailed frog",
|
| 36 |
+
"loggerhead sea turtle",
|
| 37 |
+
"leatherback sea turtle",
|
| 38 |
+
"mud turtle",
|
| 39 |
+
"terrapin",
|
| 40 |
+
"box turtle",
|
| 41 |
+
"banded gecko",
|
| 42 |
+
"green iguana",
|
| 43 |
+
"Carolina anole",
|
| 44 |
+
"desert grassland whiptail lizard",
|
| 45 |
+
"agama",
|
| 46 |
+
"frilled-necked lizard",
|
| 47 |
+
"alligator lizard",
|
| 48 |
+
"Gila monster",
|
| 49 |
+
"European green lizard",
|
| 50 |
+
"chameleon",
|
| 51 |
+
"Komodo dragon",
|
| 52 |
+
"Nile crocodile",
|
| 53 |
+
"American alligator",
|
| 54 |
+
"triceratops",
|
| 55 |
+
"worm snake",
|
| 56 |
+
"ring-necked snake",
|
| 57 |
+
"eastern hog-nosed snake",
|
| 58 |
+
"smooth green snake",
|
| 59 |
+
"kingsnake",
|
| 60 |
+
"garter snake",
|
| 61 |
+
"water snake",
|
| 62 |
+
"vine snake",
|
| 63 |
+
"night snake",
|
| 64 |
+
"boa constrictor",
|
| 65 |
+
"African rock python",
|
| 66 |
+
"Indian cobra",
|
| 67 |
+
"green mamba",
|
| 68 |
+
"sea snake",
|
| 69 |
+
"Saharan horned viper",
|
| 70 |
+
"eastern diamondback rattlesnake",
|
| 71 |
+
"sidewinder rattlesnake",
|
| 72 |
+
"trilobite",
|
| 73 |
+
"harvestman",
|
| 74 |
+
"scorpion",
|
| 75 |
+
"yellow garden spider",
|
| 76 |
+
"barn spider",
|
| 77 |
+
"European garden spider",
|
| 78 |
+
"southern black widow",
|
| 79 |
+
"tarantula",
|
| 80 |
+
"wolf spider",
|
| 81 |
+
"tick",
|
| 82 |
+
"centipede",
|
| 83 |
+
"black grouse",
|
| 84 |
+
"ptarmigan",
|
| 85 |
+
"ruffed grouse",
|
| 86 |
+
"prairie grouse",
|
| 87 |
+
"peafowl",
|
| 88 |
+
"quail",
|
| 89 |
+
"partridge",
|
| 90 |
+
"african grey parrot",
|
| 91 |
+
"macaw",
|
| 92 |
+
"sulphur-crested cockatoo",
|
| 93 |
+
"lorikeet",
|
| 94 |
+
"coucal",
|
| 95 |
+
"bee eater",
|
| 96 |
+
"hornbill",
|
| 97 |
+
"hummingbird",
|
| 98 |
+
"jacamar",
|
| 99 |
+
"toucan",
|
| 100 |
+
"duck",
|
| 101 |
+
"red-breasted merganser",
|
| 102 |
+
"goose",
|
| 103 |
+
"black swan",
|
| 104 |
+
"tusker",
|
| 105 |
+
"echidna",
|
| 106 |
+
"platypus",
|
| 107 |
+
"wallaby",
|
| 108 |
+
"koala",
|
| 109 |
+
"wombat",
|
| 110 |
+
"jellyfish",
|
| 111 |
+
"sea anemone",
|
| 112 |
+
"brain coral",
|
| 113 |
+
"flatworm",
|
| 114 |
+
"nematode",
|
| 115 |
+
"conch",
|
| 116 |
+
"snail",
|
| 117 |
+
"slug",
|
| 118 |
+
"sea slug",
|
| 119 |
+
"chiton",
|
| 120 |
+
"chambered nautilus",
|
| 121 |
+
"Dungeness crab",
|
| 122 |
+
"rock crab",
|
| 123 |
+
"fiddler crab",
|
| 124 |
+
"red king crab",
|
| 125 |
+
"American lobster",
|
| 126 |
+
"spiny lobster",
|
| 127 |
+
"crayfish",
|
| 128 |
+
"hermit crab",
|
| 129 |
+
"isopod",
|
| 130 |
+
"white stork",
|
| 131 |
+
"black stork",
|
| 132 |
+
"spoonbill",
|
| 133 |
+
"flamingo",
|
| 134 |
+
"little blue heron",
|
| 135 |
+
"great egret",
|
| 136 |
+
"bittern bird",
|
| 137 |
+
"crane bird",
|
| 138 |
+
"limpkin",
|
| 139 |
+
"common gallinule",
|
| 140 |
+
"American coot",
|
| 141 |
+
"bustard",
|
| 142 |
+
"ruddy turnstone",
|
| 143 |
+
"dunlin",
|
| 144 |
+
"common redshank",
|
| 145 |
+
"dowitcher",
|
| 146 |
+
"oystercatcher",
|
| 147 |
+
"pelican",
|
| 148 |
+
"king penguin",
|
| 149 |
+
"albatross",
|
| 150 |
+
"grey whale",
|
| 151 |
+
"killer whale",
|
| 152 |
+
"dugong",
|
| 153 |
+
"sea lion",
|
| 154 |
+
"Chihuahua",
|
| 155 |
+
"Japanese Chin",
|
| 156 |
+
"Maltese",
|
| 157 |
+
"Pekingese",
|
| 158 |
+
"Shih Tzu",
|
| 159 |
+
"King Charles Spaniel",
|
| 160 |
+
"Papillon",
|
| 161 |
+
"toy terrier",
|
| 162 |
+
"Rhodesian Ridgeback",
|
| 163 |
+
"Afghan Hound",
|
| 164 |
+
"Basset Hound",
|
| 165 |
+
"Beagle",
|
| 166 |
+
"Bloodhound",
|
| 167 |
+
"Bluetick Coonhound",
|
| 168 |
+
"Black and Tan Coonhound",
|
| 169 |
+
"Treeing Walker Coonhound",
|
| 170 |
+
"English foxhound",
|
| 171 |
+
"Redbone Coonhound",
|
| 172 |
+
"borzoi",
|
| 173 |
+
"Irish Wolfhound",
|
| 174 |
+
"Italian Greyhound",
|
| 175 |
+
"Whippet",
|
| 176 |
+
"Ibizan Hound",
|
| 177 |
+
"Norwegian Elkhound",
|
| 178 |
+
"Otterhound",
|
| 179 |
+
"Saluki",
|
| 180 |
+
"Scottish Deerhound",
|
| 181 |
+
"Weimaraner",
|
| 182 |
+
"Staffordshire Bull Terrier",
|
| 183 |
+
"American Staffordshire Terrier",
|
| 184 |
+
"Bedlington Terrier",
|
| 185 |
+
"Border Terrier",
|
| 186 |
+
"Kerry Blue Terrier",
|
| 187 |
+
"Irish Terrier",
|
| 188 |
+
"Norfolk Terrier",
|
| 189 |
+
"Norwich Terrier",
|
| 190 |
+
"Yorkshire Terrier",
|
| 191 |
+
"Wire Fox Terrier",
|
| 192 |
+
"Lakeland Terrier",
|
| 193 |
+
"Sealyham Terrier",
|
| 194 |
+
"Airedale Terrier",
|
| 195 |
+
"Cairn Terrier",
|
| 196 |
+
"Australian Terrier",
|
| 197 |
+
"Dandie Dinmont Terrier",
|
| 198 |
+
"Boston Terrier",
|
| 199 |
+
"Miniature Schnauzer",
|
| 200 |
+
"Giant Schnauzer",
|
| 201 |
+
"Standard Schnauzer",
|
| 202 |
+
"Scottish Terrier",
|
| 203 |
+
"Tibetan Terrier",
|
| 204 |
+
"Australian Silky Terrier",
|
| 205 |
+
"Soft-coated Wheaten Terrier",
|
| 206 |
+
"West Highland White Terrier",
|
| 207 |
+
"Lhasa Apso",
|
| 208 |
+
"Flat-Coated Retriever",
|
| 209 |
+
"Curly-coated Retriever",
|
| 210 |
+
"Golden Retriever",
|
| 211 |
+
"Labrador Retriever",
|
| 212 |
+
"Chesapeake Bay Retriever",
|
| 213 |
+
"German Shorthaired Pointer",
|
| 214 |
+
"Vizsla",
|
| 215 |
+
"English Setter",
|
| 216 |
+
"Irish Setter",
|
| 217 |
+
"Gordon Setter",
|
| 218 |
+
"Brittany dog",
|
| 219 |
+
"Clumber Spaniel",
|
| 220 |
+
"English Springer Spaniel",
|
| 221 |
+
"Welsh Springer Spaniel",
|
| 222 |
+
"Cocker Spaniel",
|
| 223 |
+
"Sussex Spaniel",
|
| 224 |
+
"Irish Water Spaniel",
|
| 225 |
+
"Kuvasz",
|
| 226 |
+
"Schipperke",
|
| 227 |
+
"Groenendael dog",
|
| 228 |
+
"Malinois",
|
| 229 |
+
"Briard",
|
| 230 |
+
"Australian Kelpie",
|
| 231 |
+
"Komondor",
|
| 232 |
+
"Old English Sheepdog",
|
| 233 |
+
"Shetland Sheepdog",
|
| 234 |
+
"collie",
|
| 235 |
+
"Border Collie",
|
| 236 |
+
"Bouvier des Flandres dog",
|
| 237 |
+
"Rottweiler",
|
| 238 |
+
"German Shepherd Dog",
|
| 239 |
+
"Dobermann",
|
| 240 |
+
"Miniature Pinscher",
|
| 241 |
+
"Greater Swiss Mountain Dog",
|
| 242 |
+
"Bernese Mountain Dog",
|
| 243 |
+
"Appenzeller Sennenhund",
|
| 244 |
+
"Entlebucher Sennenhund",
|
| 245 |
+
"Boxer",
|
| 246 |
+
"Bullmastiff",
|
| 247 |
+
"Tibetan Mastiff",
|
| 248 |
+
"French Bulldog",
|
| 249 |
+
"Great Dane",
|
| 250 |
+
"St. Bernard",
|
| 251 |
+
"husky",
|
| 252 |
+
"Alaskan Malamute",
|
| 253 |
+
"Siberian Husky",
|
| 254 |
+
"Dalmatian",
|
| 255 |
+
"Affenpinscher",
|
| 256 |
+
"Basenji",
|
| 257 |
+
"pug",
|
| 258 |
+
"Leonberger",
|
| 259 |
+
"Newfoundland dog",
|
| 260 |
+
"Great Pyrenees dog",
|
| 261 |
+
"Samoyed",
|
| 262 |
+
"Pomeranian",
|
| 263 |
+
"Chow Chow",
|
| 264 |
+
"Keeshond",
|
| 265 |
+
"brussels griffon",
|
| 266 |
+
"Pembroke Welsh Corgi",
|
| 267 |
+
"Cardigan Welsh Corgi",
|
| 268 |
+
"Toy Poodle",
|
| 269 |
+
"Miniature Poodle",
|
| 270 |
+
"Standard Poodle",
|
| 271 |
+
"Mexican hairless dog (xoloitzcuintli)",
|
| 272 |
+
"grey wolf",
|
| 273 |
+
"Alaskan tundra wolf",
|
| 274 |
+
"red wolf or maned wolf",
|
| 275 |
+
"coyote",
|
| 276 |
+
"dingo",
|
| 277 |
+
"dhole",
|
| 278 |
+
"African wild dog",
|
| 279 |
+
"hyena",
|
| 280 |
+
"red fox",
|
| 281 |
+
"kit fox",
|
| 282 |
+
"Arctic fox",
|
| 283 |
+
"grey fox",
|
| 284 |
+
"tabby cat",
|
| 285 |
+
"tiger cat",
|
| 286 |
+
"Persian cat",
|
| 287 |
+
"Siamese cat",
|
| 288 |
+
"Egyptian Mau",
|
| 289 |
+
"cougar",
|
| 290 |
+
"lynx",
|
| 291 |
+
"leopard",
|
| 292 |
+
"snow leopard",
|
| 293 |
+
"jaguar",
|
| 294 |
+
"lion",
|
| 295 |
+
"tiger",
|
| 296 |
+
"cheetah",
|
| 297 |
+
"brown bear",
|
| 298 |
+
"American black bear",
|
| 299 |
+
"polar bear",
|
| 300 |
+
"sloth bear",
|
| 301 |
+
"mongoose",
|
| 302 |
+
"meerkat",
|
| 303 |
+
"tiger beetle",
|
| 304 |
+
"ladybug",
|
| 305 |
+
"ground beetle",
|
| 306 |
+
"longhorn beetle",
|
| 307 |
+
"leaf beetle",
|
| 308 |
+
"dung beetle",
|
| 309 |
+
"rhinoceros beetle",
|
| 310 |
+
"weevil",
|
| 311 |
+
"fly",
|
| 312 |
+
"bee",
|
| 313 |
+
"ant",
|
| 314 |
+
"grasshopper",
|
| 315 |
+
"cricket insect",
|
| 316 |
+
"stick insect",
|
| 317 |
+
"cockroach",
|
| 318 |
+
"praying mantis",
|
| 319 |
+
"cicada",
|
| 320 |
+
"leafhopper",
|
| 321 |
+
"lacewing",
|
| 322 |
+
"dragonfly",
|
| 323 |
+
"damselfly",
|
| 324 |
+
"red admiral butterfly",
|
| 325 |
+
"ringlet butterfly",
|
| 326 |
+
"monarch butterfly",
|
| 327 |
+
"small white butterfly",
|
| 328 |
+
"sulphur butterfly",
|
| 329 |
+
"gossamer-winged butterfly",
|
| 330 |
+
"starfish",
|
| 331 |
+
"sea urchin",
|
| 332 |
+
"sea cucumber",
|
| 333 |
+
"cottontail rabbit",
|
| 334 |
+
"hare",
|
| 335 |
+
"Angora rabbit",
|
| 336 |
+
"hamster",
|
| 337 |
+
"porcupine",
|
| 338 |
+
"fox squirrel",
|
| 339 |
+
"marmot",
|
| 340 |
+
"beaver",
|
| 341 |
+
"guinea pig",
|
| 342 |
+
"common sorrel horse",
|
| 343 |
+
"zebra",
|
| 344 |
+
"pig",
|
| 345 |
+
"wild boar",
|
| 346 |
+
"warthog",
|
| 347 |
+
"hippopotamus",
|
| 348 |
+
"ox",
|
| 349 |
+
"water buffalo",
|
| 350 |
+
"bison",
|
| 351 |
+
"ram (adult male sheep)",
|
| 352 |
+
"bighorn sheep",
|
| 353 |
+
"Alpine ibex",
|
| 354 |
+
"hartebeest",
|
| 355 |
+
"impala (antelope)",
|
| 356 |
+
"gazelle",
|
| 357 |
+
"arabian camel",
|
| 358 |
+
"llama",
|
| 359 |
+
"weasel",
|
| 360 |
+
"mink",
|
| 361 |
+
"European polecat",
|
| 362 |
+
"black-footed ferret",
|
| 363 |
+
"otter",
|
| 364 |
+
"skunk",
|
| 365 |
+
"badger",
|
| 366 |
+
"armadillo",
|
| 367 |
+
"three-toed sloth",
|
| 368 |
+
"orangutan",
|
| 369 |
+
"gorilla",
|
| 370 |
+
"chimpanzee",
|
| 371 |
+
"gibbon",
|
| 372 |
+
"siamang",
|
| 373 |
+
"guenon",
|
| 374 |
+
"patas monkey",
|
| 375 |
+
"baboon",
|
| 376 |
+
"macaque",
|
| 377 |
+
"langur",
|
| 378 |
+
"black-and-white colobus",
|
| 379 |
+
"proboscis monkey",
|
| 380 |
+
"marmoset",
|
| 381 |
+
"white-headed capuchin",
|
| 382 |
+
"howler monkey",
|
| 383 |
+
"titi monkey",
|
| 384 |
+
"Geoffroy's spider monkey",
|
| 385 |
+
"common squirrel monkey",
|
| 386 |
+
"ring-tailed lemur",
|
| 387 |
+
"indri",
|
| 388 |
+
"Asian elephant",
|
| 389 |
+
"African bush elephant",
|
| 390 |
+
"red panda",
|
| 391 |
+
"giant panda",
|
| 392 |
+
"snoek fish",
|
| 393 |
+
"eel",
|
| 394 |
+
"silver salmon",
|
| 395 |
+
"rock beauty fish",
|
| 396 |
+
"clownfish",
|
| 397 |
+
"sturgeon",
|
| 398 |
+
"gar fish",
|
| 399 |
+
"lionfish",
|
| 400 |
+
"pufferfish",
|
| 401 |
+
"abacus",
|
| 402 |
+
"abaya",
|
| 403 |
+
"academic gown",
|
| 404 |
+
"accordion",
|
| 405 |
+
"acoustic guitar",
|
| 406 |
+
"aircraft carrier",
|
| 407 |
+
"airliner",
|
| 408 |
+
"airship",
|
| 409 |
+
"altar",
|
| 410 |
+
"ambulance",
|
| 411 |
+
"amphibious vehicle",
|
| 412 |
+
"analog clock",
|
| 413 |
+
"apiary",
|
| 414 |
+
"apron",
|
| 415 |
+
"trash can",
|
| 416 |
+
"assault rifle",
|
| 417 |
+
"backpack",
|
| 418 |
+
"bakery",
|
| 419 |
+
"balance beam",
|
| 420 |
+
"balloon",
|
| 421 |
+
"ballpoint pen",
|
| 422 |
+
"Band-Aid",
|
| 423 |
+
"banjo",
|
| 424 |
+
"baluster / handrail",
|
| 425 |
+
"barbell",
|
| 426 |
+
"barber chair",
|
| 427 |
+
"barbershop",
|
| 428 |
+
"barn",
|
| 429 |
+
"barometer",
|
| 430 |
+
"barrel",
|
| 431 |
+
"wheelbarrow",
|
| 432 |
+
"baseball",
|
| 433 |
+
"basketball",
|
| 434 |
+
"bassinet",
|
| 435 |
+
"bassoon",
|
| 436 |
+
"swimming cap",
|
| 437 |
+
"bath towel",
|
| 438 |
+
"bathtub",
|
| 439 |
+
"station wagon",
|
| 440 |
+
"lighthouse",
|
| 441 |
+
"beaker",
|
| 442 |
+
"military hat (bearskin or shako)",
|
| 443 |
+
"beer bottle",
|
| 444 |
+
"beer glass",
|
| 445 |
+
"bell tower",
|
| 446 |
+
"baby bib",
|
| 447 |
+
"tandem bicycle",
|
| 448 |
+
"bikini",
|
| 449 |
+
"ring binder",
|
| 450 |
+
"binoculars",
|
| 451 |
+
"birdhouse",
|
| 452 |
+
"boathouse",
|
| 453 |
+
"bobsleigh",
|
| 454 |
+
"bolo tie",
|
| 455 |
+
"poke bonnet",
|
| 456 |
+
"bookcase",
|
| 457 |
+
"bookstore",
|
| 458 |
+
"bottle cap",
|
| 459 |
+
"hunting bow",
|
| 460 |
+
"bow tie",
|
| 461 |
+
"brass memorial plaque",
|
| 462 |
+
"bra",
|
| 463 |
+
"breakwater",
|
| 464 |
+
"breastplate",
|
| 465 |
+
"broom",
|
| 466 |
+
"bucket",
|
| 467 |
+
"buckle",
|
| 468 |
+
"bulletproof vest",
|
| 469 |
+
"high-speed train",
|
| 470 |
+
"butcher shop",
|
| 471 |
+
"taxicab",
|
| 472 |
+
"cauldron",
|
| 473 |
+
"candle",
|
| 474 |
+
"cannon",
|
| 475 |
+
"canoe",
|
| 476 |
+
"can opener",
|
| 477 |
+
"cardigan",
|
| 478 |
+
"car mirror",
|
| 479 |
+
"carousel",
|
| 480 |
+
"tool kit",
|
| 481 |
+
"cardboard box / carton",
|
| 482 |
+
"car wheel",
|
| 483 |
+
"automated teller machine",
|
| 484 |
+
"cassette",
|
| 485 |
+
"cassette player",
|
| 486 |
+
"castle",
|
| 487 |
+
"catamaran",
|
| 488 |
+
"CD player",
|
| 489 |
+
"cello",
|
| 490 |
+
"mobile phone",
|
| 491 |
+
"chain",
|
| 492 |
+
"chain-link fence",
|
| 493 |
+
"chain mail",
|
| 494 |
+
"chainsaw",
|
| 495 |
+
"storage chest",
|
| 496 |
+
"chiffonier",
|
| 497 |
+
"bell or wind chime",
|
| 498 |
+
"china cabinet",
|
| 499 |
+
"Christmas stocking",
|
| 500 |
+
"church",
|
| 501 |
+
"movie theater",
|
| 502 |
+
"cleaver",
|
| 503 |
+
"cliff dwelling",
|
| 504 |
+
"cloak",
|
| 505 |
+
"clogs",
|
| 506 |
+
"cocktail shaker",
|
| 507 |
+
"coffee mug",
|
| 508 |
+
"coffeemaker",
|
| 509 |
+
"spiral or coil",
|
| 510 |
+
"combination lock",
|
| 511 |
+
"computer keyboard",
|
| 512 |
+
"candy store",
|
| 513 |
+
"container ship",
|
| 514 |
+
"convertible",
|
| 515 |
+
"corkscrew",
|
| 516 |
+
"cornet",
|
| 517 |
+
"cowboy boot",
|
| 518 |
+
"cowboy hat",
|
| 519 |
+
"cradle",
|
| 520 |
+
"construction crane",
|
| 521 |
+
"crash helmet",
|
| 522 |
+
"crate",
|
| 523 |
+
"infant bed",
|
| 524 |
+
"Crock Pot",
|
| 525 |
+
"croquet ball",
|
| 526 |
+
"crutch",
|
| 527 |
+
"cuirass",
|
| 528 |
+
"dam",
|
| 529 |
+
"desk",
|
| 530 |
+
"desktop computer",
|
| 531 |
+
"rotary dial telephone",
|
| 532 |
+
"diaper",
|
| 533 |
+
"digital clock",
|
| 534 |
+
"digital watch",
|
| 535 |
+
"dining table",
|
| 536 |
+
"dishcloth",
|
| 537 |
+
"dishwasher",
|
| 538 |
+
"disc brake",
|
| 539 |
+
"dock",
|
| 540 |
+
"dog sled",
|
| 541 |
+
"dome",
|
| 542 |
+
"doormat",
|
| 543 |
+
"drilling rig",
|
| 544 |
+
"drum",
|
| 545 |
+
"drumstick",
|
| 546 |
+
"dumbbell",
|
| 547 |
+
"Dutch oven",
|
| 548 |
+
"electric fan",
|
| 549 |
+
"electric guitar",
|
| 550 |
+
"electric locomotive",
|
| 551 |
+
"entertainment center",
|
| 552 |
+
"envelope",
|
| 553 |
+
"espresso machine",
|
| 554 |
+
"face powder",
|
| 555 |
+
"feather boa",
|
| 556 |
+
"filing cabinet",
|
| 557 |
+
"fireboat",
|
| 558 |
+
"fire truck",
|
| 559 |
+
"fire screen",
|
| 560 |
+
"flagpole",
|
| 561 |
+
"flute",
|
| 562 |
+
"folding chair",
|
| 563 |
+
"football helmet",
|
| 564 |
+
"forklift",
|
| 565 |
+
"fountain",
|
| 566 |
+
"fountain pen",
|
| 567 |
+
"four-poster bed",
|
| 568 |
+
"freight car",
|
| 569 |
+
"French horn",
|
| 570 |
+
"frying pan",
|
| 571 |
+
"fur coat",
|
| 572 |
+
"garbage truck",
|
| 573 |
+
"gas mask or respirator",
|
| 574 |
+
"gas pump",
|
| 575 |
+
"goblet",
|
| 576 |
+
"go-kart",
|
| 577 |
+
"golf ball",
|
| 578 |
+
"golf cart",
|
| 579 |
+
"gondola",
|
| 580 |
+
"gong",
|
| 581 |
+
"gown",
|
| 582 |
+
"grand piano",
|
| 583 |
+
"greenhouse",
|
| 584 |
+
"radiator grille",
|
| 585 |
+
"grocery store",
|
| 586 |
+
"guillotine",
|
| 587 |
+
"hair clip",
|
| 588 |
+
"hair spray",
|
| 589 |
+
"half-track",
|
| 590 |
+
"hammer",
|
| 591 |
+
"hamper",
|
| 592 |
+
"hair dryer",
|
| 593 |
+
"hand-held computer",
|
| 594 |
+
"handkerchief",
|
| 595 |
+
"hard disk drive",
|
| 596 |
+
"harmonica",
|
| 597 |
+
"harp",
|
| 598 |
+
"combine harvester",
|
| 599 |
+
"hatchet",
|
| 600 |
+
"holster",
|
| 601 |
+
"home theater",
|
| 602 |
+
"honeycomb",
|
| 603 |
+
"hook",
|
| 604 |
+
"hoop skirt",
|
| 605 |
+
"gymnastic horizontal bar",
|
| 606 |
+
"horse-drawn vehicle",
|
| 607 |
+
"hourglass",
|
| 608 |
+
"iPod",
|
| 609 |
+
"clothes iron",
|
| 610 |
+
"carved pumpkin",
|
| 611 |
+
"jeans",
|
| 612 |
+
"jeep",
|
| 613 |
+
"T-shirt",
|
| 614 |
+
"jigsaw puzzle",
|
| 615 |
+
"rickshaw",
|
| 616 |
+
"joystick",
|
| 617 |
+
"kimono",
|
| 618 |
+
"knee pad",
|
| 619 |
+
"knot",
|
| 620 |
+
"lab coat",
|
| 621 |
+
"ladle",
|
| 622 |
+
"lampshade",
|
| 623 |
+
"laptop computer",
|
| 624 |
+
"lawn mower",
|
| 625 |
+
"lens cap",
|
| 626 |
+
"letter opener",
|
| 627 |
+
"library",
|
| 628 |
+
"lifeboat",
|
| 629 |
+
"lighter",
|
| 630 |
+
"limousine",
|
| 631 |
+
"ocean liner",
|
| 632 |
+
"lipstick",
|
| 633 |
+
"slip-on shoe",
|
| 634 |
+
"lotion",
|
| 635 |
+
"music speaker",
|
| 636 |
+
"loupe magnifying glass",
|
| 637 |
+
"sawmill",
|
| 638 |
+
"magnetic compass",
|
| 639 |
+
"messenger bag",
|
| 640 |
+
"mailbox",
|
| 641 |
+
"tights",
|
| 642 |
+
"one-piece bathing suit",
|
| 643 |
+
"manhole cover",
|
| 644 |
+
"maraca",
|
| 645 |
+
"marimba",
|
| 646 |
+
"mask",
|
| 647 |
+
"matchstick",
|
| 648 |
+
"maypole",
|
| 649 |
+
"maze",
|
| 650 |
+
"measuring cup",
|
| 651 |
+
"medicine cabinet",
|
| 652 |
+
"megalith",
|
| 653 |
+
"microphone",
|
| 654 |
+
"microwave oven",
|
| 655 |
+
"military uniform",
|
| 656 |
+
"milk can",
|
| 657 |
+
"minibus",
|
| 658 |
+
"miniskirt",
|
| 659 |
+
"minivan",
|
| 660 |
+
"missile",
|
| 661 |
+
"mitten",
|
| 662 |
+
"mixing bowl",
|
| 663 |
+
"mobile home",
|
| 664 |
+
"ford model t",
|
| 665 |
+
"modem",
|
| 666 |
+
"monastery",
|
| 667 |
+
"monitor",
|
| 668 |
+
"moped",
|
| 669 |
+
"mortar and pestle",
|
| 670 |
+
"graduation cap",
|
| 671 |
+
"mosque",
|
| 672 |
+
"mosquito net",
|
| 673 |
+
"vespa",
|
| 674 |
+
"mountain bike",
|
| 675 |
+
"tent",
|
| 676 |
+
"computer mouse",
|
| 677 |
+
"mousetrap",
|
| 678 |
+
"moving van",
|
| 679 |
+
"muzzle",
|
| 680 |
+
"metal nail",
|
| 681 |
+
"neck brace",
|
| 682 |
+
"necklace",
|
| 683 |
+
"baby pacifier",
|
| 684 |
+
"notebook computer",
|
| 685 |
+
"obelisk",
|
| 686 |
+
"oboe",
|
| 687 |
+
"ocarina",
|
| 688 |
+
"odometer",
|
| 689 |
+
"oil filter",
|
| 690 |
+
"pipe organ",
|
| 691 |
+
"oscilloscope",
|
| 692 |
+
"overskirt",
|
| 693 |
+
"bullock cart",
|
| 694 |
+
"oxygen mask",
|
| 695 |
+
"product packet / packaging",
|
| 696 |
+
"paddle",
|
| 697 |
+
"paddle wheel",
|
| 698 |
+
"padlock",
|
| 699 |
+
"paintbrush",
|
| 700 |
+
"pajamas",
|
| 701 |
+
"palace",
|
| 702 |
+
"pan flute",
|
| 703 |
+
"paper towel",
|
| 704 |
+
"parachute",
|
| 705 |
+
"parallel bars",
|
| 706 |
+
"park bench",
|
| 707 |
+
"parking meter",
|
| 708 |
+
"railroad car",
|
| 709 |
+
"patio",
|
| 710 |
+
"payphone",
|
| 711 |
+
"pedestal",
|
| 712 |
+
"pencil case",
|
| 713 |
+
"pencil sharpener",
|
| 714 |
+
"perfume",
|
| 715 |
+
"Petri dish",
|
| 716 |
+
"photocopier",
|
| 717 |
+
"plectrum",
|
| 718 |
+
"Pickelhaube",
|
| 719 |
+
"picket fence",
|
| 720 |
+
"pickup truck",
|
| 721 |
+
"pier",
|
| 722 |
+
"piggy bank",
|
| 723 |
+
"pill bottle",
|
| 724 |
+
"pillow",
|
| 725 |
+
"ping-pong ball",
|
| 726 |
+
"pinwheel",
|
| 727 |
+
"pirate ship",
|
| 728 |
+
"drink pitcher",
|
| 729 |
+
"block plane",
|
| 730 |
+
"planetarium",
|
| 731 |
+
"plastic bag",
|
| 732 |
+
"plate rack",
|
| 733 |
+
"farm plow",
|
| 734 |
+
"plunger",
|
| 735 |
+
"Polaroid camera",
|
| 736 |
+
"pole",
|
| 737 |
+
"police van",
|
| 738 |
+
"poncho",
|
| 739 |
+
"pool table",
|
| 740 |
+
"soda bottle",
|
| 741 |
+
"plant pot",
|
| 742 |
+
"potter's wheel",
|
| 743 |
+
"power drill",
|
| 744 |
+
"prayer rug",
|
| 745 |
+
"printer",
|
| 746 |
+
"prison",
|
| 747 |
+
"missile",
|
| 748 |
+
"projector",
|
| 749 |
+
"hockey puck",
|
| 750 |
+
"punching bag",
|
| 751 |
+
"purse",
|
| 752 |
+
"quill",
|
| 753 |
+
"quilt",
|
| 754 |
+
"race car",
|
| 755 |
+
"racket",
|
| 756 |
+
"radiator",
|
| 757 |
+
"radio",
|
| 758 |
+
"radio telescope",
|
| 759 |
+
"rain barrel",
|
| 760 |
+
"recreational vehicle",
|
| 761 |
+
"fishing casting reel",
|
| 762 |
+
"reflex camera",
|
| 763 |
+
"refrigerator",
|
| 764 |
+
"remote control",
|
| 765 |
+
"restaurant",
|
| 766 |
+
"revolver",
|
| 767 |
+
"rifle",
|
| 768 |
+
"rocking chair",
|
| 769 |
+
"rotisserie",
|
| 770 |
+
"eraser",
|
| 771 |
+
"rugby ball",
|
| 772 |
+
"ruler measuring stick",
|
| 773 |
+
"sneaker",
|
| 774 |
+
"safe",
|
| 775 |
+
"safety pin",
|
| 776 |
+
"salt shaker",
|
| 777 |
+
"sandal",
|
| 778 |
+
"sarong",
|
| 779 |
+
"saxophone",
|
| 780 |
+
"scabbard",
|
| 781 |
+
"weighing scale",
|
| 782 |
+
"school bus",
|
| 783 |
+
"schooner",
|
| 784 |
+
"scoreboard",
|
| 785 |
+
"CRT monitor",
|
| 786 |
+
"screw",
|
| 787 |
+
"screwdriver",
|
| 788 |
+
"seat belt",
|
| 789 |
+
"sewing machine",
|
| 790 |
+
"shield",
|
| 791 |
+
"shoe store",
|
| 792 |
+
"shoji screen / room divider",
|
| 793 |
+
"shopping basket",
|
| 794 |
+
"shopping cart",
|
| 795 |
+
"shovel",
|
| 796 |
+
"shower cap",
|
| 797 |
+
"shower curtain",
|
| 798 |
+
"ski",
|
| 799 |
+
"balaclava ski mask",
|
| 800 |
+
"sleeping bag",
|
| 801 |
+
"slide rule",
|
| 802 |
+
"sliding door",
|
| 803 |
+
"slot machine",
|
| 804 |
+
"snorkel",
|
| 805 |
+
"snowmobile",
|
| 806 |
+
"snowplow",
|
| 807 |
+
"soap dispenser",
|
| 808 |
+
"soccer ball",
|
| 809 |
+
"sock",
|
| 810 |
+
"solar thermal collector",
|
| 811 |
+
"sombrero",
|
| 812 |
+
"soup bowl",
|
| 813 |
+
"keyboard space bar",
|
| 814 |
+
"space heater",
|
| 815 |
+
"space shuttle",
|
| 816 |
+
"spatula",
|
| 817 |
+
"motorboat",
|
| 818 |
+
"spider web",
|
| 819 |
+
"spindle",
|
| 820 |
+
"sports car",
|
| 821 |
+
"spotlight",
|
| 822 |
+
"stage",
|
| 823 |
+
"steam locomotive",
|
| 824 |
+
"through arch bridge",
|
| 825 |
+
"steel drum",
|
| 826 |
+
"stethoscope",
|
| 827 |
+
"scarf",
|
| 828 |
+
"stone wall",
|
| 829 |
+
"stopwatch",
|
| 830 |
+
"stove",
|
| 831 |
+
"strainer",
|
| 832 |
+
"tram",
|
| 833 |
+
"stretcher",
|
| 834 |
+
"couch",
|
| 835 |
+
"stupa",
|
| 836 |
+
"submarine",
|
| 837 |
+
"suit",
|
| 838 |
+
"sundial",
|
| 839 |
+
"sunglasses",
|
| 840 |
+
"sunglasses",
|
| 841 |
+
"sunscreen",
|
| 842 |
+
"suspension bridge",
|
| 843 |
+
"mop",
|
| 844 |
+
"sweatshirt",
|
| 845 |
+
"swim trunks / shorts",
|
| 846 |
+
"swing",
|
| 847 |
+
"electrical switch",
|
| 848 |
+
"syringe",
|
| 849 |
+
"table lamp",
|
| 850 |
+
"tank",
|
| 851 |
+
"tape player",
|
| 852 |
+
"teapot",
|
| 853 |
+
"teddy bear",
|
| 854 |
+
"television",
|
| 855 |
+
"tennis ball",
|
| 856 |
+
"thatched roof",
|
| 857 |
+
"front curtain",
|
| 858 |
+
"thimble",
|
| 859 |
+
"threshing machine",
|
| 860 |
+
"throne",
|
| 861 |
+
"tile roof",
|
| 862 |
+
"toaster",
|
| 863 |
+
"tobacco shop",
|
| 864 |
+
"toilet seat",
|
| 865 |
+
"torch",
|
| 866 |
+
"totem pole",
|
| 867 |
+
"tow truck",
|
| 868 |
+
"toy store",
|
| 869 |
+
"tractor",
|
| 870 |
+
"semi-trailer truck",
|
| 871 |
+
"tray",
|
| 872 |
+
"trench coat",
|
| 873 |
+
"tricycle",
|
| 874 |
+
"trimaran",
|
| 875 |
+
"tripod",
|
| 876 |
+
"triumphal arch",
|
| 877 |
+
"trolleybus",
|
| 878 |
+
"trombone",
|
| 879 |
+
"hot tub",
|
| 880 |
+
"turnstile",
|
| 881 |
+
"typewriter keyboard",
|
| 882 |
+
"umbrella",
|
| 883 |
+
"unicycle",
|
| 884 |
+
"upright piano",
|
| 885 |
+
"vacuum cleaner",
|
| 886 |
+
"vase",
|
| 887 |
+
"vaulted or arched ceiling",
|
| 888 |
+
"velvet fabric",
|
| 889 |
+
"vending machine",
|
| 890 |
+
"vestment",
|
| 891 |
+
"viaduct",
|
| 892 |
+
"violin",
|
| 893 |
+
"volleyball",
|
| 894 |
+
"waffle iron",
|
| 895 |
+
"wall clock",
|
| 896 |
+
"wallet",
|
| 897 |
+
"wardrobe",
|
| 898 |
+
"military aircraft",
|
| 899 |
+
"sink",
|
| 900 |
+
"washing machine",
|
| 901 |
+
"water bottle",
|
| 902 |
+
"water jug",
|
| 903 |
+
"water tower",
|
| 904 |
+
"whiskey jug",
|
| 905 |
+
"whistle",
|
| 906 |
+
"hair wig",
|
| 907 |
+
"window screen",
|
| 908 |
+
"window shade",
|
| 909 |
+
"Windsor tie",
|
| 910 |
+
"wine bottle",
|
| 911 |
+
"airplane wing",
|
| 912 |
+
"wok",
|
| 913 |
+
"wooden spoon",
|
| 914 |
+
"wool",
|
| 915 |
+
"split-rail fence",
|
| 916 |
+
"shipwreck",
|
| 917 |
+
"sailboat",
|
| 918 |
+
"yurt",
|
| 919 |
+
"website",
|
| 920 |
+
"comic book",
|
| 921 |
+
"crossword",
|
| 922 |
+
"traffic or street sign",
|
| 923 |
+
"traffic light",
|
| 924 |
+
"dust jacket",
|
| 925 |
+
"menu",
|
| 926 |
+
"plate",
|
| 927 |
+
"guacamole",
|
| 928 |
+
"consomme",
|
| 929 |
+
"hot pot",
|
| 930 |
+
"trifle",
|
| 931 |
+
"ice cream",
|
| 932 |
+
"popsicle",
|
| 933 |
+
"baguette",
|
| 934 |
+
"bagel",
|
| 935 |
+
"pretzel",
|
| 936 |
+
"cheeseburger",
|
| 937 |
+
"hot dog",
|
| 938 |
+
"mashed potatoes",
|
| 939 |
+
"cabbage",
|
| 940 |
+
"broccoli",
|
| 941 |
+
"cauliflower",
|
| 942 |
+
"zucchini",
|
| 943 |
+
"spaghetti squash",
|
| 944 |
+
"acorn squash",
|
| 945 |
+
"butternut squash",
|
| 946 |
+
"cucumber",
|
| 947 |
+
"artichoke",
|
| 948 |
+
"bell pepper",
|
| 949 |
+
"cardoon",
|
| 950 |
+
"mushroom",
|
| 951 |
+
"Granny Smith apple",
|
| 952 |
+
"strawberry",
|
| 953 |
+
"orange",
|
| 954 |
+
"lemon",
|
| 955 |
+
"fig",
|
| 956 |
+
"pineapple",
|
| 957 |
+
"banana",
|
| 958 |
+
"jackfruit",
|
| 959 |
+
"cherimoya (custard apple)",
|
| 960 |
+
"pomegranate",
|
| 961 |
+
"hay",
|
| 962 |
+
"carbonara",
|
| 963 |
+
"chocolate syrup",
|
| 964 |
+
"dough",
|
| 965 |
+
"meatloaf",
|
| 966 |
+
"pizza",
|
| 967 |
+
"pot pie",
|
| 968 |
+
"burrito",
|
| 969 |
+
"red wine",
|
| 970 |
+
"espresso",
|
| 971 |
+
"tea cup",
|
| 972 |
+
"eggnog",
|
| 973 |
+
"mountain",
|
| 974 |
+
"bubble",
|
| 975 |
+
"cliff",
|
| 976 |
+
"coral reef",
|
| 977 |
+
"geyser",
|
| 978 |
+
"lakeshore",
|
| 979 |
+
"promontory",
|
| 980 |
+
"sandbar",
|
| 981 |
+
"beach",
|
| 982 |
+
"valley",
|
| 983 |
+
"volcano",
|
| 984 |
+
"baseball player",
|
| 985 |
+
"bridegroom",
|
| 986 |
+
"scuba diver",
|
| 987 |
+
"rapeseed",
|
| 988 |
+
"daisy",
|
| 989 |
+
"yellow lady's slipper",
|
| 990 |
+
"corn",
|
| 991 |
+
"acorn",
|
| 992 |
+
"rose hip",
|
| 993 |
+
"horse chestnut seed",
|
| 994 |
+
"coral fungus",
|
| 995 |
+
"agaric",
|
| 996 |
+
"gyromitra",
|
| 997 |
+
"stinkhorn mushroom",
|
| 998 |
+
"earth star fungus",
|
| 999 |
+
"hen of the woods mushroom",
|
| 1000 |
+
"bolete",
|
| 1001 |
+
"corn cob",
|
| 1002 |
+
"toilet paper",
|
| 1003 |
+
]
|
| 1004 |
+
# Maps numeric class ids to labels
IMAGENET_1K_CLASS_ID_TO_LABEL = dict(enumerate(openai_imagenet_classnames))
|
minigpt2/lib/python3.10/site-packages/open_flamingo/src/__init__.py
ADDED
|
File without changes
|
minigpt2/lib/python3.10/site-packages/open_flamingo/src/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (171 Bytes). View file
|
|
|
minigpt2/lib/python3.10/site-packages/open_flamingo/src/__pycache__/flamingo.cpython-310.pyc
ADDED
|
Binary file (6.79 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/open_flamingo/src/__pycache__/helpers.cpython-310.pyc
ADDED
|
Binary file (6.78 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/open_flamingo/src/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (1.11 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/open_flamingo/src/flamingo.py
ADDED
|
@@ -0,0 +1,198 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from einops import rearrange
|
| 3 |
+
from torch import nn
|
| 4 |
+
|
| 5 |
+
from .helpers import PerceiverResampler
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class Flamingo(nn.Module):
    """Flamingo-style multimodal model.

    A vision encoder produces visual tokens that are compressed by a
    PerceiverResampler; the resampled latents condition gated cross-attention
    layers that ``lang_encoder.init_flamingo`` injects into a causal language
    model. The language model then handles both plain forward passes and
    autoregressive generation.
    """

    def __init__(
        self,
        vision_encoder: nn.Module,
        lang_encoder: nn.Module,
        eoc_token_id: int,
        media_token_id: int,
        vis_dim: int,
        cross_attn_every_n_layers: int = 1,
        use_media_placement_augmentation: bool = False,
    ):
        """
        Args:
            vision_encoder (nn.Module): HF CLIPModel
            lang_encoder (nn.Module): HF causal language model
            eoc_token_id (int): Token id for <|endofchunk|>
            media_token_id (int): Token id for <image>
            vis_dim (int): Dimension of the visual features.
                Visual features are projected to match this shape along the last dimension.
            cross_attn_every_n_layers (int, optional): How often to apply cross attention after transformer layer. Defaults to 1.
            use_media_placement_augmentation (bool, optional): Whether to randomly assign images to the preceding or following text in training. Defaults to False.
        """
        super().__init__()
        self.eoc_token_id = eoc_token_id
        self.media_token_id = media_token_id
        self.use_media_placement_augmentation = use_media_placement_augmentation
        self.vis_dim = vis_dim
        self.vision_encoder = vision_encoder
        # Compresses a variable number of visual tokens to a fixed latent set.
        self.perceiver = PerceiverResampler(dim=self.vis_dim)
        self.lang_encoder = lang_encoder
        # Injects gated cross-attention layers into the language model; the
        # lang_encoder is expected to carry FlamingoLMMixin (provides
        # init_flamingo / is_conditioned / clear_conditioned_layers).
        self.lang_encoder.init_flamingo(
            media_token_id=media_token_id,
            vis_hidden_size=self.vis_dim,
            cross_attn_every_n_layers=cross_attn_every_n_layers,
            use_media_placement_augmentation=self.use_media_placement_augmentation,
        )

    def forward(
        self,
        vision_x: torch.Tensor,
        lang_x: torch.Tensor,
        attention_mask: torch.Tensor = None,
        labels: torch.Tensor = None,
        use_cached_vision_x: bool = False,
        clear_conditioned_layers: bool = True,
        past_key_values=None,
        use_cache: bool = False,
    ):
        """
        Forward pass of Flamingo.

        Args:
            vision_x (torch.Tensor): Vision input
                shape (B, T_img, F, C, H, W) with F=1
            lang_x (torch.Tensor): Language input ids
                shape (B, T_txt)
            attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.
            labels (torch.Tensor, optional): Labels. Defaults to None.
            use_cached_vision_x: if True, reuse visual features conditioned by
                a previous forward pass instead of re-encoding vision_x.
            clear_conditioned_layers: if True, clear the conditioned layers
                once the forward pass is completed. Set this to false if the
                same set of images will be reused in another subsequent
                forward pass.
            past_key_values: pre-computed values to pass to language model.
                See past_key_values documentation in Hugging Face
                CausalLM models.
            use_cache: whether to use cached key values. See use_cache
                documentation in Hugging Face CausalLM models.
        """
        assert (
            vision_x is not None
        ) or use_cached_vision_x, (
            "Must provide either vision_x or use_cached_vision_x to True."
        )

        if use_cached_vision_x:
            # Case: use cached; vision_x should be cached and other
            # vision-related inputs should not be provided.
            assert (
                vision_x is None
            ), "Expect vision_x to be None when use_cached_vision_x is True."
            assert self.lang_encoder.is_conditioned()

        else:
            # Case: do not use caching (i.e. this is a standard forward pass);
            self._encode_vision_x(vision_x=vision_x)

        output = self.lang_encoder(
            input_ids=lang_x,
            attention_mask=attention_mask,
            labels=labels,
            past_key_values=past_key_values,
            use_cache=use_cache,
        )

        if clear_conditioned_layers:
            self.lang_encoder.clear_conditioned_layers()

        return output

    def generate(
        self,
        vision_x: torch.Tensor,
        lang_x: torch.Tensor,
        attention_mask: torch.Tensor = None,
        num_beams=1,
        max_new_tokens=None,
        temperature=1.0,
        top_k=0,
        top_p=1.0,
        no_repeat_ngram_size=0,
        prefix_allowed_tokens_fn=None,
        length_penalty=1.0,
        num_return_sequences=1,
        do_sample=False,
        early_stopping=False,
    ):
        """
        Generate text conditioned on vision and language inputs.

        Args:
            vision_x (torch.Tensor): Vision input
                shape (B, T_img, F, C, H, W)
                images in the same chunk are collated along T_img, and frames are collated along F
                currently only F=1 is supported (single-frame videos)
            lang_x (torch.Tensor): Language input
                shape (B, T_txt)
            attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.
            num_beams (int, optional): Number of beams. Defaults to 1.
            max_new_tokens (int, optional): Maximum new tokens. Defaults to None.
            temperature (float, optional): Temperature. Defaults to 1.0.
            top_k (int, optional): Top k. Defaults to 0.
            top_p (float, optional): Top p. Defaults to 1.0.
            no_repeat_ngram_size (int, optional): No repeat ngram size. Defaults to 0.
            prefix_allowed_tokens_fn (callable, optional): Constrains generation
                per HF's `prefix_allowed_tokens_fn` contract. Defaults to None.
            length_penalty (float, optional): Length penalty. Defaults to 1.0.
            num_return_sequences (int, optional): Number of return sequences. Defaults to 1.
            do_sample (bool, optional): Do sample. Defaults to False.
            early_stopping (bool, optional): Early stopping. Defaults to False.
        Returns:
            torch.Tensor: lang_x with generated tokens appended to it
        """
        if num_beams > 1:
            # HF beam search expands the batch dim by num_beams, so the vision
            # inputs must be duplicated to keep one image set per beam.
            vision_x = vision_x.repeat_interleave(num_beams, dim=0)

        self._encode_vision_x(vision_x=vision_x)

        output = self.lang_encoder.generate(
            lang_x,
            attention_mask=attention_mask,
            # Stop generation at the end-of-chunk token.
            eos_token_id=self.eoc_token_id,
            num_beams=num_beams,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
            no_repeat_ngram_size=no_repeat_ngram_size,
            length_penalty=length_penalty,
            num_return_sequences=num_return_sequences,
            do_sample=do_sample,
            early_stopping=early_stopping,
        )

        # Generation always clears the conditioned layers afterwards.
        self.lang_encoder.clear_conditioned_layers()
        return output

    def _encode_vision_x(self, vision_x: torch.Tensor):
        """
        Compute media tokens from vision input by passing it through vision encoder and conditioning language model.
        Args:
            vision_x (torch.Tensor): Vision input
                shape (B, T_img, F, C, H, W)
                Images in the same chunk are collated along T_img, and frames are collated along F
                Currently only F=1 is supported (single-frame videos)

        rearrange code based on https://github.com/dhansmair/flamingo-mini
        """

        assert vision_x.ndim == 6, "vision_x should be of shape (b, T_img, F, C, H, W)"
        b, T, F = vision_x.shape[:3]
        assert F == 1, "Only single frame supported"

        # Collapse batch/time/frame dims so the encoder sees a plain image batch.
        vision_x = rearrange(vision_x, "b T F c h w -> (b T F) c h w")
        # Vision encoder is not trained through this path.
        with torch.no_grad():
            # NOTE(review): assumes an open_clip-style encoder whose .visual
            # returns a tuple with patch tokens at index 1 — confirm against
            # the encoder actually passed in.
            vision_x = self.vision_encoder.visual(vision_x)[1]
        vision_x = rearrange(vision_x, "(b T F) v d -> b T F v d", b=b, T=T, F=F)

        vision_x = self.perceiver(vision_x)  # reshapes to (b, T, n, d)

        # Attach the resampled visual latents to every Flamingo decoder layer.
        for layer in self.lang_encoder._get_decoder_layers():
            layer.condition_vis_x(vision_x)
|
minigpt2/lib/python3.10/site-packages/open_flamingo/src/flamingo_lm.py
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
|
| 5 |
+
from .helpers import GatedCrossAttentionBlock
|
| 6 |
+
from .utils import getattr_recursive, setattr_recursive
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class FlamingoLayer(nn.Module):
    """Pairs one language-model decoder layer with an optional gated
    cross-attention block.

    When ``gated_cross_attn_layer`` is None the layer is a transparent wrapper
    around the decoder layer; otherwise cross-attention over the conditioned
    visual features runs first, then the decoder layer.
    """

    def __init__(self, gated_cross_attn_layer, decoder_layer):
        super().__init__()
        self.gated_cross_attn_layer = gated_cross_attn_layer
        self.decoder_layer = decoder_layer
        self.vis_x = None
        self.media_locations = None
        # Fix: initialize attend_previous so an un-conditioned layer is in the
        # same state as one cleared by clear_conditioned_layers(), instead of
        # raising AttributeError if forward() reads it before conditioning.
        self.attend_previous = None

    def is_conditioned(self) -> bool:
        """Check whether the layer is conditioned."""
        return self.vis_x is not None

    # Used this great idea from this implementation of Flamingo (https://github.com/dhansmair/flamingo-mini/)
    def condition_vis_x(self, vis_x):
        """Store (or clear, with None) the visual features to cross-attend to."""
        self.vis_x = vis_x

    def condition_media_locations(self, media_locations):
        """Store the boolean mask marking <image> tokens in the text sequence."""
        self.media_locations = media_locations

    def condition_attend_previous(self, attend_previous):
        """Store whether text attends to the preceding (vs following) image."""
        self.attend_previous = attend_previous

    def forward(
        self,
        lang_x,
        attention_mask=None,
        **decoder_layer_kwargs,
    ):
        # Layers without a cross-attn block just run the wrapped decoder layer.
        if self.gated_cross_attn_layer is None:
            return self.decoder_layer(
                lang_x, attention_mask=attention_mask, **decoder_layer_kwargs
            )

        if self.vis_x is None:
            raise ValueError("vis_x must be conditioned before forward pass")

        if self.media_locations is None:
            raise ValueError("media_locations must be conditioned before forward pass")

        # Cross-attend text to the conditioned visual features, then run the
        # original decoder layer on the updated hidden states.
        lang_x = self.gated_cross_attn_layer(
            lang_x,
            self.vis_x,
            media_locations=self.media_locations,
            attend_previous=self.attend_previous,
        )
        lang_x = self.decoder_layer(
            lang_x, attention_mask=attention_mask, **decoder_layer_kwargs
        )
        return lang_x
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class FlamingoLMMixin(nn.Module):
    """
    Mixin to add cross-attention layers to a language model.
    """

    def set_decoder_layers_attr_name(self, decoder_layers_attr_name):
        # Dotted attribute path (e.g. "model.decoder.layers") locating the
        # decoder layer list inside the wrapped language model.
        self.decoder_layers_attr_name = decoder_layers_attr_name

    def _get_decoder_layers(self):
        """Return the decoder layer list via the configured dotted path."""
        return getattr_recursive(self, self.decoder_layers_attr_name)

    def _set_decoder_layers(self, value):
        """Replace the decoder layer list via the configured dotted path."""
        setattr_recursive(self, self.decoder_layers_attr_name, value)

    def init_flamingo(
        self,
        media_token_id,
        vis_hidden_size,
        cross_attn_every_n_layers,
        use_media_placement_augmentation,
    ):
        """
        Initialize Flamingo by adding a new gated cross attn to the decoder. Store the media token id for computing the media locations.
        """

        # One slot per decoder layer; only every cross_attn_every_n_layers-th
        # slot gets a GatedCrossAttentionBlock, the rest stay None.
        self.gated_cross_attn_layers = nn.ModuleList(
            [
                GatedCrossAttentionBlock(
                    dim=self.config.hidden_size, dim_visual=vis_hidden_size
                )
                if (layer_idx + 1) % cross_attn_every_n_layers == 0
                else None
                for layer_idx, _ in enumerate(self._get_decoder_layers())
            ]
        )
        # Wrap each original decoder layer together with its (possibly None)
        # cross-attn block so the pair runs as a single FlamingoLayer.
        self._set_decoder_layers(
            nn.ModuleList(
                [
                    FlamingoLayer(gated_cross_attn_layer, decoder_layer)
                    for gated_cross_attn_layer, decoder_layer in zip(
                        self.gated_cross_attn_layers, self._get_decoder_layers()
                    )
                ]
            )
        )
        self.media_token_id = media_token_id
        self.use_media_placement_augmentation = use_media_placement_augmentation
        self.initialized_flamingo = True

    def forward(self, *input, **kwargs):
        """Condition the Flamingo layers on the media locations before forward()"""
        # NOTE(review): `initialized_flamingo` is only assigned in
        # init_flamingo; if forward() runs first this raises AttributeError
        # rather than the intended ValueError — confirm call order.
        if not self.initialized_flamingo:
            raise ValueError(
                "Flamingo layers are not initialized. Please call `init_flamingo` first."
            )

        input_ids = kwargs["input_ids"] if "input_ids" in kwargs else input[0]
        # Boolean mask: True at positions holding the <image> token.
        media_locations = input_ids == self.media_token_id
        # With augmentation enabled, flip a fair coin per forward pass for
        # whether text attends to the preceding image; otherwise never.
        attend_previous = (
            (random.random() < 0.5) if self.use_media_placement_augmentation else False
        )

        # NOTE(review): this iterates `self.get_decoder().layers` while the
        # rest of the class uses `self._get_decoder_layers()`; the two can
        # diverge if decoder_layers_attr_name is not "decoder.layers" — verify
        # against the concrete model classes this mixin is applied to.
        for layer in self.get_decoder().layers:
            layer.condition_media_locations(media_locations)
            layer.condition_attend_previous(attend_previous)

        return super().forward(
            *input, **kwargs
        )  # Call the other parent's forward method

    def is_conditioned(self) -> bool:
        """Check whether all decoder layers are already conditioned."""
        return all(l.is_conditioned() for l in self._get_decoder_layers())

    def clear_conditioned_layers(self):
        """Drop cached visual state so the next pass must re-condition."""
        for layer in self._get_decoder_layers():
            layer.condition_vis_x(None)
            layer.condition_media_locations(None)
            layer.condition_attend_previous(None)
|
minigpt2/lib/python3.10/site-packages/open_flamingo/src/helpers.py
ADDED
|
@@ -0,0 +1,275 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Taken from https://github.com/lucidrains/flamingo-pytorch
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from einops import rearrange, repeat
|
| 7 |
+
from einops_exts import rearrange_many
|
| 8 |
+
from torch import einsum, nn
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def exists(val):
    """Return True if ``val`` is not None (lucidrains-style optional check)."""
    return val is not None
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def FeedForward(dim, mult=4):
    """Build a pre-norm two-layer MLP (LayerNorm -> Linear -> GELU -> Linear).

    Args:
        dim: input/output feature dimension.
        mult: expansion factor for the hidden layer (hidden = dim * mult).

    Returns:
        nn.Sequential mapping (..., dim) -> (..., dim); both linears are bias-free.
    """
    hidden_dim = int(dim * mult)
    stages = [
        nn.LayerNorm(dim),
        nn.Linear(dim, hidden_dim, bias=False),
        nn.GELU(),
        nn.Linear(hidden_dim, dim, bias=False),
    ]
    return nn.Sequential(*stages)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class PerceiverAttention(nn.Module):
    """Perceiver-style attention: learned latents query the media features.

    Keys/values are computed from the concatenation of media features and
    latents, so latents can also attend to themselves.
    """

    def __init__(self, *, dim, dim_head=64, heads=8):
        super().__init__()
        # Standard scaled dot-product scaling factor.
        self.scale = dim_head**-0.5
        self.heads = heads
        inner_dim = dim_head * heads

        self.norm_media = nn.LayerNorm(dim)
        self.norm_latents = nn.LayerNorm(dim)

        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)

    def forward(self, x, latents):
        """
        Args:
            x (torch.Tensor): image features
                shape (b, T, n1, D)
            latent (torch.Tensor): latent features
                shape (b, T, n2, D)
        """
        x = self.norm_media(x)
        latents = self.norm_latents(latents)

        h = self.heads

        # Queries come from the latents only; keys/values from media + latents.
        q = self.to_q(latents)
        kv_input = torch.cat((x, latents), dim=-2)
        k, v = self.to_kv(kv_input).chunk(2, dim=-1)
        q, k, v = rearrange_many((q, k, v), "b t n (h d) -> b h t n d", h=h)
        q = q * self.scale

        # attention
        sim = einsum("... i d, ... j d -> ... i j", q, k)
        # Subtract the row max for numerically stable softmax (no gradient
        # through the max itself).
        sim = sim - sim.amax(dim=-1, keepdim=True).detach()
        attn = sim.softmax(dim=-1)

        out = einsum("... i j, ... j d -> ... i d", attn, v)
        out = rearrange(out, "b h t n d -> b t n (h d)", h=h)
        return self.to_out(out)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class PerceiverResampler(nn.Module):
    """Resamples a variable number of visual tokens to ``num_latents`` latents
    using a stack of PerceiverAttention + FeedForward blocks with residuals.
    """

    def __init__(
        self,
        *,
        dim,
        depth=6,
        dim_head=64,
        heads=8,
        num_latents=64,
        max_num_media=None,
        max_num_frames=None,
        ff_mult=4,
    ):
        super().__init__()
        # Learned query latents, shared across batch and time.
        self.latents = nn.Parameter(torch.randn(num_latents, dim))
        # Optional learned positional embeddings over frames / media items;
        # only created when a maximum count is given.
        self.frame_embs = (
            nn.Parameter(torch.randn(max_num_frames, dim))
            if exists(max_num_frames)
            else None
        )
        self.media_time_embs = (
            nn.Parameter(torch.randn(max_num_media, 1, dim))
            if exists(max_num_media)
            else None
        )

        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(
                nn.ModuleList(
                    [
                        PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
                        FeedForward(dim=dim, mult=ff_mult),
                    ]
                )
            )

        self.norm = nn.LayerNorm(dim)

    def forward(self, x):
        """
        Args:
            x (torch.Tensor): image features
                shape (b, T, F, v, D)
        Returns:
            shape (b, T, n, D) where n is the number of latents (num_latents)
        """
        b, T, F, v = x.shape[:4]

        # frame and media time embeddings
        if exists(self.frame_embs):
            frame_embs = repeat(self.frame_embs[:F], "F d -> b T F v d", b=b, T=T, v=v)
            x = x + frame_embs
        x = rearrange(
            x, "b T F v d -> b T (F v) d"
        )  # flatten the frame and spatial dimensions
        if exists(self.media_time_embs):
            x = x + self.media_time_embs[:T]

        # blocks
        latents = repeat(self.latents, "n d -> b T n d", b=b, T=T)
        for attn, ff in self.layers:
            # Residual attention then residual feed-forward, per depth step.
            latents = attn(x, latents) + latents
            latents = ff(latents) + latents
        return self.norm(latents)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
# gated cross attention
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
class MaskedCrossAttention(nn.Module):
    """Cross-attention from text tokens to visual latents, masked so each text
    token only attends to the image(s) the interleaving rules allow.
    """

    def __init__(
        self,
        *,
        dim,
        dim_visual,
        dim_head=64,
        heads=8,
        only_attend_immediate_media=True,
    ):
        super().__init__()
        self.scale = dim_head**-0.5
        self.heads = heads
        inner_dim = dim_head * heads

        self.norm = nn.LayerNorm(dim)

        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_kv = nn.Linear(dim_visual, inner_dim * 2, bias=False)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)

        # whether for text to only attend to immediate preceding image, or all previous images
        self.only_attend_immediate_media = only_attend_immediate_media

    def forward(self, x, media, media_locations=None, attend_previous=True):
        """
        Args:
            x (torch.Tensor): text features
                shape (B, T_txt, D_txt)
            media (torch.Tensor): image features
                shape (B, T_img, n, D_img) where n is the dim of the latents
            media_locations: boolean mask identifying the media tokens in x
                shape (B, T_txt)
            attend_previous: bool
                If false, ignores immediately preceding image and starts attending when following image
        """
        _, T_img, n = media.shape[:3]
        h = self.heads

        x = self.norm(x)

        q = self.to_q(x)
        # Flatten (image, latent) into one key/value axis of length T_img * n.
        media = rearrange(media, "b t n d -> b (t n) d")

        k, v = self.to_kv(media).chunk(2, dim=-1)
        q, k, v = rearrange_many((q, k, v), "b n (h d) -> b h n d", h=h)

        q = q * self.scale

        sim = einsum("... i d, ... j d -> ... i j", q, k)

        if exists(media_locations):
            # at each boolean of True, increment the time counter (relative to media time)
            text_time = media_locations.cumsum(dim=-1)
            # Images are numbered 1..T_img in sequence order.
            media_time = torch.arange(T_img, device=x.device) + 1

            if not attend_previous:
                # Shift non-media tokens one image forward so they attend to
                # the following image instead of the preceding one.
                text_time[~media_locations] += 1
                # make sure max is still the number of images in the sequence
                text_time[
                    text_time
                    > repeat(
                        torch.count_nonzero(media_locations, dim=1),
                        "b -> b i",
                        i=text_time.shape[1],
                    )
                ] = 0

            # text time must equal media time if only attending to most immediate image
            # otherwise, as long as text time is greater than media time (if attending to all previous images / media)
            mask_op = torch.eq if self.only_attend_immediate_media else torch.ge

            text_to_media_mask = mask_op(
                rearrange(text_time, "b i -> b 1 i 1"),
                repeat(media_time, "j -> 1 1 1 (j n)", n=n),
            )
            # Disallowed pairs get the most negative finite value before softmax.
            sim = sim.masked_fill(~text_to_media_mask, -torch.finfo(sim.dtype).max)

        # Numerically stable softmax (row max subtracted without gradient).
        sim = sim - sim.amax(dim=-1, keepdim=True).detach()
        attn = sim.softmax(dim=-1)

        if exists(media_locations) and self.only_attend_immediate_media:
            # any text without a preceding media needs to have attention zeroed out
            text_without_media_mask = text_time == 0
            text_without_media_mask = rearrange(
                text_without_media_mask, "b i -> b 1 i 1"
            )
            attn = attn.masked_fill(text_without_media_mask, 0.0)

        out = einsum("... i j, ... j d -> ... i d", attn, v)
        out = rearrange(out, "b h n d -> b n (h d)")
        return self.to_out(out)
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
class GatedCrossAttentionBlock(nn.Module):
    """Tanh-gated cross-attention + feed-forward residual block.

    Both gate parameters start at 0.0, so tanh(gate) is 0 and the block is an
    identity map over the text stream at initialization; training opens the
    gates gradually.
    """

    def __init__(
        self,
        *,
        dim,
        dim_visual,
        dim_head=64,
        heads=8,
        ff_mult=4,
        only_attend_immediate_media=True,
    ):
        super().__init__()
        self.attn = MaskedCrossAttention(
            dim=dim,
            dim_visual=dim_visual,
            dim_head=dim_head,
            heads=heads,
            only_attend_immediate_media=only_attend_immediate_media,
        )
        # Scalar gates, zero-initialized (closed) for both sub-blocks.
        self.attn_gate = nn.Parameter(torch.tensor([0.0]))
        self.ff = FeedForward(dim, mult=ff_mult)
        self.ff_gate = nn.Parameter(torch.tensor([0.0]))

    def forward(
        self,
        x,
        media,
        media_locations=None,
        attend_previous=True,
    ):
        # Gated cross-attention residual over the text stream.
        attn_out = self.attn(
            x,
            media,
            media_locations=media_locations,
            attend_previous=attend_previous,
        )
        x = x + self.attn_gate.tanh() * attn_out
        # Gated feed-forward residual.
        x = x + self.ff_gate.tanh() * self.ff(x)
        return x
|
minigpt2/lib/python3.10/site-packages/open_flamingo/src/utils.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def extend_instance(obj, mixin):
    """Apply mixins to a class instance after creation"""
    original_cls = type(obj)
    # Rebase the instance's class on (mixin, original). The mixin must come
    # first in the MRO so its forward() logic overrides the base class's.
    obj.__class__ = type(original_cls.__name__, (mixin, original_cls), {})
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def getattr_recursive(obj, att):
    """
    Return nested attribute of obj
    Example: getattr_recursive(obj, 'a.b.c') is equivalent to obj.a.b.c
    """
    # Empty path names the object itself.
    if att == "":
        return obj
    # Walk the dotted path one segment at a time.
    for segment in att.split("."):
        obj = getattr(obj, segment)
    return obj
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def setattr_recursive(obj, att, val):
    """
    Set nested attribute of obj
    Example: setattr_recursive(obj, 'a.b.c', val) is equivalent to obj.a.b.c = val
    """
    # Split off the final segment; walk down to its parent, then assign.
    *parents, leaf = att.split(".")
    for segment in parents:
        obj = getattr(obj, segment)
    setattr(obj, leaf, val)
|
minigpt2/lib/python3.10/site-packages/open_flamingo/train/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
minigpt2/lib/python3.10/site-packages/open_flamingo/train/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (173 Bytes). View file
|
|
|
minigpt2/lib/python3.10/site-packages/open_flamingo/train/__pycache__/data.cpython-310.pyc
ADDED
|
Binary file (13 kB). View file
|
|
|
minigpt2/lib/python3.10/site-packages/open_flamingo/train/__pycache__/distributed.cpython-310.pyc
ADDED
|
Binary file (2.83 kB). View file
|
|
|