diff --git a/videollama2/lib/python3.10/site-packages/GitPython-3.1.43.dist-info/AUTHORS b/videollama2/lib/python3.10/site-packages/GitPython-3.1.43.dist-info/AUTHORS
new file mode 100644
index 0000000000000000000000000000000000000000..9311b39626f9cab901128b2442841a7218774cc0
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/GitPython-3.1.43.dist-info/AUTHORS
@@ -0,0 +1,58 @@
+GitPython was originally written by Michael Trier.
+GitPython 0.2 was partially (re)written by Sebastian Thiel, based on 0.1.6 and git-dulwich.
+
+Contributors are:
+
+-Michael Trier
+-Alan Briolat
+-Florian Apolloner
+-David Aguilar
+-Jelmer Vernooij
+-Steve Frécinaux
+-Kai Lautaportti
+-Paul Sowden
+-Sebastian Thiel
+-Jonathan Chu
+-Vincent Driessen
+-Phil Elson
+-Bernard `Guyzmo` Pratz
+-Timothy B. Hartman
+-Konstantin Popov
+-Peter Jones
+-Anson Mansfield
+-Ken Odegard
+-Alexis Horgix Chotard
+-Piotr Babij
+-Mikuláš Poul
+-Charles Bouchard-Légaré
+-Yaroslav Halchenko
+-Tim Swast
+-William Luc Ritchie
+-David Host
+-A. Jesse Jiryu Davis
+-Steven Whitman
+-Stefan Stancu
+-César Izurieta
+-Arthur Milchior
+-Anil Khatri
+-JJ Graham
+-Ben Thayer
+-Dries Kennes
+-Pratik Anurag
+-Harmon
+-Liam Beguin
+-Ram Rachum
+-Alba Mendez
+-Robert Westman
+-Hugo van Kemenade
+-Hiroki Tokunaga
+-Julien Mauroy
+-Patrick Gerard
+-Luke Twist
+-Joseph Hale
+-Santos Gallegos
+-Wenhan Zhu
+-Eliah Kagan
+-Ethan Lin
+
+Portions derived from other open source works and are clearly marked.
diff --git a/videollama2/lib/python3.10/site-packages/GitPython-3.1.43.dist-info/INSTALLER b/videollama2/lib/python3.10/site-packages/GitPython-3.1.43.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/GitPython-3.1.43.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/videollama2/lib/python3.10/site-packages/GitPython-3.1.43.dist-info/LICENSE b/videollama2/lib/python3.10/site-packages/GitPython-3.1.43.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..ba8a219fe1f27c10b50df8cd4f26c0ab833bbbc8
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/GitPython-3.1.43.dist-info/LICENSE
@@ -0,0 +1,29 @@
+Copyright (C) 2008, 2009 Michael Trier and contributors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+* Neither the name of the GitPython project nor the names of
+its contributors may be used to endorse or promote products derived
+from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/videollama2/lib/python3.10/site-packages/GitPython-3.1.43.dist-info/top_level.txt b/videollama2/lib/python3.10/site-packages/GitPython-3.1.43.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5664e303b5dc2e9ef8e14a0845d9486ec1920afd
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/GitPython-3.1.43.dist-info/top_level.txt
@@ -0,0 +1 @@
+git
diff --git a/videollama2/lib/python3.10/site-packages/cycler-0.12.1.dist-info/INSTALLER b/videollama2/lib/python3.10/site-packages/cycler-0.12.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/cycler-0.12.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/videollama2/lib/python3.10/site-packages/cycler-0.12.1.dist-info/LICENSE b/videollama2/lib/python3.10/site-packages/cycler-0.12.1.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..d41d808995af2d59db2496a3ae772ca3d849cab2
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/cycler-0.12.1.dist-info/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2015, matplotlib project
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of the matplotlib project nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/videollama2/lib/python3.10/site-packages/cycler-0.12.1.dist-info/METADATA b/videollama2/lib/python3.10/site-packages/cycler-0.12.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..e81ab4fa3c9649ef7bc6355d1042f0344c90d83b
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/cycler-0.12.1.dist-info/METADATA
@@ -0,0 +1,78 @@
+Metadata-Version: 2.1
+Name: cycler
+Version: 0.12.1
+Summary: Composable style cycles
+Author-email: Thomas A Caswell
+License: Copyright (c) 2015, matplotlib project
+        All rights reserved.
+
+        Redistribution and use in source and binary forms, with or without
+        modification, are permitted provided that the following conditions are met:
+
+        * Redistributions of source code must retain the above copyright notice, this
+          list of conditions and the following disclaimer.
+
+        * Redistributions in binary form must reproduce the above copyright notice,
+          this list of conditions and the following disclaimer in the documentation
+          and/or other materials provided with the distribution.
+
+        * Neither the name of the matplotlib project nor the names of its
+          contributors may be used to endorse or promote products derived from
+          this software without specific prior written permission.
+
+        THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+        AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+        IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+        DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+        FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+        DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+        SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+        CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+        OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+        OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+Project-URL: homepage, https://matplotlib.org/cycler/
+Project-URL: repository, https://github.com/matplotlib/cycler
+Keywords: cycle kwargs
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Development Status :: 4 - Beta
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3 :: Only
+Requires-Python: >=3.8
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Provides-Extra: docs
+Requires-Dist: ipython ; extra == 'docs'
+Requires-Dist: matplotlib ; extra == 'docs'
+Requires-Dist: numpydoc ; extra == 'docs'
+Requires-Dist: sphinx ; extra == 'docs'
+Provides-Extra: tests
+Requires-Dist: pytest ; extra == 'tests'
+Requires-Dist: pytest-cov ; extra == 'tests'
+Requires-Dist: pytest-xdist ; extra == 'tests'
+
+|PyPi|_ |Conda|_ |Supported Python versions|_ |GitHub Actions|_ |Codecov|_
+
+.. |PyPi| image:: https://img.shields.io/pypi/v/cycler.svg?style=flat
+.. _PyPi: https://pypi.python.org/pypi/cycler
+
+.. |Conda| image:: https://img.shields.io/conda/v/conda-forge/cycler
+.. _Conda: https://anaconda.org/conda-forge/cycler
+
+.. |Supported Python versions| image:: https://img.shields.io/pypi/pyversions/cycler.svg
+.. _Supported Python versions: https://pypi.python.org/pypi/cycler
+
+.. |GitHub Actions| image:: https://github.com/matplotlib/cycler/actions/workflows/tests.yml/badge.svg
+.. _GitHub Actions: https://github.com/matplotlib/cycler/actions
+
+.. |Codecov| image:: https://codecov.io/github/matplotlib/cycler/badge.svg?branch=main&service=github
+.. _Codecov: https://codecov.io/github/matplotlib/cycler?branch=main
+
+cycler: composable cycles
+=========================
+
+Docs: https://matplotlib.org/cycler/
diff --git a/videollama2/lib/python3.10/site-packages/cycler-0.12.1.dist-info/RECORD b/videollama2/lib/python3.10/site-packages/cycler-0.12.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..20c501fafa881709a8f5e19ce12ce91373d3555d
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/cycler-0.12.1.dist-info/RECORD
@@ -0,0 +1,10 @@
+cycler-0.12.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+cycler-0.12.1.dist-info/LICENSE,sha256=8SGBQ9dm2j_qZvEzlrfxXfRqgzA_Kb-Wum6Y601C9Ag,1497
+cycler-0.12.1.dist-info/METADATA,sha256=IyieGbdvHgE5Qidpbmryts0c556JcxIJv5GVFIsY7TY,3779
+cycler-0.12.1.dist-info/RECORD,,
+cycler-0.12.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+cycler-0.12.1.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
+cycler-0.12.1.dist-info/top_level.txt,sha256=D8BVVDdAAelLb2FOEz7lDpc6-AL21ylKPrMhtG6yzyE,7
+cycler/__init__.py,sha256=1JdRgv5Zzxo-W1ev7B_LWquysWP6LZH6CHk_COtIaXE,16709
+cycler/__pycache__/__init__.cpython-310.pyc,,
+cycler/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
diff --git a/videollama2/lib/python3.10/site-packages/cycler-0.12.1.dist-info/REQUESTED b/videollama2/lib/python3.10/site-packages/cycler-0.12.1.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/config/__pycache__/_apply_pyprojecttoml.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/setuptools/config/__pycache__/_apply_pyprojecttoml.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..75c8a5bd277ff36287ede46fbb66244e05941188
Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/setuptools/config/__pycache__/_apply_pyprojecttoml.cpython-310.pyc differ
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/config/__pycache__/pyprojecttoml.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/setuptools/config/__pycache__/pyprojecttoml.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..31674ada870338aa8f4947161373844fc7f77666
Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/setuptools/config/__pycache__/pyprojecttoml.cpython-310.pyc differ
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/__pycache__/extra_validations.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/__pycache__/extra_validations.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2e91394e94c43cb1ddae9e47fad9ea75a9e07054
Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/__pycache__/extra_validations.cpython-310.pyc differ
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/formats.py b/videollama2/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/formats.py
new file mode 100644
index 0000000000000000000000000000000000000000..153b1f0b27fa07974a5cf8d4151540edfec7e8eb
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/config/_validate_pyproject/formats.py
@@ -0,0 +1,375 @@
+"""
+The functions in this module are used to validate schemas with the
+`format JSON Schema keyword
+<https://json-schema.org/understanding-json-schema/reference/string#format>`_.
+
+The correspondence is given by replacing the ``_`` character in the name of the
+function with a ``-`` to obtain the format name and vice versa.
+"""
+
+import builtins
+import logging
+import os
+import re
+import string
+import typing
+from itertools import chain as _chain
+
+if typing.TYPE_CHECKING:
+    from typing_extensions import Literal
+
+_logger = logging.getLogger(__name__)
+
+# -------------------------------------------------------------------------------------
+# PEP 440
+
+VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+VERSION_REGEX = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.X | re.I)
+
+
+def pep440(version: str) -> bool:
+    """See :ref:`PyPA's version specification `
+    (initially introduced in :pep:`440`).
+    """
+    return VERSION_REGEX.match(version) is not None
+
+
+# -------------------------------------------------------------------------------------
+# PEP 508
+
+PEP508_IDENTIFIER_PATTERN = r"([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])"
+PEP508_IDENTIFIER_REGEX = re.compile(f"^{PEP508_IDENTIFIER_PATTERN}$", re.I)
+
+
+def pep508_identifier(name: str) -> bool:
+    """See :ref:`PyPA's name specification `
+    (initially introduced in :pep:`508#names`).
+    """
+    return PEP508_IDENTIFIER_REGEX.match(name) is not None
+
+
+try:
+    try:
+        from packaging import requirements as _req
+    except ImportError:  # pragma: no cover
+        # let's try setuptools vendored version
+        from setuptools._vendor.packaging import (  # type: ignore[no-redef]
+            requirements as _req,
+        )
+
+    def pep508(value: str) -> bool:
+        """See :ref:`PyPA's dependency specifiers `
+        (initially introduced in :pep:`508`).
+        """
+        try:
+            _req.Requirement(value)
+            return True
+        except _req.InvalidRequirement:
+            return False
+
+except ImportError:  # pragma: no cover
+    _logger.warning(
+        "Could not find an installation of `packaging`. Requirements, dependencies and "
+        "versions might not be validated. "
+        "To enforce validation, please install `packaging`."
+    )
+
+    def pep508(value: str) -> bool:
+        return True
+
+
+def pep508_versionspec(value: str) -> bool:
+    """Expression that can be used to specify/lock versions (including ranges)
+    See ``versionspec`` in :ref:`PyPA's dependency specifiers
+    <dependency-specifiers>` (initially introduced in :pep:`508`).
+    """
+    if any(c in value for c in (";", "]", "@")):
+        # In PEP 508:
+        # conditional markers, extras and URL specs are not included in the
+        # versionspec
+        return False
+    # Let's pretend we have a dependency called `requirement` with the given
+    # version spec, then we can reuse the pep508 function for validation:
+    return pep508(f"requirement{value}")
+
+
+# -------------------------------------------------------------------------------------
+# PEP 517
+
+
+def pep517_backend_reference(value: str) -> bool:
+    """See PyPA's specification for defining build-backend references
+    introduced in :pep:`517#source-trees`.
+
+    This is similar to an entry-point reference (e.g., ``package.module:object``).
+    """
+    module, _, obj = value.partition(":")
+    identifiers = (i.strip() for i in _chain(module.split("."), obj.split(".")))
+    return all(python_identifier(i) for i in identifiers if i)
+
+
+# -------------------------------------------------------------------------------------
+# Classifiers - PEP 301
+
+
+def _download_classifiers() -> str:
+    import ssl
+    from email.message import Message
+    from urllib.request import urlopen
+
+    url = "https://pypi.org/pypi?:action=list_classifiers"
+    context = ssl.create_default_context()
+    with urlopen(url, context=context) as response:  # noqa: S310 (audit URLs)
+        headers = Message()
+        headers["content_type"] = response.getheader("content-type", "text/plain")
+        return response.read().decode(headers.get_param("charset", "utf-8"))  # type: ignore[no-any-return]
+
+
+class _TroveClassifier:
+    """The ``trove_classifiers`` package is the official way of validating classifiers,
+    however this package might not always be available.
+    As a workaround we can still download a list from PyPI.
+    We also don't want to be overly strict about it, so simply skipping silently is an
+    option (classifiers will be validated anyway during the upload to PyPI).
+    """
+
+    downloaded: typing.Union[None, "Literal[False]", typing.Set[str]]
+
+    def __init__(self) -> None:
+        self.downloaded = None
+        self._skip_download = False
+        # None => not cached yet
+        # False => cache not available
+        self.__name__ = "trove_classifier"  # Emulate a public function
+
+    def _disable_download(self) -> None:
+        # This is a private API. Only setuptools has consent to use it.
+        self._skip_download = True
+
+    def __call__(self, value: str) -> bool:
+        if self.downloaded is False or self._skip_download is True:
+            return True
+
+        if os.getenv("NO_NETWORK") or os.getenv("VALIDATE_PYPROJECT_NO_NETWORK"):
+            self.downloaded = False
+            msg = (
+                "Install ``trove-classifiers`` to ensure proper validation. "
+                "Skipping download of classifiers list from PyPI (NO_NETWORK)."
+            )
+            _logger.debug(msg)
+            return True
+
+        if self.downloaded is None:
+            msg = (
+                "Install ``trove-classifiers`` to ensure proper validation. "
+                "Meanwhile a list of classifiers will be downloaded from PyPI."
+            )
+            _logger.debug(msg)
+            try:
+                self.downloaded = set(_download_classifiers().splitlines())
+            except Exception:
+                self.downloaded = False
+                _logger.debug("Problem with download, skipping validation")
+                return True
+
+        return value in self.downloaded or value.lower().startswith("private ::")
+
+
+try:
+    from trove_classifiers import classifiers as _trove_classifiers
+
+    def trove_classifier(value: str) -> bool:
+        """See https://pypi.org/classifiers/"""
+        return value in _trove_classifiers or value.lower().startswith("private ::")
+
+except ImportError:  # pragma: no cover
+    trove_classifier = _TroveClassifier()
+
+
+# -------------------------------------------------------------------------------------
+# Stub packages - PEP 561
+
+
+def pep561_stub_name(value: str) -> bool:
+    """Name of a directory containing type stubs.
+    It must follow the name scheme ``<package>-stubs`` as defined in
+    :pep:`561#stub-only-packages`.
+    """
+    top, *children = value.split(".")
+    if not top.endswith("-stubs"):
+        return False
+    return python_module_name(".".join([top[: -len("-stubs")], *children]))
+
+
+# -------------------------------------------------------------------------------------
+# Non-PEP related
+
+
+def url(value: str) -> bool:
+    """Valid URL (validation uses :obj:`urllib.parse`).
+    For maximum compatibility please make sure to include a ``scheme`` prefix
+    in your URL (e.g. ``http://``).
+    """
+    from urllib.parse import urlparse
+
+    try:
+        parts = urlparse(value)
+        if not parts.scheme:
+            _logger.warning(
+                "For maximum compatibility please make sure to include a "
+                "`scheme` prefix in your URL (e.g. 'http://'). "
+                f"Given value: {value}"
+            )
+            if not (value.startswith("/") or value.startswith("\\") or "@" in value):
+                parts = urlparse(f"http://{value}")
+
+        return bool(parts.scheme and parts.netloc)
+    except Exception:
+        return False
+
+
+# https://packaging.python.org/specifications/entry-points/
+ENTRYPOINT_PATTERN = r"[^\[\s=]([^=]*[^\s=])?"
+ENTRYPOINT_REGEX = re.compile(f"^{ENTRYPOINT_PATTERN}$", re.I)
+RECOMMEDED_ENTRYPOINT_PATTERN = r"[\w.-]+"
+RECOMMEDED_ENTRYPOINT_REGEX = re.compile(f"^{RECOMMEDED_ENTRYPOINT_PATTERN}$", re.I)
+ENTRYPOINT_GROUP_PATTERN = r"\w+(\.\w+)*"
+ENTRYPOINT_GROUP_REGEX = re.compile(f"^{ENTRYPOINT_GROUP_PATTERN}$", re.I)
+
+
+def python_identifier(value: str) -> bool:
+    """Can be used as identifier in Python.
+    (Validation uses :obj:`str.isidentifier`).
+    """
+    return value.isidentifier()
+
+
+def python_qualified_identifier(value: str) -> bool:
+    """
+    Python "dotted identifier", i.e. a sequence of :obj:`python_identifier`
+    concatenated with ``"."`` (e.g.: ``package.module.submodule``).
+    """
+    if value.startswith(".") or value.endswith("."):
+        return False
+    return all(python_identifier(m) for m in value.split("."))
+
+
+def python_module_name(value: str) -> bool:
+    """Module name that can be used in an ``import``-statement in Python.
+    See :obj:`python_qualified_identifier`.
+    """
+    return python_qualified_identifier(value)
+
+
+def python_module_name_relaxed(value: str) -> bool:
+    """Similar to :obj:`python_module_name`, but relaxed to also accept
+    dash characters (``-``) and cover special cases like ``pip-run``.
+
+    It is recommended, however, that beginners avoid dash characters,
+    as they require advanced knowledge about Python internals.
+
+    The following are disallowed:
+
+    * names starting/ending in dashes,
+    * names ending in ``-stubs`` (potentially collide with :obj:`pep561_stub_name`).
+    """
+    if value.startswith("-") or value.endswith("-"):
+        return False
+    if value.endswith("-stubs"):
+        return False  # Avoid collision with PEP 561
+    return python_module_name(value.replace("-", "_"))
+
+
+def python_entrypoint_group(value: str) -> bool:
+    """See ``Data model > group`` in the :ref:`PyPA's entry-points specification
+    <entry-points>`.
+    """
+    return ENTRYPOINT_GROUP_REGEX.match(value) is not None
+
+
+def python_entrypoint_name(value: str) -> bool:
+    """See ``Data model > name`` in the :ref:`PyPA's entry-points specification
+    <entry-points>`.
+    """
+    if not ENTRYPOINT_REGEX.match(value):
+        return False
+    if not RECOMMEDED_ENTRYPOINT_REGEX.match(value):
+        msg = f"Entry point `{value}` does not follow recommended pattern: "
+        msg += RECOMMEDED_ENTRYPOINT_PATTERN
+        _logger.warning(msg)
+    return True
+
+
+def python_entrypoint_reference(value: str) -> bool:
+    """Reference to a Python object using in the format::
+
+        importable.module:object.attr
+
+    See ``Data model > object reference`` in the :ref:`PyPA's entry-points specification
+    <entry-points>`.
+    """
+    module, _, rest = value.partition(":")
+    if "[" in rest:
+        obj, _, extras_ = rest.partition("[")
+        if extras_.strip()[-1] != "]":
+            return False
+        extras = (x.strip() for x in extras_.strip(string.whitespace + "[]").split(","))
+        if not all(pep508_identifier(e) for e in extras):
+            return False
+        _logger.warning(f"`{value}` - using extras for entry points is not recommended")
+    else:
+        obj = rest
+
+    module_parts = module.split(".")
+    identifiers = _chain(module_parts, obj.split(".")) if rest else module_parts
+    return all(python_identifier(i.strip()) for i in identifiers)
+
+
+def uint8(value: builtins.int) -> bool:
+    r"""Unsigned 8-bit integer (:math:`0 \leq x < 2^8`)"""
+    return 0 <= value < 2**8
+
+
+def uint16(value: builtins.int) -> bool:
+    r"""Unsigned 16-bit integer (:math:`0 \leq x < 2^{16}`)"""
+    return 0 <= value < 2**16
+
+
+def uint(value: builtins.int) -> bool:
+    r"""Unsigned 64-bit integer (:math:`0 \leq x < 2^{64}`)"""
+    return 0 <= value < 2**64
+
+
+def int(value: builtins.int) -> bool:
+    r"""Signed 64-bit integer (:math:`-2^{63} \leq x < 2^{63}`)"""
+    return -(2**63) <= value < 2**63
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/__init__.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb70bfb7115a2a94a8b942b31cafc3a550f0c005
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/__init__.py
@@ -0,0 +1,13 @@
+import locale
+import sys
+
+import pytest
+
+__all__ = ['fail_on_ascii']
+
+if sys.version_info >= (3, 11):
+    locale_encoding = locale.getencoding()
+else:
+    locale_encoding = locale.getpreferredencoding(False)
+is_ascii = locale_encoding == 'ANSI_X3.4-1968'
+fail_on_ascii = pytest.mark.xfail(is_ascii, reason="Test fails in this locale")
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/config/__pycache__/test_setupcfg.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/setuptools/tests/config/__pycache__/test_setupcfg.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5db03ed7af9fe3d6b00c14299c4ac73a0a11257f
Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/setuptools/tests/config/__pycache__/test_setupcfg.cpython-310.pyc differ
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/config/downloads/preload.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/config/downloads/preload.py
new file mode 100644
index 0000000000000000000000000000000000000000..8eeb5dd75d3dcb375cee5acaf11ad385084bff5a
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/config/downloads/preload.py
@@ -0,0 +1,18 @@
+"""This file can be used to preload files needed for testing.
+
+For example you can use::
+
+    cd setuptools/tests/config
+    python -m downloads.preload setupcfg_examples.txt
+
+to make sure the `setup.cfg` examples are downloaded before starting the tests.
+"""
+
+import sys
+from pathlib import Path
+
+from . import retrieve_file, urls_from_file
+
+if __name__ == "__main__":
+    urls = urls_from_file(Path(sys.argv[1]))
+    list(map(retrieve_file, urls))
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/config/setupcfg_examples.txt b/videollama2/lib/python3.10/site-packages/setuptools/tests/config/setupcfg_examples.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6aab887ff1fe631d97f1abea90a8448040746a12
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/config/setupcfg_examples.txt
@@ -0,0 +1,22 @@
+# ====================================================================
+# Some popular packages that use setup.cfg (and others not so popular)
+# Reference: https://hugovk.github.io/top-pypi-packages/
+# ====================================================================
+https://github.com/pypa/setuptools/raw/52c990172fec37766b3566679724aa8bf70ae06d/setup.cfg
+https://github.com/pypa/wheel/raw/0acd203cd896afec7f715aa2ff5980a403459a3b/setup.cfg
+https://github.com/python/importlib_metadata/raw/2f05392ca980952a6960d82b2f2d2ea10aa53239/setup.cfg
+https://github.com/jaraco/skeleton/raw/d9008b5c510cd6969127a6a2ab6f832edddef296/setup.cfg
+https://github.com/jaraco/zipp/raw/700d3a96390e970b6b962823bfea78b4f7e1c537/setup.cfg
+https://github.com/pallets/jinja/raw/7d72eb7fefb7dce065193967f31f805180508448/setup.cfg
+https://github.com/tkem/cachetools/raw/2fd87a94b8d3861d80e9e4236cd480bfdd21c90d/setup.cfg
+https://github.com/aio-libs/aiohttp/raw/5e0e6b7080f2408d5f1dd544c0e1cf88378b7b10/setup.cfg
+https://github.com/pallets/flask/raw/9486b6cf57bd6a8a261f67091aca8ca78eeec1e3/setup.cfg
+https://github.com/pallets/click/raw/6411f425fae545f42795665af4162006b36c5e4a/setup.cfg
+https://github.com/sqlalchemy/sqlalchemy/raw/533f5718904b620be8d63f2474229945d6f8ba5d/setup.cfg
+https://github.com/pytest-dev/pluggy/raw/461ef63291d13589c4e21aa182cd1529257e9a0a/setup.cfg
+https://github.com/pytest-dev/pytest/raw/c7be96dae487edbd2f55b561b31b68afac1dabe6/setup.cfg
+https://github.com/platformdirs/platformdirs/raw/7b7852128dd6f07511b618d6edea35046bd0c6ff/setup.cfg
+https://github.com/pandas-dev/pandas/raw/bc17343f934a33dc231c8c74be95d8365537c376/setup.cfg
+https://github.com/django/django/raw/4e249d11a6e56ca8feb4b055b681cec457ef3a3d/setup.cfg
+https://github.com/pyscaffold/pyscaffold/raw/de7aa5dc059fbd04307419c667cc4961bc9df4b8/setup.cfg
+https://github.com/pypa/virtualenv/raw/f92eda6e3da26a4d28c2663ffb85c4960bdb990c/setup.cfg
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/config/test_apply_pyprojecttoml.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/config/test_apply_pyprojecttoml.py
new file mode 100644
index 0000000000000000000000000000000000000000..20146b4a89f567b6038bcba9c36d40cbb933ef51
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/config/test_apply_pyprojecttoml.py
@@ -0,0 +1,539 @@
+"""Make sure that applying the configuration from pyproject.toml is equivalent to
+applying a similar configuration from setup.cfg
+
+To run these tests offline, please have a look at ``./downloads/preload.py``
+"""
+
+from __future__ import annotations
+
+import io
+import re
+import tarfile
+from inspect import cleandoc
+from pathlib import Path
+from unittest.mock import Mock
+
+import pytest
+from ini2toml.api import LiteTranslator
+from packaging.metadata import Metadata
+
+import setuptools  # noqa: F401 # ensure monkey patch to metadata
+from setuptools._static import is_static
+from setuptools.command.egg_info import write_requirements
+from setuptools.config import expand, pyprojecttoml, setupcfg
+from setuptools.config._apply_pyprojecttoml import _MissingDynamic, _some_attrgetter
+from setuptools.dist import Distribution
+from setuptools.errors import RemovedConfigError
+
+from .downloads import retrieve_file, urls_from_file
+
+HERE = Path(__file__).parent
+EXAMPLES_FILE = "setupcfg_examples.txt"
+
+
+def makedist(path, **attrs):
+    return Distribution({"src_root": path, **attrs})
+
+
+@pytest.mark.parametrize("url", urls_from_file(HERE / EXAMPLES_FILE))
+@pytest.mark.filterwarnings("ignore")
+@pytest.mark.uses_network
+def test_apply_pyproject_equivalent_to_setupcfg(url, monkeypatch, tmp_path):
+    monkeypatch.setattr(expand, "read_attr", Mock(return_value="0.0.1"))
+    setupcfg_example = retrieve_file(url)
+    pyproject_example = Path(tmp_path, "pyproject.toml")
+    setupcfg_text = setupcfg_example.read_text(encoding="utf-8")
+    toml_config = LiteTranslator().translate(setupcfg_text, "setup.cfg")
+    pyproject_example.write_text(toml_config, encoding="utf-8")
+
+    dist_toml = pyprojecttoml.apply_configuration(makedist(tmp_path), pyproject_example)
+    dist_cfg = setupcfg.apply_configuration(makedist(tmp_path), setupcfg_example)
+
+    pkg_info_toml = core_metadata(dist_toml)
+    pkg_info_cfg = core_metadata(dist_cfg)
+    assert pkg_info_toml == pkg_info_cfg
+
+    if any(getattr(d, "license_files", None) for d in (dist_toml, dist_cfg)):
+        assert set(dist_toml.license_files) == set(dist_cfg.license_files)
+
+    if any(getattr(d, "entry_points", None) for d in (dist_toml, dist_cfg)):
+        print(dist_cfg.entry_points)
+        ep_toml = {
+            (k, *sorted(i.replace(" ", "") for i in v))
+            for k, v in dist_toml.entry_points.items()
+        }
+        ep_cfg = {
+            (k, *sorted(i.replace(" ", "") for i in v))
+            for k, v in dist_cfg.entry_points.items()
+        }
+        assert ep_toml == ep_cfg
+
+    if any(getattr(d, "package_data", None) for d in (dist_toml, dist_cfg)):
+        pkg_data_toml = {(k, *sorted(v)) for k, v in dist_toml.package_data.items()}
+        pkg_data_cfg = {(k, *sorted(v)) for k, v in dist_cfg.package_data.items()}
+        assert pkg_data_toml == pkg_data_cfg
+
+    if any(getattr(d, "data_files", None) for d in (dist_toml, dist_cfg)):
+        data_files_toml = {(k, *sorted(v)) for k, v in dist_toml.data_files}
+        data_files_cfg = {(k, *sorted(v)) for k, v in dist_cfg.data_files}
+        assert data_files_toml == data_files_cfg
+
+    assert set(dist_toml.install_requires) == set(dist_cfg.install_requires)
+    if any(getattr(d, "extras_require", None) for d in (dist_toml, dist_cfg)):
+        extra_req_toml = {(k, *sorted(v)) for k, v in dist_toml.extras_require.items()}
+        extra_req_cfg = {(k, *sorted(v)) for k, v in dist_cfg.extras_require.items()}
+        assert extra_req_toml == extra_req_cfg
+
+
+PEP621_EXAMPLE = """\
+[project]
+name = "spam"
+version = "2020.0.0"
+description = "Lovely Spam! Wonderful Spam!"
+readme = "README.rst"
+requires-python = ">=3.8"
+license = {file = "LICENSE.txt"}
+keywords = ["egg", "bacon", "sausage", "tomatoes", "Lobster Thermidor"]
+authors = [
+  {email = "hi@pradyunsg.me"},
+  {name = "Tzu-Ping Chung"}
+]
+maintainers = [
+  {name = "Brett Cannon", email = "brett@python.org"},
+  {name = "John X. Ãørçeč", email = "john@utf8.org"},
+  {name = "Γαμα קּ 東", email = "gama@utf8.org"},
+]
+classifiers = [
+  "Development Status :: 4 - Beta",
+  "Programming Language :: Python"
+]
+
+dependencies = [
+  "httpx",
+  "gidgethub[httpx]>4.0.0",
+  "django>2.1; os_name != 'nt'",
+  "django>2.0; os_name == 'nt'"
+]
+
+[project.optional-dependencies]
+test = [
+  "pytest < 5.0.0",
+  "pytest-cov[all]"
+]
+
+[project.urls]
+homepage = "http://example.com"
+documentation = "http://readthedocs.org"
+repository = "http://github.com"
+changelog = "http://github.com/me/spam/blob/master/CHANGELOG.md"
+
+[project.scripts]
+spam-cli = "spam:main_cli"
+
+[project.gui-scripts]
+spam-gui = "spam:main_gui"
+
+[project.entry-points."spam.magical"]
+tomatoes = "spam:main_tomatoes"
+"""
+
+PEP621_INTERNATIONAL_EMAIL_EXAMPLE = """\
+[project]
+name = "spam"
+version = "2020.0.0"
+authors = [
+  {email = "hi@pradyunsg.me"},
+  {name = "Tzu-Ping Chung"}
+]
+maintainers = [
+  {name = "Степан Бандера", email = "криївка@оун-упа.укр"},
+]
+"""
+
+PEP621_EXAMPLE_SCRIPT = """
+def main_cli(): pass
+def main_gui(): pass
+def main_tomatoes(): pass
+"""
+
+
+def _pep621_example_project(
+    tmp_path,
+    readme="README.rst",
+    pyproject_text=PEP621_EXAMPLE,
+):
+    pyproject = tmp_path / "pyproject.toml"
+    text = pyproject_text
+    replacements = {'readme = "README.rst"': f'readme = "{readme}"'}
+    for orig, subst in replacements.items():
+        text = text.replace(orig, subst)
+    pyproject.write_text(text, encoding="utf-8")
+
+    (tmp_path / readme).write_text("hello world", encoding="utf-8")
+    (tmp_path / "LICENSE.txt").write_text("--- LICENSE stub ---", encoding="utf-8")
+    (tmp_path / "spam.py").write_text(PEP621_EXAMPLE_SCRIPT, encoding="utf-8")
+    return pyproject
+
+
+def test_pep621_example(tmp_path):
+    """Make sure the example in PEP 621 works"""
+    pyproject = _pep621_example_project(tmp_path)
+    dist = pyprojecttoml.apply_configuration(makedist(tmp_path), pyproject)
+    assert dist.metadata.license == "--- LICENSE stub ---"
+    assert set(dist.metadata.license_files) == {"LICENSE.txt"}
+
+
+@pytest.mark.parametrize(
+    ("readme", "ctype"),
+    [
+        ("Readme.txt", "text/plain"),
+        ("readme.md", "text/markdown"),
+        ("text.rst", "text/x-rst"),
+    ],
+)
+def test_readme_content_type(tmp_path, readme, ctype):
+    pyproject = _pep621_example_project(tmp_path, readme)
+    dist = pyprojecttoml.apply_configuration(makedist(tmp_path), pyproject)
+    assert dist.metadata.long_description_content_type == ctype
+
+
+def test_undefined_content_type(tmp_path):
+    pyproject = _pep621_example_project(tmp_path, "README.tex")
+    with pytest.raises(ValueError, match="Undefined content type for README.tex"):
+        pyprojecttoml.apply_configuration(makedist(tmp_path), pyproject)
+
+
+def test_no_explicit_content_type_for_missing_extension(tmp_path):
+    pyproject = _pep621_example_project(tmp_path, "README")
+    dist = pyprojecttoml.apply_configuration(makedist(tmp_path), pyproject)
+    assert dist.metadata.long_description_content_type is None
+
+
+@pytest.mark.parametrize(
+    ("pyproject_text", "expected_maintainers_meta_value"),
+    (
+        pytest.param(
+            PEP621_EXAMPLE,
+            (
+                'Brett Cannon <brett@python.org>, "John X. Ãørçeč" <john@utf8.org>, '
+                'Γαμα קּ 東 <gama@utf8.org>'
+            ),
+            id='non-international-emails',
+        ),
+        pytest.param(
+            PEP621_INTERNATIONAL_EMAIL_EXAMPLE,
+            'Степан Бандера <криївка@оун-упа.укр>',
+            marks=pytest.mark.xfail(
+                reason="CPython's `email.headerregistry.Address` only supports "
+                'RFC 5322, as of Nov 10, 2022 and latest Python 3.11.0',
+                strict=True,
+            ),
+            id='international-email',
+        ),
+    ),
+)
+def test_utf8_maintainer_in_metadata(  # issue-3663
+    expected_maintainers_meta_value,
+    pyproject_text,
+    tmp_path,
+):
+    pyproject = _pep621_example_project(
+        tmp_path,
+        "README",
+        pyproject_text=pyproject_text,
+    )
+    dist = pyprojecttoml.apply_configuration(makedist(tmp_path), pyproject)
+    assert dist.metadata.maintainer_email == expected_maintainers_meta_value
+    pkg_file = tmp_path / "PKG-FILE"
+    with open(pkg_file, "w", encoding="utf-8") as fh:
+        dist.metadata.write_pkg_file(fh)
+    content = pkg_file.read_text(encoding="utf-8")
+    assert f"Maintainer-email: {expected_maintainers_meta_value}" in content
+
+
+class TestLicenseFiles:
+    # TODO: After PEP 639 is accepted, we have to move the license-files
+    #       to the `project` table instead of `tool.setuptools`
+
+    def base_pyproject(self, tmp_path, additional_text):
+        pyproject = _pep621_example_project(tmp_path, "README")
+        text = pyproject.read_text(encoding="utf-8")
+
+        # Sanity-check
+        assert 'license = {file = "LICENSE.txt"}' in text
+        assert "[tool.setuptools]" not in text
+
+        text = f"{text}\n{additional_text}\n"
+        pyproject.write_text(text, encoding="utf-8")
+        return pyproject
+
+    def test_both_license_and_license_files_defined(self, tmp_path):
+        setuptools_config = '[tool.setuptools]\nlicense-files = ["_FILE*"]'
+        pyproject = self.base_pyproject(tmp_path, setuptools_config)
+
+        (tmp_path / "_FILE.txt").touch()
+        (tmp_path / "_FILE.rst").touch()
+
+        # Would normally match the `license_files` patterns, but we want to exclude it
+        # by being explicit. On the other hand, contents should be added to `license`
+        license = tmp_path / "LICENSE.txt"
+        license.write_text("LicenseRef-Proprietary\n", encoding="utf-8")
+
+        dist = pyprojecttoml.apply_configuration(makedist(tmp_path), pyproject)
+        assert set(dist.metadata.license_files) == {"_FILE.rst", "_FILE.txt"}
+        assert dist.metadata.license == "LicenseRef-Proprietary\n"
+
+    def test_default_patterns(self, tmp_path):
+        setuptools_config = '[tool.setuptools]\nzip-safe = false'
+        # ^ used just to trigger section validation
+        pyproject = self.base_pyproject(tmp_path, setuptools_config)
+
+        license_files = "LICENCE-a.html COPYING-abc.txt AUTHORS-xyz NOTICE,def".split()
+
+        for fname in license_files:
+            (tmp_path / fname).write_text(f"{fname}\n", encoding="utf-8")
+
+        dist = pyprojecttoml.apply_configuration(makedist(tmp_path), pyproject)
+        assert (tmp_path / "LICENSE.txt").exists()  # from base example
+        assert set(dist.metadata.license_files) == {*license_files, "LICENSE.txt"}
+
+
+class TestPyModules:
+    # https://github.com/pypa/setuptools/issues/4316
+
+    def dist(self, name):
+        toml_config = f"""
+        [project]
+        name = "test"
+        version = "42.0"
+        [tool.setuptools]
+        py-modules = [{name!r}]
+        """
+        pyproject = Path("pyproject.toml")
+        pyproject.write_text(cleandoc(toml_config), encoding="utf-8")
+        return pyprojecttoml.apply_configuration(Distribution({}), pyproject)
+
+    @pytest.mark.parametrize("module", ["pip-run", "abc-d.λ-xyz-e"])
+    def test_valid_module_name(self, tmp_path, monkeypatch, module):
+        monkeypatch.chdir(tmp_path)
+        assert module in self.dist(module).py_modules
+
+    @pytest.mark.parametrize("module", ["pip run", "-pip-run", "pip-run-stubs"])
+    def test_invalid_module_name(self, tmp_path, monkeypatch, module):
+        monkeypatch.chdir(tmp_path)
+        with pytest.raises(ValueError, match="py-modules"):
+            self.dist(module).py_modules
+
+
+class TestExtModules:
+    def test_pyproject_sets_attribute(self, tmp_path, monkeypatch):
+        monkeypatch.chdir(tmp_path)
+        pyproject = Path("pyproject.toml")
+        toml_config = """
+        [project]
+        name = "test"
+        version = "42.0"
+        [tool.setuptools]
+        ext-modules = [
+          {name = "my.ext", sources = ["hello.c", "world.c"]}
+        ]
+        """
+        pyproject.write_text(cleandoc(toml_config), encoding="utf-8")
+        with pytest.warns(pyprojecttoml._ExperimentalConfiguration):
+            dist = pyprojecttoml.apply_configuration(Distribution({}), pyproject)
+        assert len(dist.ext_modules) == 1
+        assert dist.ext_modules[0].name == "my.ext"
+        assert set(dist.ext_modules[0].sources) == {"hello.c", "world.c"}
+
+
+class TestDeprecatedFields:
+    def test_namespace_packages(self, tmp_path):
+        pyproject = tmp_path / "pyproject.toml"
+        config = """
+        [project]
+        name = "myproj"
+        version = "42"
+        [tool.setuptools]
+        namespace-packages = ["myproj.pkg"]
+        """
+        pyproject.write_text(cleandoc(config), encoding="utf-8")
+        with pytest.raises(RemovedConfigError, match="namespace-packages"):
+            pyprojecttoml.apply_configuration(makedist(tmp_path), pyproject)
+
+
+class TestPresetField:
+    def pyproject(self, tmp_path, dynamic, extra_content=""):
+        content = f"[project]\nname = 'proj'\ndynamic = {dynamic!r}\n"
+        if "version" not in dynamic:
+            content += "version = '42'\n"
+        file = tmp_path / "pyproject.toml"
+        file.write_text(content + extra_content, encoding="utf-8")
+        return file
+
+    @pytest.mark.parametrize(
+        ("attr", "field", "value"),
+        [
+            ("classifiers", "classifiers", ["Private :: Classifier"]),
+            ("entry_points", "scripts", {"console_scripts": ["foobar=foobar:main"]}),
+            ("entry_points", "gui-scripts", {"gui_scripts": ["bazquux=bazquux:main"]}),
+            pytest.param(
+                *("install_requires", "dependencies", ["six"]),
+                marks=[
+                    pytest.mark.filterwarnings("ignore:.*install_requires. overwritten")
+                ],
+            ),
+        ],
+    )
+    def test_not_listed_in_dynamic(self, tmp_path, attr, field, value):
+        """Setuptools cannot set a field if not listed in ``dynamic``"""
+        pyproject = self.pyproject(tmp_path, [])
+        dist = makedist(tmp_path, **{attr: value})
+        msg = re.compile(f"defined outside of `pyproject.toml`:.*{field}", re.S)
+        with pytest.warns(_MissingDynamic, match=msg):
+            dist = pyprojecttoml.apply_configuration(dist, pyproject)
+
+        dist_value = _some_attrgetter(f"metadata.{attr}", attr)(dist)
+        assert not dist_value
+
+    @pytest.mark.parametrize(
+        ("attr", "field", "value"),
+        [
+            ("install_requires", "dependencies", []),
+            ("extras_require", "optional-dependencies", {}),
+            ("install_requires", "dependencies", ["six"]),
+            ("classifiers", "classifiers", ["Private :: Classifier"]),
+        ],
+    )
+    def test_listed_in_dynamic(self, tmp_path, attr, field, value):
+        pyproject = self.pyproject(tmp_path, [field])
+        dist = makedist(tmp_path, **{attr: value})
+        dist = pyprojecttoml.apply_configuration(dist, pyproject)
+        dist_value = _some_attrgetter(f"metadata.{attr}", attr)(dist)
+        assert dist_value == value
+
+    def test_warning_overwritten_dependencies(self, tmp_path):
+        src = "[project]\nname='pkg'\nversion='0.1'\ndependencies=['click']\n"
+        pyproject = tmp_path / "pyproject.toml"
+        pyproject.write_text(src, encoding="utf-8")
+        dist = makedist(tmp_path, install_requires=["wheel"])
+        with pytest.warns(match="`install_requires` overwritten"):
+            dist = pyprojecttoml.apply_configuration(dist, pyproject)
+        assert "wheel" not in dist.install_requires
+
+    def test_optional_dependencies_dont_remove_env_markers(self, tmp_path):
+        """
+        Internally setuptools converts dependencies with markers to "extras".
+        If ``install_requires`` is given by ``setup.py``, we have to ensure that
+        applying ``optional-dependencies`` does not overwrite the mandatory
+        dependencies with markers (see #3204).
+        """
+        # If setuptools replaces its internal mechanism that uses `requires.txt`,
+        # this test will have to be rewritten accordingly
+        extra = "\n[project.optional-dependencies]\nfoo = ['bar>1']\n"
+        pyproject = self.pyproject(tmp_path, ["dependencies"], extra)
+        install_req = ['importlib-resources (>=3.0.0) ; python_version < "3.7"']
+        dist = makedist(tmp_path, install_requires=install_req)
+        dist = pyprojecttoml.apply_configuration(dist, pyproject)
+        assert "foo" in dist.extras_require
+        egg_info = dist.get_command_obj("egg_info")
+        write_requirements(egg_info, tmp_path, tmp_path / "requires.txt")
+        reqs = (tmp_path / "requires.txt").read_text(encoding="utf-8")
+        assert "importlib-resources" in reqs
+        assert "bar" in reqs
+        assert ':python_version < "3.7"' in reqs
+
+    @pytest.mark.parametrize(
+        ("field", "group"),
+        [("scripts", "console_scripts"), ("gui-scripts", "gui_scripts")],
+    )
+    @pytest.mark.filterwarnings("error")
+    def test_scripts_dont_require_dynamic_entry_points(self, tmp_path, field, group):
+        # Issue 3862
+        pyproject = self.pyproject(tmp_path, [field])
+        dist = makedist(tmp_path, entry_points={group: ["foobar=foobar:main"]})
+        dist = pyprojecttoml.apply_configuration(dist, pyproject)
+        assert group in dist.entry_points
+
+
+class TestMeta:
+    def test_example_file_in_sdist(self, setuptools_sdist):
+        """Meta test to ensure tests can run from sdist"""
+        with tarfile.open(setuptools_sdist) as tar:
+            assert any(name.endswith(EXAMPLES_FILE) for name in tar.getnames())
+
+
+class TestInteropCommandLineParsing:
+    def test_version(self, tmp_path, monkeypatch, capsys):
+        # See pypa/setuptools#4047
+        # This test can be removed once the CLI interface of setup.py is removed
+        monkeypatch.chdir(tmp_path)
+        toml_config = """
+        [project]
+        name = "test"
+        version = "42.0"
+        """
+        pyproject = Path(tmp_path, "pyproject.toml")
+        pyproject.write_text(cleandoc(toml_config), encoding="utf-8")
+        opts = {"script_args": ["--version"]}
+        dist = pyprojecttoml.apply_configuration(Distribution(opts), pyproject)
+        dist.parse_command_line()  # <-- there should be no exception here.
+        captured = capsys.readouterr()
+        assert "42.0" in captured.out
+
+
+class TestStaticConfig:
+    def test_mark_static_fields(self, tmp_path, monkeypatch):
+        monkeypatch.chdir(tmp_path)
+        toml_config = """
+        [project]
+        name = "test"
+        version = "42.0"
+        dependencies = ["hello"]
+        keywords = ["world"]
+        classifiers = ["private :: hello world"]
+        [tool.setuptools]
+        obsoletes = ["abcd"]
+        provides = ["abcd"]
+        platforms = ["abcd"]
+        """
+        pyproject = Path(tmp_path, "pyproject.toml")
+        pyproject.write_text(cleandoc(toml_config), encoding="utf-8")
+        dist = pyprojecttoml.apply_configuration(Distribution({}), pyproject)
+        assert is_static(dist.install_requires)
+        assert is_static(dist.metadata.keywords)
+        assert is_static(dist.metadata.classifiers)
+        assert is_static(dist.metadata.obsoletes)
+        assert is_static(dist.metadata.provides)
+        assert is_static(dist.metadata.platforms)
+
+
+# --- Auxiliary Functions ---
+
+
+def core_metadata(dist) -> str:
+    with io.StringIO() as buffer:
+        dist.metadata.write_pkg_file(buffer)
+        pkg_file_txt = buffer.getvalue()
+
+    # Make sure core metadata is valid
+    Metadata.from_email(pkg_file_txt, validate=True)  # can raise exceptions
+
+    skip_prefixes: tuple[str, ...] = ()
+    skip_lines = set()
+    # ---- DIFF NORMALISATION ----
+    # PEP 621 is very particular about author/maintainer metadata conversion, so skip
+    skip_prefixes += ("Author:", "Author-email:", "Maintainer:", "Maintainer-email:")
+    # May be redundant with Home-page
+    skip_prefixes += ("Project-URL: Homepage,", "Home-page:")
+    # May be missing in original (relying on default) but backfilled in the TOML
+    skip_prefixes += ("Description-Content-Type:",)
+    # Remove empty lines
+    skip_lines.add("")
+
+    result = []
+    for line in pkg_file_txt.splitlines():
+        if line.startswith(skip_prefixes) or line in skip_lines:
+            continue
+        result.append(line + "\n")
+
+    return "".join(result)
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/config/test_expand.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/config/test_expand.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5710ec63d7d9d4ed7b709203bb2fc4b512f2093
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/config/test_expand.py
@@ -0,0 +1,247 @@
+import os
+import sys
+from pathlib import Path
+
+import pytest
+
+from setuptools._static import is_static
+from setuptools.config import expand
+from setuptools.discovery import find_package_path
+
+from distutils.errors import DistutilsOptionError
+
+
+def write_files(files, root_dir):
+    for file, content in files.items():
+        path = root_dir / file
+        path.parent.mkdir(exist_ok=True, parents=True)
+        path.write_text(content, encoding="utf-8")
+
+
+def test_glob_relative(tmp_path, monkeypatch):
+    files = {
+        "dir1/dir2/dir3/file1.txt",
+        "dir1/dir2/file2.txt",
+        "dir1/file3.txt",
+        "a.ini",
+        "b.ini",
+        "dir1/c.ini",
+        "dir1/dir2/a.ini",
+    }
+
+    write_files({k: "" for k in files}, tmp_path)
+    patterns = ["**/*.txt", "[ab].*", "**/[ac].ini"]
+    monkeypatch.chdir(tmp_path)
+    assert set(expand.glob_relative(patterns)) == files
+    # Make sure the same APIs work outside cwd
+    assert set(expand.glob_relative(patterns, tmp_path)) == files
+
+
+def test_read_files(tmp_path, monkeypatch):
+    dir_ = tmp_path / "dir_"
+    (tmp_path / "_dir").mkdir(exist_ok=True)
+    (tmp_path / "a.txt").touch()
+    files = {"a.txt": "a", "dir1/b.txt": "b", "dir1/dir2/c.txt": "c"}
+    write_files(files, dir_)
+
+    secrets = Path(str(dir_) + "secrets")
+    secrets.mkdir(exist_ok=True)
+    write_files({"secrets.txt": "secret keys"}, secrets)
+
+    with monkeypatch.context() as m:
+        m.chdir(dir_)
+        assert expand.read_files(list(files)) == "a\nb\nc"
+
+        cannot_access_msg = r"Cannot access '.*\.\..a\.txt'"
+        with pytest.raises(DistutilsOptionError, match=cannot_access_msg):
+            expand.read_files(["../a.txt"])
+
+        cannot_access_secrets_msg = r"Cannot access '.*secrets\.txt'"
+        with pytest.raises(DistutilsOptionError, match=cannot_access_secrets_msg):
+            expand.read_files(["../dir_secrets/secrets.txt"])
+
+    # Make sure the same APIs work outside cwd
+    assert expand.read_files(list(files), dir_) == "a\nb\nc"
+    with pytest.raises(DistutilsOptionError, match=cannot_access_msg):
+        expand.read_files(["../a.txt"], dir_)
+
+
+class TestReadAttr:
+    @pytest.mark.parametrize(
+        "example",
+        [
+            # No cookie means UTF-8:
+            b"__version__ = '\xc3\xa9'\nraise SystemExit(1)\n",
+            # If a cookie is present, honor it:
+            b"# -*- coding: utf-8 -*-\n__version__ = '\xc3\xa9'\nraise SystemExit(1)\n",
+            b"# -*- coding: latin1 -*-\n__version__ = '\xe9'\nraise SystemExit(1)\n",
+        ],
+    )
+    def test_read_attr_encoding_cookie(self, example, tmp_path):
+        (tmp_path / "mod.py").write_bytes(example)
+        assert expand.read_attr('mod.__version__', root_dir=tmp_path) == 'é'
+
+    def test_read_attr(self, tmp_path, monkeypatch):
+        files = {
+            "pkg/__init__.py": "",
+            "pkg/sub/__init__.py": "VERSION = '0.1.1'",
+            "pkg/sub/mod.py": (
+                "VALUES = {'a': 0, 'b': {42}, 'c': (0, 1, 1)}\nraise SystemExit(1)"
+            ),
+        }
+        write_files(files, tmp_path)
+
+        with monkeypatch.context() as m:
+            m.chdir(tmp_path)
+            # Make sure it can read the attr statically without evaluating the module
+            version = expand.read_attr('pkg.sub.VERSION')
+            values = expand.read_attr('lib.mod.VALUES', {'lib': 'pkg/sub'})
+
+        assert version == '0.1.1'
+        assert is_static(version)
+
+        assert values['a'] == 0
+        assert values['b'] == {42}
+        assert is_static(values)
+
+        # Make sure the same APIs work outside cwd
+        assert expand.read_attr('pkg.sub.VERSION', root_dir=tmp_path) == '0.1.1'
+        values = expand.read_attr('lib.mod.VALUES', {'lib': 'pkg/sub'}, tmp_path)
+        assert values['c'] == (0, 1, 1)
+
+    @pytest.mark.parametrize(
+        "example",
+        [
+            "VERSION: str\nVERSION = '0.1.1'\nraise SystemExit(1)\n",
+            "VERSION: str = '0.1.1'\nraise SystemExit(1)\n",
+        ],
+    )
+    def test_read_annotated_attr(self, tmp_path, example):
+        files = {
+            "pkg/__init__.py": "",
+            "pkg/sub/__init__.py": example,
+        }
+        write_files(files, tmp_path)
+        # Make sure this attribute can be read statically
+        version = expand.read_attr('pkg.sub.VERSION', root_dir=tmp_path)
+        assert version == '0.1.1'
+        assert is_static(version)
+
+    @pytest.mark.parametrize(
+        "example",
+        [
+            "VERSION = (lambda: '0.1.1')()\n",
+            "def fn(): return '0.1.1'\nVERSION = fn()\n",
+            "VERSION: str = (lambda: '0.1.1')()\n",
+        ],
+    )
+    def test_read_dynamic_attr(self, tmp_path, monkeypatch, example):
+        files = {
+            "pkg/__init__.py": "",
+            "pkg/sub/__init__.py": example,
+        }
+        write_files(files, tmp_path)
+        monkeypatch.chdir(tmp_path)
+        version = expand.read_attr('pkg.sub.VERSION')
+        assert version == '0.1.1'
+        assert not is_static(version)
+
+    def test_import_order(self, tmp_path):
+        """
+        Sometimes the import machinery will import the parent package of a nested
+        module, which triggers side-effects and might create problems
+        (see issue #3176).
+
+        ``read_attr`` should bypass these limitations by resolving modules
+        statically (via ``ast.literal_eval``); a minimal sketch of the same idea
+        appears right after this test.
+        """
+        files = {
+            "src/pkg/__init__.py": "from .main import func\nfrom .about import version",
+            "src/pkg/main.py": "import super_complicated_dep\ndef func(): return 42",
+            "src/pkg/about.py": "version = '42'",
+        }
+        write_files(files, tmp_path)
+        attr_desc = "pkg.about.version"
+        package_dir = {"": "src"}
+        # `import super_complicated_dep` should not run, otherwise the build fails
+        assert expand.read_attr(attr_desc, package_dir, tmp_path) == "42"
+
+
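+# Illustrative sketch (not part of the upstream suite): a minimal version of
+# the static resolution described in ``test_import_order`` above, using only
+# the stdlib ``ast`` module. The helper name is hypothetical.
+def _read_attr_statically_sketch(source, attr):
+    import ast
+
+    for node in ast.walk(ast.parse(source)):
+        # Plain assignments such as ``version = '42'``
+        if isinstance(node, ast.Assign):
+            if any(isinstance(t, ast.Name) and t.id == attr for t in node.targets):
+                return ast.literal_eval(node.value)
+        # Annotated assignments such as ``version: str = '42'``
+        if isinstance(node, ast.AnnAssign) and node.value is not None:
+            if isinstance(node.target, ast.Name) and node.target.id == attr:
+                return ast.literal_eval(node.value)
+    raise AttributeError(attr)
+
+
+def test_read_attr_static_sketch():
+    # The ``import super_complicated_dep`` statement is never executed:
+    src = "import super_complicated_dep\nversion = '42'"
+    assert _read_attr_statically_sketch(src, "version") == "42"
+
+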
+@pytest.mark.parametrize(
+    ("package_dir", "file", "module", "return_value"),
+    [
+        ({"": "src"}, "src/pkg/main.py", "pkg.main", 42),
+        ({"pkg": "lib"}, "lib/main.py", "pkg.main", 13),
+        ({}, "single_module.py", "single_module", 70),
+        ({}, "flat_layout/pkg.py", "flat_layout.pkg", 836),
+    ],
+)
+def test_resolve_class(monkeypatch, tmp_path, package_dir, file, module, return_value):
+    monkeypatch.setattr(sys, "modules", {})  # reproducibility
+    files = {file: f"class Custom:\n    def testing(self): return {return_value}"}
+    write_files(files, tmp_path)
+    cls = expand.resolve_class(f"{module}.Custom", package_dir, tmp_path)
+    assert cls().testing() == return_value
+
+
+@pytest.mark.parametrize(
+    ("args", "pkgs"),
+    [
+        ({"where": ["."], "namespaces": False}, {"pkg", "other"}),
+        ({"where": [".", "dir1"], "namespaces": False}, {"pkg", "other", "dir2"}),
+        ({"namespaces": True}, {"pkg", "other", "dir1", "dir1.dir2"}),
+        ({}, {"pkg", "other", "dir1", "dir1.dir2"}),  # default value for `namespaces`
+    ],
+)
+def test_find_packages(tmp_path, args, pkgs):
+    files = {
+        "pkg/__init__.py",
+        "other/__init__.py",
+        "dir1/dir2/__init__.py",
+    }
+    write_files({k: "" for k in files}, tmp_path)
+
+    package_dir = {}
+    kwargs = {"root_dir": tmp_path, "fill_package_dir": package_dir, **args}
+    assert set(expand.find_packages(**kwargs)) == pkgs
+    for pkg in pkgs:
+        pkg_path = find_package_path(pkg, package_dir, tmp_path)
+        assert os.path.exists(pkg_path)
+
+    # Make sure the same APIs work outside cwd
+    where = [
+        str((tmp_path / p).resolve()).replace(os.sep, "/")  # ensure posix-style paths
+        for p in args.pop("where", ["."])
+    ]
+
+    assert set(expand.find_packages(where=where, **args)) == pkgs
+
+
+@pytest.mark.parametrize(
+    ("files", "where", "expected_package_dir"),
+    [
+        (["pkg1/__init__.py", "pkg1/other.py"], ["."], {}),
+        (["pkg1/__init__.py", "pkg2/__init__.py"], ["."], {}),
+        (["src/pkg1/__init__.py", "src/pkg1/other.py"], ["src"], {"": "src"}),
+        (["src/pkg1/__init__.py", "src/pkg2/__init__.py"], ["src"], {"": "src"}),
+        (
+            ["src1/pkg1/__init__.py", "src2/pkg2/__init__.py"],
+            ["src1", "src2"],
+            {"pkg1": "src1/pkg1", "pkg2": "src2/pkg2"},
+        ),
+        (
+            ["src/pkg1/__init__.py", "pkg2/__init__.py"],
+            ["src", "."],
+            {"pkg1": "src/pkg1"},
+        ),
+    ],
+)
+def test_fill_package_dir(tmp_path, files, where, expected_package_dir):
+    write_files({k: "" for k in files}, tmp_path)
+    pkg_dir = {}
+    kwargs = {"root_dir": tmp_path, "fill_package_dir": pkg_dir, "namespaces": False}
+    pkgs = expand.find_packages(where=where, **kwargs)
+    assert set(pkg_dir.items()) == set(expected_package_dir.items())
+    for pkg in pkgs:
+        pkg_path = find_package_path(pkg, pkg_dir, tmp_path)
+        assert os.path.exists(pkg_path)
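+
+
+# Illustrative sketch (not part of the upstream suite): the mapping filled by
+# ``fill_package_dir`` feeds ``find_package_path``, so submodules of a mapped
+# package resolve under the mapped directory.
+def test_find_package_path_sketch(tmp_path):
+    write_files({"src/pkg1/sub/__init__.py": ""}, tmp_path)
+    pkg_path = find_package_path("pkg1.sub", {"pkg1": "src/pkg1"}, tmp_path)
+    assert os.path.exists(pkg_path)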
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/config/test_pyprojecttoml.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/config/test_pyprojecttoml.py
new file mode 100644
index 0000000000000000000000000000000000000000..db40fcd23da137dfb2f4ddde7ffb722193ee83e2
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/config/test_pyprojecttoml.py
@@ -0,0 +1,396 @@
+import re
+from configparser import ConfigParser
+from inspect import cleandoc
+
+import jaraco.path
+import pytest
+import tomli_w
+from path import Path
+
+import setuptools  # noqa: F401 # force distutils.core to be patched
+from setuptools.config.pyprojecttoml import (
+    _ToolsTypoInMetadata,
+    apply_configuration,
+    expand_configuration,
+    read_configuration,
+    validate,
+)
+from setuptools.dist import Distribution
+from setuptools.errors import OptionError
+
+import distutils.core
+
+EXAMPLE = """
+[project]
+name = "myproj"
+keywords = ["some", "key", "words"]
+dynamic = ["version", "readme"]
+requires-python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+dependencies = [
+    'importlib-metadata>=0.12;python_version<"3.8"',
+    'importlib-resources>=1.0;python_version<"3.7"',
+    'pathlib2>=2.3.3,<3;python_version < "3.4" and sys.platform != "win32"',
+]
+
+[project.optional-dependencies]
+docs = [
+    "sphinx>=3",
+    "sphinx-argparse>=0.2.5",
+    "sphinx-rtd-theme>=0.4.3",
+]
+testing = [
+    "pytest>=1",
+    "coverage>=3,<5",
+]
+
+[project.scripts]
+exec = "pkg.__main__:exec"
+
+[build-system]
+requires = ["setuptools", "wheel"]
+build-backend = "setuptools.build_meta"
+
+[tool.setuptools]
+package-dir = {"" = "src"}
+zip-safe = true
+platforms = ["any"]
+
+[tool.setuptools.packages.find]
+where = ["src"]
+
+[tool.setuptools.cmdclass]
+sdist = "pkg.mod.CustomSdist"
+
+[tool.setuptools.dynamic.version]
+attr = "pkg.__version__.VERSION"
+
+[tool.setuptools.dynamic.readme]
+file = ["README.md"]
+content-type = "text/markdown"
+
+[tool.setuptools.package-data]
+"*" = ["*.txt"]
+
+[tool.setuptools.data-files]
+"data" = ["_files/*.txt"]
+
+[tool.distutils.sdist]
+formats = "gztar"
+
+[tool.distutils.bdist_wheel]
+universal = true
+"""
+
+
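+# Hedged sketch (illustrative, not upstream): the dynamic ``version`` above is
+# ``attr: pkg.__version__.VERSION`` and ``create_example`` below writes it as
+# the tuple ``(3, 10)``; setuptools normalises non-string iterables by joining
+# their items with dots, giving the "3.10" asserted in ``verify_example``.
+def test_version_tuple_normalisation_sketch():
+    assert ".".join(map(str, (3, 10))) == "3.10"
+
+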
+def create_example(path, pkg_root):
+    files = {
+        "pyproject.toml": EXAMPLE,
+        "README.md": "hello world",
+        "_files": {
+            "file.txt": "",
+        },
+    }
+    packages = {
+        "pkg": {
+            "__init__.py": "",
+            "mod.py": "class CustomSdist: pass",
+            "__version__.py": "VERSION = (3, 10)",
+            "__main__.py": "def exec(): print('hello')",
+        },
+    }
+
+    assert pkg_root  # Meta-test: cannot be empty string.
+
+    if pkg_root == ".":
+        files = {**files, **packages}
+        # skip other files: flat-layout will raise error for multi-package dist
+    else:
+        # Use this opportunity to ensure namespaces are discovered
+        files[pkg_root] = {**packages, "other": {"nested": {"__init__.py": ""}}}
+
+    jaraco.path.build(files, prefix=path)
+
+
+def verify_example(config, path, pkg_root):
+    pyproject = path / "pyproject.toml"
+    pyproject.write_text(tomli_w.dumps(config), encoding="utf-8")
+    expanded = expand_configuration(config, path)
+    expanded_project = expanded["project"]
+    assert read_configuration(pyproject, expand=True) == expanded
+    assert expanded_project["version"] == "3.10"
+    assert expanded_project["readme"]["text"] == "hello world"
+    assert "packages" in expanded["tool"]["setuptools"]
+    if pkg_root == ".":
+        # Auto-discovery will raise error for multi-package dist
+        assert set(expanded["tool"]["setuptools"]["packages"]) == {"pkg"}
+    else:
+        assert set(expanded["tool"]["setuptools"]["packages"]) == {
+            "pkg",
+            "other",
+            "other.nested",
+        }
+    assert expanded["tool"]["setuptools"]["include-package-data"] is True
+    assert "" in expanded["tool"]["setuptools"]["package-data"]
+    assert "*" not in expanded["tool"]["setuptools"]["package-data"]
+    assert expanded["tool"]["setuptools"]["data-files"] == [
+        ("data", ["_files/file.txt"])
+    ]
+
+
+def test_read_configuration(tmp_path):
+    create_example(tmp_path, "src")
+    pyproject = tmp_path / "pyproject.toml"
+
+    config = read_configuration(pyproject, expand=False)
+    assert config["project"].get("version") is None
+    assert config["project"].get("readme") is None
+
+    verify_example(config, tmp_path, "src")
+
+
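+# Hedged usage sketch (illustrative, not upstream): ``read_configuration`` with
+# ``expand=True`` resolves the ``dynamic`` fields up front, so the version and
+# readme checked piecemeal in ``verify_example`` are available immediately.
+def test_read_configuration_expanded_sketch(tmp_path):
+    create_example(tmp_path, "src")
+    config = read_configuration(tmp_path / "pyproject.toml", expand=True)
+    assert config["project"]["version"] == "3.10"
+    assert config["project"]["readme"]["text"] == "hello world"
+
+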
+@pytest.mark.parametrize(
+    ("pkg_root", "opts"),
+    [
+        (".", {}),
+        ("src", {}),
+        ("lib", {"packages": {"find": {"where": ["lib"]}}}),
+    ],
+)
+def test_discovered_package_dir_with_attr_directive_in_config(tmp_path, pkg_root, opts):
+    create_example(tmp_path, pkg_root)
+
+    pyproject = tmp_path / "pyproject.toml"
+
+    config = read_configuration(pyproject, expand=False)
+    assert config["project"].get("version") is None
+    assert config["project"].get("readme") is None
+    config["tool"]["setuptools"].pop("packages", None)
+    config["tool"]["setuptools"].pop("package-dir", None)
+
+    config["tool"]["setuptools"].update(opts)
+    verify_example(config, tmp_path, pkg_root)
+
+
+ENTRY_POINTS = {
+    "console_scripts": {"a": "mod.a:func"},
+    "gui_scripts": {"b": "mod.b:func"},
+    "other": {"c": "mod.c:func [extra]"},
+}
+
+
+class TestEntryPoints:
+    def write_entry_points(self, tmp_path):
+        entry_points = ConfigParser()
+        entry_points.read_dict(ENTRY_POINTS)
+        with open(tmp_path / "entry-points.txt", "w", encoding="utf-8") as f:
+            entry_points.write(f)
+
+    def pyproject(self, dynamic=None):
+        project = {"dynamic": dynamic or ["scripts", "gui-scripts", "entry-points"]}
+        tool = {"dynamic": {"entry-points": {"file": "entry-points.txt"}}}
+        return {"project": project, "tool": {"setuptools": tool}}
+
+    def test_all_listed_in_dynamic(self, tmp_path):
+        self.write_entry_points(tmp_path)
+        expanded = expand_configuration(self.pyproject(), tmp_path)
+        expanded_project = expanded["project"]
+        assert len(expanded_project["scripts"]) == 1
+        assert expanded_project["scripts"]["a"] == "mod.a:func"
+        assert len(expanded_project["gui-scripts"]) == 1
+        assert expanded_project["gui-scripts"]["b"] == "mod.b:func"
+        assert len(expanded_project["entry-points"]) == 1
+        assert expanded_project["entry-points"]["other"]["c"] == "mod.c:func [extra]"
+
+    @pytest.mark.parametrize("missing_dynamic", ("scripts", "gui-scripts"))
+    def test_scripts_not_listed_in_dynamic(self, tmp_path, missing_dynamic):
+        self.write_entry_points(tmp_path)
+        dynamic = {"scripts", "gui-scripts", "entry-points"} - {missing_dynamic}
+
+        msg = f"defined outside of `pyproject.toml`:.*{missing_dynamic}"
+        with pytest.raises(OptionError, match=re.compile(msg, re.S)):
+            expand_configuration(self.pyproject(dynamic), tmp_path)
+
+
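+# Illustrative sketch (not upstream): ``entry-points.txt`` written by the class
+# above is plain INI, so it can be read back with ``ConfigParser``; expansion
+# then lifts ``console_scripts``/``gui_scripts`` into ``project.scripts`` and
+# ``project.gui-scripts``, leaving the rest under ``project.entry-points``.
+def test_entry_points_file_is_ini_sketch(tmp_path):
+    TestEntryPoints().write_entry_points(tmp_path)
+    parser = ConfigParser()
+    parser.read(tmp_path / "entry-points.txt", encoding="utf-8")
+    assert parser["console_scripts"]["a"] == "mod.a:func"
+
+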
+class TestClassifiers:
+    def test_dynamic(self, tmp_path):
+        # Let's create a project example that has dynamic classifiers
+        # coming from a txt file.
+        create_example(tmp_path, "src")
+        classifiers = cleandoc(
+            """
+            Framework :: Flask
+            Programming Language :: Haskell
+            """
+        )
+        (tmp_path / "classifiers.txt").write_text(classifiers, encoding="utf-8")
+
+        pyproject = tmp_path / "pyproject.toml"
+        config = read_configuration(pyproject, expand=False)
+        dynamic = config["project"]["dynamic"]
+        config["project"]["dynamic"] = list({*dynamic, "classifiers"})
+        dynamic_config = config["tool"]["setuptools"]["dynamic"]
+        dynamic_config["classifiers"] = {"file": "classifiers.txt"}
+
+        # When the configuration is expanded,
+        # each line of the file should become a different classifier.
+        validate(config, pyproject)
+        expanded = expand_configuration(config, tmp_path)
+
+        assert set(expanded["project"]["classifiers"]) == {
+            "Framework :: Flask",
+            "Programming Language :: Haskell",
+        }
+
+    def test_dynamic_without_config(self, tmp_path):
+        config = """
+        [project]
+        name = "myproj"
+        version = '42'
+        dynamic = ["classifiers"]
+        """
+
+        pyproject = tmp_path / "pyproject.toml"
+        pyproject.write_text(cleandoc(config), encoding="utf-8")
+        with pytest.raises(OptionError, match="No configuration .* .classifiers."):
+            read_configuration(pyproject)
+
+    def test_dynamic_readme_from_setup_script_args(self, tmp_path):
+        config = """
+        [project]
+        name = "myproj"
+        version = '42'
+        dynamic = ["readme"]
+        """
+        pyproject = tmp_path / "pyproject.toml"
+        pyproject.write_text(cleandoc(config), encoding="utf-8")
+        dist = Distribution(attrs={"long_description": "42"})
+        # No error should occur because of missing `readme`
+        dist = apply_configuration(dist, pyproject)
+        assert dist.metadata.long_description == "42"
+
+    def test_dynamic_without_file(self, tmp_path):
+        config = """
+        [project]
+        name = "myproj"
+        version = '42'
+        dynamic = ["classifiers"]
+
+        [tool.setuptools.dynamic]
+        classifiers = {file = ["classifiers.txt"]}
+        """
+
+        pyproject = tmp_path / "pyproject.toml"
+        pyproject.write_text(cleandoc(config), encoding="utf-8")
+        with pytest.warns(UserWarning, match="File .*classifiers.txt. cannot be found"):
+            expanded = read_configuration(pyproject)
+        assert "classifiers" not in expanded["project"]
+
+
+@pytest.mark.parametrize(
+    "example",
+    (
+        """
+        [project]
+        name = "myproj"
+        version = "1.2"
+
+        [my-tool.that-disrespect.pep518]
+        value = 42
+        """,
+    ),
+)
+def test_ignore_unrelated_config(tmp_path, example):
+    pyproject = tmp_path / "pyproject.toml"
+    pyproject.write_text(cleandoc(example), encoding="utf-8")
+
+    # Make sure no error is raised due to 3rd party configs in pyproject.toml
+    assert read_configuration(pyproject) is not None
+
+
+@pytest.mark.parametrize(
+    ("example", "error_msg"),
+    [
+        (
+            """
+            [project]
+            name = "myproj"
+            version = "1.2"
+            requires = ['pywin32; platform_system=="Windows"']
+            """,
+            "configuration error: .project. must not contain ..requires.. properties",
+        ),
+    ],
+)
+def test_invalid_example(tmp_path, example, error_msg):
+    pyproject = tmp_path / "pyproject.toml"
+    pyproject.write_text(cleandoc(example), encoding="utf-8")
+
+    pattern = re.compile(f"invalid pyproject.toml.*{error_msg}.*", re.M | re.S)
+    with pytest.raises(ValueError, match=pattern):
+        read_configuration(pyproject)
+
+
+@pytest.mark.parametrize("config", ("", "[tool.something]\nvalue = 42"))
+def test_empty(tmp_path, config):
+    pyproject = tmp_path / "pyproject.toml"
+    pyproject.write_text(config, encoding="utf-8")
+
+    # Make sure no error is raised
+    assert read_configuration(pyproject) == {}
+
+
+@pytest.mark.parametrize("config", ("[project]\nname = 'myproj'\nversion='42'\n",))
+def test_include_package_data_by_default(tmp_path, config):
+    """Builds with ``pyproject.toml`` should consider ``include-package-data=True`` as
+    default.
+    """
+    pyproject = tmp_path / "pyproject.toml"
+    pyproject.write_text(config, encoding="utf-8")
+
+    config = read_configuration(pyproject)
+    assert config["tool"]["setuptools"]["include-package-data"] is True
+
+
+def test_include_package_data_in_setuppy(tmp_path):
+    """Builds with ``pyproject.toml`` should consider ``include_package_data`` set in
+    ``setup.py``.
+
+    See https://github.com/pypa/setuptools/issues/3197#issuecomment-1079023889
+    """
+    files = {
+        "pyproject.toml": "[project]\nname = 'myproj'\nversion='42'\n",
+        "setup.py": "__import__('setuptools').setup(include_package_data=False)",
+    }
+    jaraco.path.build(files, prefix=tmp_path)
+
+    with Path(tmp_path):
+        dist = distutils.core.run_setup("setup.py", {}, stop_after="config")
+
+    assert dist.get_name() == "myproj"
+    assert dist.get_version() == "42"
+    assert dist.include_package_data is False
+
+
+def test_warn_tools_typo(tmp_path):
+    """Test that the common ``tools.setuptools`` typo in ``pyproject.toml`` issues a warning
+
+    See https://github.com/pypa/setuptools/issues/4150
+    """
+    config = """
+    [build-system]
+    requires = ["setuptools"]
+    build-backend = "setuptools.build_meta"
+
+    [project]
+    name = "myproj"
+    version = '42'
+
+    [tools.setuptools]
+    packages = ["package"]
+    """
+
+    pyproject = tmp_path / "pyproject.toml"
+    pyproject.write_text(cleandoc(config), encoding="utf-8")
+
+    with pytest.warns(_ToolsTypoInMetadata):
+        read_configuration(pyproject)
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/config/test_pyprojecttoml_dynamic_deps.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/config/test_pyprojecttoml_dynamic_deps.py
new file mode 100644
index 0000000000000000000000000000000000000000..e42f28ffaaf5270a74fe9f03885b75bb91858030
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/config/test_pyprojecttoml_dynamic_deps.py
@@ -0,0 +1,109 @@
+from inspect import cleandoc
+
+import pytest
+from jaraco import path
+
+from setuptools.config.pyprojecttoml import apply_configuration
+from setuptools.dist import Distribution
+from setuptools.warnings import SetuptoolsWarning
+
+
+def test_dynamic_dependencies(tmp_path):
+    files = {
+        "requirements.txt": "six\n  # comment\n",
+        "pyproject.toml": cleandoc(
+            """
+            [project]
+            name = "myproj"
+            version = "1.0"
+            dynamic = ["dependencies"]
+
+            [build-system]
+            requires = ["setuptools", "wheel"]
+            build-backend = "setuptools.build_meta"
+
+            [tool.setuptools.dynamic.dependencies]
+            file = ["requirements.txt"]
+            """
+        ),
+    }
+    path.build(files, prefix=tmp_path)
+    dist = Distribution()
+    dist = apply_configuration(dist, tmp_path / "pyproject.toml")
+    assert dist.install_requires == ["six"]
+
+
+def test_dynamic_optional_dependencies(tmp_path):
+    files = {
+        "requirements-docs.txt": "sphinx\n  # comment\n",
+        "pyproject.toml": cleandoc(
+            """
+            [project]
+            name = "myproj"
+            version = "1.0"
+            dynamic = ["optional-dependencies"]
+
+            [tool.setuptools.dynamic.optional-dependencies.docs]
+            file = ["requirements-docs.txt"]
+
+            [build-system]
+            requires = ["setuptools", "wheel"]
+            build-backend = "setuptools.build_meta"
+            """
+        ),
+    }
+    path.build(files, prefix=tmp_path)
+    dist = Distribution()
+    dist = apply_configuration(dist, tmp_path / "pyproject.toml")
+    assert dist.extras_require == {"docs": ["sphinx"]}
+
+
+def test_mixed_dynamic_optional_dependencies(tmp_path):
+    """
+    Test that if PEP 621 was loosened to allow mixing of dynamic and static
+    configurations in the case of fields containing sub-fields (groups),
+    things would work out.
+    """
+    files = {
+        "requirements-images.txt": "pillow~=42.0\n  # comment\n",
+        "pyproject.toml": cleandoc(
+            """
+            [project]
+            name = "myproj"
+            version = "1.0"
+            dynamic = ["optional-dependencies"]
+
+            [project.optional-dependencies]
+            docs = ["sphinx"]
+
+            [tool.setuptools.dynamic.optional-dependencies.images]
+            file = ["requirements-images.txt"]
+            """
+        ),
+    }
+
+    path.build(files, prefix=tmp_path)
+    pyproject = tmp_path / "pyproject.toml"
+    with pytest.raises(ValueError, match="project.optional-dependencies"):
+        apply_configuration(Distribution(), pyproject)
+
+
+def test_mixed_extras_require_optional_dependencies(tmp_path):
+    files = {
+        "pyproject.toml": cleandoc(
+            """
+            [project]
+            name = "myproj"
+            version = "1.0"
+            optional-dependencies.docs = ["sphinx"]
+            """
+        ),
+    }
+
+    path.build(files, prefix=tmp_path)
+    pyproject = tmp_path / "pyproject.toml"
+
+    with pytest.warns(SetuptoolsWarning, match=".extras_require. overwritten"):
+        dist = Distribution({"extras_require": {"hello": ["world"]}})
+        dist = apply_configuration(dist, pyproject)
+        assert dist.extras_require == {"docs": ["sphinx"]}
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/config/test_setupcfg.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/config/test_setupcfg.py
new file mode 100644
index 0000000000000000000000000000000000000000..adadc02da34751d86fa2e7d2fa61b5153eba19c5
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/config/test_setupcfg.py
@@ -0,0 +1,965 @@
+import configparser
+import contextlib
+import inspect
+from pathlib import Path
+from unittest.mock import Mock, patch
+
+import pytest
+from packaging.requirements import InvalidRequirement
+
+from setuptools.config.setupcfg import ConfigHandler, Target, read_configuration
+from setuptools.dist import Distribution, _Distribution
+from setuptools.warnings import SetuptoolsDeprecationWarning
+
+from ..textwrap import DALS
+
+from distutils.errors import DistutilsFileError, DistutilsOptionError
+
+
+class ErrConfigHandler(ConfigHandler[Target]):
+    """Erroneous handler. Fails to implement required methods."""
+
+    section_prefix = "**err**"
+
+
+def make_package_dir(name, base_dir, ns=False):
+    dir_package = base_dir
+    for dir_name in name.split('/'):
+        dir_package = dir_package.mkdir(dir_name)
+    init_file = None
+    if not ns:
+        init_file = dir_package.join('__init__.py')
+        init_file.write('')
+    return dir_package, init_file
+
+
+def fake_env(
+    tmpdir, setup_cfg, setup_py=None, encoding='ascii', package_path='fake_package'
+):
+    if setup_py is None:
+        setup_py = 'from setuptools import setup\nsetup()\n'
+
+    tmpdir.join('setup.py').write(setup_py)
+    config = tmpdir.join('setup.cfg')
+    config.write(setup_cfg.encode(encoding), mode='wb')
+
+    package_dir, init_file = make_package_dir(package_path, tmpdir)
+
+    init_file.write(
+        'VERSION = (1, 2, 3)\n'
+        '\n'
+        'VERSION_MAJOR = 1'
+        '\n'
+        'def get_version():\n'
+        '    return [3, 4, 5, "dev"]\n'
+        '\n'
+    )
+
+    return package_dir, config
+
+
+@contextlib.contextmanager
+def get_dist(tmpdir, kwargs_initial=None, parse=True):
+    kwargs_initial = kwargs_initial or {}
+
+    with tmpdir.as_cwd():
+        dist = Distribution(kwargs_initial)
+        dist.script_name = 'setup.py'
+        if parse:
+            dist.parse_config_files()
+
+        yield dist
+
+
+def test_parsers_implemented():
+    with pytest.raises(NotImplementedError):
+        handler = ErrConfigHandler(None, {}, False, Mock())
+        handler.parsers  # accessing the unimplemented property raises
+
+
+class TestConfigurationReader:
+    def test_basic(self, tmpdir):
+        _, config = fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'version = 10.1.1\n'
+            'keywords = one, two\n'
+            '\n'
+            '[options]\n'
+            'scripts = bin/a.py, bin/b.py\n',
+        )
+        config_dict = read_configuration(str(config))
+        assert config_dict['metadata']['version'] == '10.1.1'
+        assert config_dict['metadata']['keywords'] == ['one', 'two']
+        assert config_dict['options']['scripts'] == ['bin/a.py', 'bin/b.py']
+
+    def test_no_config(self, tmpdir):
+        with pytest.raises(DistutilsFileError):
+            read_configuration(str(tmpdir.join('setup.cfg')))
+
+    def test_ignore_errors(self, tmpdir):
+        _, config = fake_env(
+            tmpdir,
+            '[metadata]\nversion = attr: none.VERSION\nkeywords = one, two\n',
+        )
+        with pytest.raises(ImportError):
+            read_configuration(str(config))
+
+        config_dict = read_configuration(str(config), ignore_option_errors=True)
+
+        assert config_dict['metadata']['keywords'] == ['one', 'two']
+        assert 'version' not in config_dict['metadata']
+
+        config.remove()
+
+
+class TestMetadata:
+    def test_basic(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'version = 10.1.1\n'
+            'description = Some description\n'
+            'long_description_content_type = text/something\n'
+            'long_description = file: README\n'
+            'name = fake_name\n'
+            'keywords = one, two\n'
+            'provides = package, package.sub\n'
+            'license = otherlic\n'
+            'download_url = http://test.test.com/test/\n'
+            'maintainer_email = test@test.com\n',
+        )
+
+        tmpdir.join('README').write('readme contents\nline2')
+
+        meta_initial = {
+            # This will be used so `otherlic` won't replace it.
+            'license': 'BSD 3-Clause License',
+        }
+
+        with get_dist(tmpdir, meta_initial) as dist:
+            metadata = dist.metadata
+
+            assert metadata.version == '10.1.1'
+            assert metadata.description == 'Some description'
+            assert metadata.long_description_content_type == 'text/something'
+            assert metadata.long_description == 'readme contents\nline2'
+            assert metadata.provides == ['package', 'package.sub']
+            assert metadata.license == 'BSD 3-Clause License'
+            assert metadata.name == 'fake_name'
+            assert metadata.keywords == ['one', 'two']
+            assert metadata.download_url == 'http://test.test.com/test/'
+            assert metadata.maintainer_email == 'test@test.com'
+
+    def test_license_cfg(self, tmpdir):
+        fake_env(
+            tmpdir,
+            DALS(
+                """
+            [metadata]
+            name=foo
+            version=0.0.1
+            license=Apache 2.0
+            """
+            ),
+        )
+
+        with get_dist(tmpdir) as dist:
+            metadata = dist.metadata
+
+            assert metadata.name == "foo"
+            assert metadata.version == "0.0.1"
+            assert metadata.license == "Apache 2.0"
+
+    def test_file_mixed(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[metadata]\nlong_description = file: README.rst, CHANGES.rst\n\n',
+        )
+
+        tmpdir.join('README.rst').write('readme contents\nline2')
+        tmpdir.join('CHANGES.rst').write('changelog contents\nand stuff')
+
+        with get_dist(tmpdir) as dist:
+            assert dist.metadata.long_description == (
+                'readme contents\nline2\nchangelog contents\nand stuff'
+            )
+
+    def test_file_sandboxed(self, tmpdir):
+        tmpdir.ensure("README")
+        project = tmpdir.join('depth1', 'depth2')
+        project.ensure(dir=True)
+        fake_env(project, '[metadata]\nlong_description = file: ../../README\n')
+
+        with get_dist(project, parse=False) as dist:
+            with pytest.raises(DistutilsOptionError):
+                dist.parse_config_files()  # file: out of sandbox
+
+    def test_aliases(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'author_email = test@test.com\n'
+            'home_page = http://test.test.com/test/\n'
+            'summary = Short summary\n'
+            'platform = a, b\n'
+            'classifier =\n'
+            '  Framework :: Django\n'
+            '  Programming Language :: Python :: 3.5\n',
+        )
+
+        with get_dist(tmpdir) as dist:
+            metadata = dist.metadata
+            assert metadata.author_email == 'test@test.com'
+            assert metadata.url == 'http://test.test.com/test/'
+            assert metadata.description == 'Short summary'
+            assert metadata.platforms == ['a', 'b']
+            assert metadata.classifiers == [
+                'Framework :: Django',
+                'Programming Language :: Python :: 3.5',
+            ]
+
+    def test_multiline(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'name = fake_name\n'
+            'keywords =\n'
+            '  one\n'
+            '  two\n'
+            'classifiers =\n'
+            '  Framework :: Django\n'
+            '  Programming Language :: Python :: 3.5\n',
+        )
+        with get_dist(tmpdir) as dist:
+            metadata = dist.metadata
+            assert metadata.keywords == ['one', 'two']
+            assert metadata.classifiers == [
+                'Framework :: Django',
+                'Programming Language :: Python :: 3.5',
+            ]
+
+    def test_dict(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'project_urls =\n'
+            '  Link One = https://example.com/one/\n'
+            '  Link Two = https://example.com/two/\n',
+        )
+        with get_dist(tmpdir) as dist:
+            metadata = dist.metadata
+            assert metadata.project_urls == {
+                'Link One': 'https://example.com/one/',
+                'Link Two': 'https://example.com/two/',
+            }
+
+    def test_version(self, tmpdir):
+        package_dir, config = fake_env(
+            tmpdir, '[metadata]\nversion = attr: fake_package.VERSION\n'
+        )
+
+        sub_a = package_dir.mkdir('subpkg_a')
+        sub_a.join('__init__.py').write('')
+        sub_a.join('mod.py').write('VERSION = (2016, 11, 26)')
+
+        sub_b = package_dir.mkdir('subpkg_b')
+        sub_b.join('__init__.py').write('')
+        sub_b.join('mod.py').write(
+            'import third_party_module\nVERSION = (2016, 11, 26)'
+        )
+
+        with get_dist(tmpdir) as dist:
+            assert dist.metadata.version == '1.2.3'
+
+        config.write('[metadata]\nversion = attr: fake_package.get_version\n')
+        with get_dist(tmpdir) as dist:
+            assert dist.metadata.version == '3.4.5.dev'
+
+        config.write('[metadata]\nversion = attr: fake_package.VERSION_MAJOR\n')
+        with get_dist(tmpdir) as dist:
+            assert dist.metadata.version == '1'
+
+        config.write('[metadata]\nversion = attr: fake_package.subpkg_a.mod.VERSION\n')
+        with get_dist(tmpdir) as dist:
+            assert dist.metadata.version == '2016.11.26'
+
+        config.write('[metadata]\nversion = attr: fake_package.subpkg_b.mod.VERSION\n')
+        with get_dist(tmpdir) as dist:
+            assert dist.metadata.version == '2016.11.26'
+
+    def test_version_file(self, tmpdir):
+        fake_env(tmpdir, '[metadata]\nversion = file: fake_package/version.txt\n')
+        tmpdir.join('fake_package', 'version.txt').write('1.2.3\n')
+
+        with get_dist(tmpdir) as dist:
+            assert dist.metadata.version == '1.2.3'
+
+        tmpdir.join('fake_package', 'version.txt').write('1.2.3\n4.5.6\n')
+        with pytest.raises(DistutilsOptionError):
+            with get_dist(tmpdir) as dist:
+                dist.metadata.version
+
+    def test_version_with_package_dir_simple(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'version = attr: fake_package_simple.VERSION\n'
+            '[options]\n'
+            'package_dir =\n'
+            '    = src\n',
+            package_path='src/fake_package_simple',
+        )
+
+        with get_dist(tmpdir) as dist:
+            assert dist.metadata.version == '1.2.3'
+
+    def test_version_with_package_dir_rename(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'version = attr: fake_package_rename.VERSION\n'
+            '[options]\n'
+            'package_dir =\n'
+            '    fake_package_rename = fake_dir\n',
+            package_path='fake_dir',
+        )
+
+        with get_dist(tmpdir) as dist:
+            assert dist.metadata.version == '1.2.3'
+
+    def test_version_with_package_dir_complex(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'version = attr: fake_package_complex.VERSION\n'
+            '[options]\n'
+            'package_dir =\n'
+            '    fake_package_complex = src/fake_dir\n',
+            package_path='src/fake_dir',
+        )
+
+        with get_dist(tmpdir) as dist:
+            assert dist.metadata.version == '1.2.3'
+
+    def test_unknown_meta_item(self, tmpdir):
+        fake_env(tmpdir, '[metadata]\nname = fake_name\nunknown = some\n')
+        with get_dist(tmpdir, parse=False) as dist:
+            dist.parse_config_files()  # Skip unknown.
+
+    def test_unsupported_section(self, tmpdir):
+        fake_env(tmpdir, '[metadata.some]\nkey = val\n')
+        with get_dist(tmpdir, parse=False) as dist:
+            with pytest.raises(DistutilsOptionError):
+                dist.parse_config_files()
+
+    def test_classifiers(self, tmpdir):
+        expected = set([
+            'Framework :: Django',
+            'Programming Language :: Python :: 3',
+            'Programming Language :: Python :: 3.5',
+        ])
+
+        # From file.
+        _, config = fake_env(tmpdir, '[metadata]\nclassifiers = file: classifiers\n')
+
+        tmpdir.join('classifiers').write(
+            'Framework :: Django\n'
+            'Programming Language :: Python :: 3\n'
+            'Programming Language :: Python :: 3.5\n'
+        )
+
+        with get_dist(tmpdir) as dist:
+            assert set(dist.metadata.classifiers) == expected
+
+        # From list notation
+        config.write(
+            '[metadata]\n'
+            'classifiers =\n'
+            '    Framework :: Django\n'
+            '    Programming Language :: Python :: 3\n'
+            '    Programming Language :: Python :: 3.5\n'
+        )
+        with get_dist(tmpdir) as dist:
+            assert set(dist.metadata.classifiers) == expected
+
+    def test_interpolation(self, tmpdir):
+        fake_env(tmpdir, '[metadata]\ndescription = %(message)s\n')
+        with pytest.raises(configparser.InterpolationMissingOptionError):
+            with get_dist(tmpdir):
+                pass
+
+    def test_non_ascii_1(self, tmpdir):
+        fake_env(tmpdir, '[metadata]\ndescription = éàïôñ\n', encoding='utf-8')
+        with get_dist(tmpdir):
+            pass
+
+    def test_non_ascii_3(self, tmpdir):
+        fake_env(tmpdir, '\n# -*- coding: invalid\n')
+        with get_dist(tmpdir):
+            pass
+
+    def test_non_ascii_4(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '# -*- coding: utf-8\n[metadata]\ndescription = éàïôñ\n',
+            encoding='utf-8',
+        )
+        with get_dist(tmpdir) as dist:
+            assert dist.metadata.description == 'éàïôñ'
+
+    def test_not_utf8(self, tmpdir):
+        """
+        Config files encoded not in UTF-8 will fail
+        """
+        fake_env(
+            tmpdir,
+            '# vim: set fileencoding=iso-8859-15 :\n[metadata]\ndescription = éàïôñ\n',
+            encoding='iso-8859-15',
+        )
+        with pytest.raises(UnicodeDecodeError):
+            with get_dist(tmpdir):
+                pass
+
+    def test_warn_dash_deprecation(self, tmpdir):
+        # warn_dash_deprecation() is a method in setuptools.dist
+        # remove this test and the method when no longer needed
+        fake_env(
+            tmpdir,
+            '[metadata]\n'
+            'author-email = test@test.com\n'
+            'maintainer_email = foo@foo.com\n',
+        )
+        msg = "Usage of dash-separated 'author-email' will not be supported"
+        with pytest.warns(SetuptoolsDeprecationWarning, match=msg):
+            with get_dist(tmpdir) as dist:
+                metadata = dist.metadata
+
+        assert metadata.author_email == 'test@test.com'
+        assert metadata.maintainer_email == 'foo@foo.com'
+
+    def test_make_option_lowercase(self, tmpdir):
+        # remove this test and the method make_option_lowercase() in setuptools.dist
+        # when no longer needed
+        fake_env(tmpdir, '[metadata]\nName = foo\ndescription = Some description\n')
+        msg = "Usage of uppercase key 'Name' in 'metadata' will not be supported"
+        with pytest.warns(SetuptoolsDeprecationWarning, match=msg):
+            with get_dist(tmpdir) as dist:
+                metadata = dist.metadata
+
+        assert metadata.name == 'foo'
+        assert metadata.description == 'Some description'
+
+
+class TestOptions:
+    def test_basic(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[options]\n'
+            'zip_safe = True\n'
+            'include_package_data = yes\n'
+            'package_dir = b=c, =src\n'
+            'packages = pack_a, pack_b.subpack\n'
+            'namespace_packages = pack1, pack2\n'
+            'scripts = bin/one.py, bin/two.py\n'
+            'eager_resources = bin/one.py, bin/two.py\n'
+            'install_requires = docutils>=0.3; pack ==1.1, ==1.3; hey\n'
+            'setup_requires = docutils>=0.3; spack ==1.1, ==1.3; there\n'
+            'dependency_links = http://some.com/here/1, '
+            'http://some.com/there/2\n'
+            'python_requires = >=1.0, !=2.8\n'
+            'py_modules = module1, module2\n',
+        )
+        deprec = pytest.warns(SetuptoolsDeprecationWarning, match="namespace_packages")
+        with deprec, get_dist(tmpdir) as dist:
+            assert dist.zip_safe
+            assert dist.include_package_data
+            assert dist.package_dir == {'': 'src', 'b': 'c'}
+            assert dist.packages == ['pack_a', 'pack_b.subpack']
+            assert dist.namespace_packages == ['pack1', 'pack2']
+            assert dist.scripts == ['bin/one.py', 'bin/two.py']
+            assert dist.dependency_links == ([
+                'http://some.com/here/1',
+                'http://some.com/there/2',
+            ])
+            assert dist.install_requires == ([
+                'docutils>=0.3',
+                'pack==1.1,==1.3',
+                'hey',
+            ])
+            assert dist.setup_requires == ([
+                'docutils>=0.3',
+                'spack ==1.1, ==1.3',
+                'there',
+            ])
+            assert dist.python_requires == '>=1.0, !=2.8'
+            assert dist.py_modules == ['module1', 'module2']
+
+    def test_multiline(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[options]\n'
+            'package_dir = \n'
+            '  b=c\n'
+            '  =src\n'
+            'packages = \n'
+            '  pack_a\n'
+            '  pack_b.subpack\n'
+            'namespace_packages = \n'
+            '  pack1\n'
+            '  pack2\n'
+            'scripts = \n'
+            '  bin/one.py\n'
+            '  bin/two.py\n'
+            'eager_resources = \n'
+            '  bin/one.py\n'
+            '  bin/two.py\n'
+            'install_requires = \n'
+            '  docutils>=0.3\n'
+            '  pack ==1.1, ==1.3\n'
+            '  hey\n'
+            'setup_requires = \n'
+            '  docutils>=0.3\n'
+            '  spack ==1.1, ==1.3\n'
+            '  there\n'
+            'dependency_links = \n'
+            '  http://some.com/here/1\n'
+            '  http://some.com/there/2\n',
+        )
+        deprec = pytest.warns(SetuptoolsDeprecationWarning, match="namespace_packages")
+        with deprec, get_dist(tmpdir) as dist:
+            assert dist.package_dir == {'': 'src', 'b': 'c'}
+            assert dist.packages == ['pack_a', 'pack_b.subpack']
+            assert dist.namespace_packages == ['pack1', 'pack2']
+            assert dist.scripts == ['bin/one.py', 'bin/two.py']
+            assert dist.dependency_links == ([
+                'http://some.com/here/1',
+                'http://some.com/there/2',
+            ])
+            assert dist.install_requires == ([
+                'docutils>=0.3',
+                'pack==1.1,==1.3',
+                'hey',
+            ])
+            assert dist.setup_requires == ([
+                'docutils>=0.3',
+                'spack ==1.1, ==1.3',
+                'there',
+            ])
+
+    def test_package_dir_fail(self, tmpdir):
+        fake_env(tmpdir, '[options]\npackage_dir = a b\n')
+        with get_dist(tmpdir, parse=False) as dist:
+            with pytest.raises(DistutilsOptionError):
+                dist.parse_config_files()
+
+    def test_package_data(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[options.package_data]\n'
+            '* = *.txt, *.rst\n'
+            'hello = *.msg\n'
+            '\n'
+            '[options.exclude_package_data]\n'
+            '* = fake1.txt, fake2.txt\n'
+            'hello = *.dat\n',
+        )
+
+        with get_dist(tmpdir) as dist:
+            assert dist.package_data == {
+                '': ['*.txt', '*.rst'],
+                'hello': ['*.msg'],
+            }
+            assert dist.exclude_package_data == {
+                '': ['fake1.txt', 'fake2.txt'],
+                'hello': ['*.dat'],
+            }
+
+    def test_packages(self, tmpdir):
+        fake_env(tmpdir, '[options]\npackages = find:\n')
+
+        with get_dist(tmpdir) as dist:
+            assert dist.packages == ['fake_package']
+
+    def test_find_directive(self, tmpdir):
+        dir_package, config = fake_env(tmpdir, '[options]\npackages = find:\n')
+
+        make_package_dir('sub_one', dir_package)
+        make_package_dir('sub_two', dir_package)
+
+        with get_dist(tmpdir) as dist:
+            assert set(dist.packages) == set([
+                'fake_package',
+                'fake_package.sub_two',
+                'fake_package.sub_one',
+            ])
+
+        config.write(
+            '[options]\n'
+            'packages = find:\n'
+            '\n'
+            '[options.packages.find]\n'
+            'where = .\n'
+            'include =\n'
+            '    fake_package.sub_one\n'
+            '    two\n'
+        )
+        with get_dist(tmpdir) as dist:
+            assert dist.packages == ['fake_package.sub_one']
+
+        config.write(
+            '[options]\n'
+            'packages = find:\n'
+            '\n'
+            '[options.packages.find]\n'
+            'exclude =\n'
+            '    fake_package.sub_one\n'
+        )
+        with get_dist(tmpdir) as dist:
+            assert set(dist.packages) == set(['fake_package', 'fake_package.sub_two'])
+
+    def test_find_namespace_directive(self, tmpdir):
+        dir_package, config = fake_env(
+            tmpdir, '[options]\npackages = find_namespace:\n'
+        )
+
+        make_package_dir('sub_one', dir_package)
+        make_package_dir('sub_two', dir_package, ns=True)
+
+        with get_dist(tmpdir) as dist:
+            assert set(dist.packages) == {
+                'fake_package',
+                'fake_package.sub_two',
+                'fake_package.sub_one',
+            }
+
+        config.write(
+            '[options]\n'
+            'packages = find_namespace:\n'
+            '\n'
+            '[options.packages.find]\n'
+            'where = .\n'
+            'include =\n'
+            '    fake_package.sub_one\n'
+            '    two\n'
+        )
+        with get_dist(tmpdir) as dist:
+            assert dist.packages == ['fake_package.sub_one']
+
+        config.write(
+            '[options]\n'
+            'packages = find_namespace:\n'
+            '\n'
+            '[options.packages.find]\n'
+            'exclude =\n'
+            '    fake_package.sub_one\n'
+        )
+        with get_dist(tmpdir) as dist:
+            assert set(dist.packages) == {'fake_package', 'fake_package.sub_two'}
+
+    def test_extras_require(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[options.extras_require]\n'
+            'pdf = ReportLab>=1.2; RXP\n'
+            'rest = \n'
+            '  docutils>=0.3\n'
+            '  pack ==1.1, ==1.3\n',
+        )
+
+        with get_dist(tmpdir) as dist:
+            assert dist.extras_require == {
+                'pdf': ['ReportLab>=1.2', 'RXP'],
+                'rest': ['docutils>=0.3', 'pack==1.1,==1.3'],
+            }
+            assert set(dist.metadata.provides_extras) == {'pdf', 'rest'}
+
+    @pytest.mark.parametrize(
+        "config",
+        [
+            "[options.extras_require]\nfoo = bar;python_version<'3'",
+            "[options.extras_require]\nfoo = bar;os_name=='linux'",
+            "[options.extras_require]\nfoo = bar;python_version<'3'\n",
+            "[options.extras_require]\nfoo = bar;os_name=='linux'\n",
+            "[options]\ninstall_requires = bar;python_version<'3'",
+            "[options]\ninstall_requires = bar;os_name=='linux'",
+            "[options]\ninstall_requires = bar;python_version<'3'\n",
+            "[options]\ninstall_requires = bar;os_name=='linux'\n",
+        ],
+    )
+    def test_raises_accidental_env_marker_misconfig(self, config, tmpdir):
+        fake_env(tmpdir, config)
+        match = (
+            r"One of the parsed requirements in `(install_requires|extras_require.+)` "
+            "looks like a valid environment marker.*"
+        )
+        with pytest.raises(InvalidRequirement, match=match):
+            with get_dist(tmpdir) as _:
+                pass
+
+    @pytest.mark.parametrize(
+        "config",
+        [
+            "[options.extras_require]\nfoo = bar;python_version<3",
+            "[options.extras_require]\nfoo = bar;python_version<3\n",
+            "[options]\ninstall_requires = bar;python_version<3",
+            "[options]\ninstall_requires = bar;python_version<3\n",
+        ],
+    )
+    def test_warn_accidental_env_marker_misconfig(self, config, tmpdir):
+        fake_env(tmpdir, config)
+        match = (
+            r"One of the parsed requirements in `(install_requires|extras_require.+)` "
+            "looks like a valid environment marker.*"
+        )
+        with pytest.warns(SetuptoolsDeprecationWarning, match=match):
+            with get_dist(tmpdir) as _:
+                pass
+
+    @pytest.mark.parametrize(
+        "config",
+        [
+            "[options.extras_require]\nfoo =\n    bar;python_version<'3'",
+            "[options.extras_require]\nfoo = bar;baz\nboo = xxx;yyy",
+            "[options.extras_require]\nfoo =\n    bar;python_version<'3'\n",
+            "[options.extras_require]\nfoo = bar;baz\nboo = xxx;yyy\n",
+            "[options.extras_require]\nfoo =\n    bar\n    python_version<3\n",
+            "[options]\ninstall_requires =\n    bar;python_version<'3'",
+            "[options]\ninstall_requires = bar;baz\nboo = xxx;yyy",
+            "[options]\ninstall_requires =\n    bar;python_version<'3'\n",
+            "[options]\ninstall_requires = bar;baz\nboo = xxx;yyy\n",
+            "[options]\ninstall_requires =\n    bar\n    python_version<3\n",
+        ],
+    )
+    @pytest.mark.filterwarnings("error::setuptools.SetuptoolsDeprecationWarning")
+    def test_nowarn_accidental_env_marker_misconfig(self, config, tmpdir, recwarn):
+        fake_env(tmpdir, config)
+        num_warnings = len(recwarn)
+        with get_dist(tmpdir) as _:
+            pass
+        # The examples are valid, no warnings shown
+        assert len(recwarn) == num_warnings
+
+    def test_dash_preserved_extras_require(self, tmpdir):
+        fake_env(tmpdir, '[options.extras_require]\nfoo-a = foo\nfoo_b = test\n')
+
+        with get_dist(tmpdir) as dist:
+            assert dist.extras_require == {'foo-a': ['foo'], 'foo_b': ['test']}
+
+    def test_entry_points(self, tmpdir):
+        _, config = fake_env(
+            tmpdir,
+            '[options.entry_points]\n'
+            'group1 = point1 = pack.module:func, '
+            '.point2 = pack.module2:func_rest [rest]\n'
+            'group2 = point3 = pack.module:func2\n',
+        )
+
+        with get_dist(tmpdir) as dist:
+            assert dist.entry_points == {
+                'group1': [
+                    'point1 = pack.module:func',
+                    '.point2 = pack.module2:func_rest [rest]',
+                ],
+                'group2': ['point3 = pack.module:func2'],
+            }
+
+        expected = (
+            '[blogtool.parsers]\n'
+            '.rst = some.nested.module:SomeClass.some_classmethod[reST]\n'
+        )
+
+        tmpdir.join('entry_points').write(expected)
+
+        # From file.
+        config.write('[options]\nentry_points = file: entry_points\n')
+
+        with get_dist(tmpdir) as dist:
+            assert dist.entry_points == expected
+
+    def test_case_sensitive_entry_points(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[options.entry_points]\n'
+            'GROUP1 = point1 = pack.module:func, '
+            '.point2 = pack.module2:func_rest [rest]\n'
+            'group2 = point3 = pack.module:func2\n',
+        )
+
+        with get_dist(tmpdir) as dist:
+            assert dist.entry_points == {
+                'GROUP1': [
+                    'point1 = pack.module:func',
+                    '.point2 = pack.module2:func_rest [rest]',
+                ],
+                'group2': ['point3 = pack.module:func2'],
+            }
+
+    def test_data_files(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[options.data_files]\n'
+            'cfg =\n'
+            '      a/b.conf\n'
+            '      c/d.conf\n'
+            'data = e/f.dat, g/h.dat\n',
+        )
+
+        with get_dist(tmpdir) as dist:
+            expected = [
+                ('cfg', ['a/b.conf', 'c/d.conf']),
+                ('data', ['e/f.dat', 'g/h.dat']),
+            ]
+            assert sorted(dist.data_files) == sorted(expected)
+
+    def test_data_files_globby(self, tmpdir):
+        fake_env(
+            tmpdir,
+            '[options.data_files]\n'
+            'cfg =\n'
+            '      a/b.conf\n'
+            '      c/d.conf\n'
+            'data = *.dat\n'
+            'icons = \n'
+            '      *.ico\n'
+            'audio = \n'
+            '      *.wav\n'
+            '      sounds.db\n',
+        )
+
+        # Create dummy files for glob()'s sake:
+        tmpdir.join('a.dat').write('')
+        tmpdir.join('b.dat').write('')
+        tmpdir.join('c.dat').write('')
+        tmpdir.join('a.ico').write('')
+        tmpdir.join('b.ico').write('')
+        tmpdir.join('c.ico').write('')
+        tmpdir.join('beep.wav').write('')
+        tmpdir.join('boop.wav').write('')
+        tmpdir.join('sounds.db').write('')
+
+        with get_dist(tmpdir) as dist:
+            expected = [
+                ('cfg', ['a/b.conf', 'c/d.conf']),
+                ('data', ['a.dat', 'b.dat', 'c.dat']),
+                ('icons', ['a.ico', 'b.ico', 'c.ico']),
+                ('audio', ['beep.wav', 'boop.wav', 'sounds.db']),
+            ]
+            assert sorted(dist.data_files) == sorted(expected)
+
+    def test_python_requires_simple(self, tmpdir):
+        fake_env(
+            tmpdir,
+            DALS(
+                """
+            [options]
+            python_requires=>=2.7
+            """
+            ),
+        )
+        with get_dist(tmpdir) as dist:
+            dist.parse_config_files()
+
+    def test_python_requires_compound(self, tmpdir):
+        fake_env(
+            tmpdir,
+            DALS(
+                """
+            [options]
+            python_requires=>=2.7,!=3.0.*
+            """
+            ),
+        )
+        with get_dist(tmpdir) as dist:
+            dist.parse_config_files()
+
+    def test_python_requires_invalid(self, tmpdir):
+        fake_env(
+            tmpdir,
+            DALS(
+                """
+            [options]
+            python_requires=invalid
+            """
+            ),
+        )
+        with pytest.raises(Exception):
+            with get_dist(tmpdir) as dist:
+                dist.parse_config_files()
+
+    def test_cmdclass(self, tmpdir):
+        module_path = Path(tmpdir, "src/custom_build.py")  # auto discovery for src
+        module_path.parent.mkdir(parents=True, exist_ok=True)
+        module_path.write_text(
+            "from distutils.core import Command\nclass CustomCmd(Command): pass\n",
+            encoding="utf-8",
+        )
+
+        setup_cfg = """
+            [options]
+            cmdclass =
+                customcmd = custom_build.CustomCmd
+        """
+        fake_env(tmpdir, inspect.cleandoc(setup_cfg))
+
+        with get_dist(tmpdir) as dist:
+            cmdclass = dist.cmdclass['customcmd']
+            assert cmdclass.__name__ == "CustomCmd"
+            assert cmdclass.__module__ == "custom_build"
+            assert module_path.samefile(inspect.getfile(cmdclass))
+
+    def test_requirements_file(self, tmpdir):
+        fake_env(
+            tmpdir,
+            DALS(
+                """
+            [options]
+            install_requires = file:requirements.txt
+            [options.extras_require]
+            colors = file:requirements-extra.txt
+            """
+            ),
+        )
+
+        tmpdir.join('requirements.txt').write('\ndocutils>=0.3\n\n')
+        tmpdir.join('requirements-extra.txt').write('colorama')
+
+        with get_dist(tmpdir) as dist:
+            assert dist.install_requires == ['docutils>=0.3']
+            assert dist.extras_require == {'colors': ['colorama']}
+
+
+saved_dist_init = _Distribution.__init__
+
+
+class TestExternalSetters:
+    # During creation of the setuptools Distribution() object, we call
+    # the init of the parent distutils Distribution object via
+    # _Distribution.__init__().
+    #
+    # It's possible distutils calls out to various keyword
+    # implementations (e.g. distutils.setup_keywords entry points)
+    # that may set a range of variables.
+    #
+    # This wraps distutils' Distribution.__init__ and simulates
+    # pbr or something else setting these values.
+    def _fake_distribution_init(self, dist, attrs):
+        saved_dist_init(dist, attrs)
+        # see self._DISTUTILS_UNSUPPORTED_METADATA
+        dist.metadata.long_description_content_type = 'text/something'
+        # Test overwrite setup() args
+        dist.metadata.project_urls = {
+            'Link One': 'https://example.com/one/',
+            'Link Two': 'https://example.com/two/',
+        }
+
+    @patch.object(_Distribution, '__init__', autospec=True)
+    def test_external_setters(self, mock_parent_init, tmpdir):
+        mock_parent_init.side_effect = self._fake_distribution_init
+
+        dist = Distribution(attrs={'project_urls': {'will_be': 'ignored'}})
+
+        assert dist.metadata.long_description_content_type == 'text/something'
+        assert dist.metadata.project_urls == {
+            'Link One': 'https://example.com/one/',
+            'Link Two': 'https://example.com/two/',
+        }
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/fixtures.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/fixtures.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5472984b5690572285a84575f6c2d598f06dd11
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/fixtures.py
@@ -0,0 +1,157 @@
+import contextlib
+import os
+import subprocess
+import sys
+from pathlib import Path
+
+import path
+import pytest
+
+from . import contexts, environment
+
+
+@pytest.fixture
+def user_override(monkeypatch):
+    """
+    Override site.USER_BASE and site.USER_SITE with temporary directories in
+    a context.
+    """
+    with contexts.tempdir() as user_base:
+        monkeypatch.setattr('site.USER_BASE', user_base)
+        with contexts.tempdir() as user_site:
+            monkeypatch.setattr('site.USER_SITE', user_site)
+            with contexts.save_user_site_setting():
+                yield
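+
+
+# Example (illustrative sketch): a test opts into the isolated user site by
+# requesting this fixture directly, e.g. ``def test_install(user_override):``,
+# or via ``@pytest.mark.usefixtures("user_override")`` as done in
+# setuptools/tests/test_bdist_egg.py.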
+
+
+@pytest.fixture
+def tmpdir_cwd(tmpdir):
+    with tmpdir.as_cwd() as orig:
+        yield orig
+
+
+@pytest.fixture(autouse=True, scope="session")
+def workaround_xdist_376(request):
+    """
+    Workaround pytest-dev/pytest-xdist#376
+
+    ``pytest-xdist`` tends to inject '' into ``sys.path``, which may break
+    certain isolation expectations. Remove the entry so the import machinery
+    behaves the same irrespective of xdist.
+    """
+    if not request.config.pluginmanager.has_plugin('xdist'):
+        return
+
+    with contextlib.suppress(ValueError):
+        sys.path.remove('')
+
+
+@pytest.fixture
+def sample_project(tmp_path):
+    """
+    Clone the 'sampleproject' and return a path to it.
+    """
+    cmd = ['git', 'clone', 'https://github.com/pypa/sampleproject']
+    try:
+        subprocess.check_call(cmd, cwd=str(tmp_path))
+    except Exception:
+        pytest.skip("Unable to clone sampleproject")
+    return tmp_path / 'sampleproject'
+
+
+# sdist and wheel artifacts should be stable across a round of tests,
+# so we can build them once per session and use the files as "readonly".
+
+# In the case of setuptools, building the wheel without sdist may cause
+# it to contain the `build` directory, and therefore create situations with
+# `setuptools/build/lib/build/lib/...`. To avoid that, build both artifacts at once.
+
+
+def _build_distributions(tmp_path_factory, request):
+    with contexts.session_locked_tmp_dir(
+        request, tmp_path_factory, "dist_build"
+    ) as tmp:  # pragma: no cover
+        sdist = next(tmp.glob("*.tar.gz"), None)
+        wheel = next(tmp.glob("*.whl"), None)
+        if sdist and wheel:
+            return (sdist, wheel)
+
+        # Sanity check: should not create recursive setuptools/build/lib/build/lib/...
+        assert not Path(request.config.rootdir, "build/lib/build").exists()
+
+        subprocess.check_output([
+            sys.executable,
+            "-m",
+            "build",
+            "--outdir",
+            str(tmp),
+            str(request.config.rootdir),
+        ])
+
+        # Sanity check: should not create recursive setuptools/build/lib/build/lib/...
+        assert not Path(request.config.rootdir, "build/lib/build").exists()
+
+        return next(tmp.glob("*.tar.gz")), next(tmp.glob("*.whl"))
+
+
+@pytest.fixture(scope="session")
+def setuptools_sdist(tmp_path_factory, request):
+    prebuilt = os.getenv("PRE_BUILT_SETUPTOOLS_SDIST")
+    if prebuilt and os.path.exists(prebuilt):  # pragma: no cover
+        return Path(prebuilt).resolve()
+
+    sdist, _ = _build_distributions(tmp_path_factory, request)
+    return sdist
+
+
+@pytest.fixture(scope="session")
+def setuptools_wheel(tmp_path_factory, request):
+    prebuilt = os.getenv("PRE_BUILT_SETUPTOOLS_WHEEL")
+    if prebuilt and os.path.exists(prebuilt):  # pragma: no cover
+        return Path(prebuilt).resolve()
+
+    _, wheel = _build_distributions(tmp_path_factory, request)
+    return wheel
+
+
+@pytest.fixture
+def venv(tmp_path, setuptools_wheel):
+    """Virtual env with the version of setuptools under test installed"""
+    env = environment.VirtualEnv()
+    env.root = path.Path(tmp_path / 'venv')
+    env.create_opts = ['--no-setuptools', '--wheel=bundle']
+    # TODO: Use `--no-wheel` when setuptools implements its own bdist_wheel
+    env.req = str(setuptools_wheel)
+    # In some environments (e.g. downstream distro packaging),
+    # where tox isn't used to run tests and PYTHONPATH is set to point to
+    # a specific setuptools codebase, PYTHONPATH will leak into the spawned
+    # processes.
+    # env.create() should install the just-created setuptools
+    # wheel, but it doesn't if it finds another matching setuptools
+    # installation already present on PYTHONPATH:
+    # `setuptools is already installed with the same version as the provided
+    # wheel. Use --force-reinstall to force an installation of the wheel.`
+    # This prevents leaking PYTHONPATH to the created environment.
+    with contexts.environment(PYTHONPATH=None):
+        return env.create()
+
+
+@pytest.fixture
+def venv_without_setuptools(tmp_path):
+    """Virtual env without any version of setuptools installed"""
+    env = environment.VirtualEnv()
+    env.root = path.Path(tmp_path / 'venv_without_setuptools')
+    env.create_opts = ['--no-setuptools', '--no-wheel']
+    env.ensure_env()
+    return env
+
+
+@pytest.fixture
+def bare_venv(tmp_path):
+    """Virtual env without any common packages installed"""
+    env = environment.VirtualEnv()
+    env.root = path.Path(tmp_path / 'bare_venv')
+    env.create_opts = ['--no-setuptools', '--no-pip', '--no-wheel', '--no-seed']
+    env.ensure_env()
+    return env
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/script-with-bom.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/script-with-bom.py
new file mode 100644
index 0000000000000000000000000000000000000000..c074d263c45bcaebe32fdba328d975c73d1ad5ca
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/script-with-bom.py
@@ -0,0 +1 @@
+result = 'passed'
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/server.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/server.py
new file mode 100644
index 0000000000000000000000000000000000000000..623a49a550f161f117d3af2173f91a2af260181d
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/server.py
@@ -0,0 +1,86 @@
+"""Basic http server for tests to simulate PyPI or custom indexes"""
+
+import http.server
+import os
+import threading
+import time
+import urllib.parse
+import urllib.request
+
+
+class IndexServer(http.server.HTTPServer):
+    """Basic single-threaded http server simulating a package index
+
+    You can use this server in unittest like this::
+        s = IndexServer()
+        s.start()
+        index_url = s.base_url() + 'mytestindex'
+        # do some test requests to the index
+        # The index files should be located in setuptools/tests/indexes
+        s.stop()
+    """
+
+    def __init__(
+        self,
+        server_address=('', 0),
+        RequestHandlerClass=http.server.SimpleHTTPRequestHandler,
+    ):
+        http.server.HTTPServer.__init__(self, server_address, RequestHandlerClass)
+        self._run = True
+
+    def start(self):
+        self.thread = threading.Thread(target=self.serve_forever)
+        self.thread.start()
+
+    def stop(self):
+        "Stop the server"
+
+        # Let the server finish the last request and wait for a new one.
+        time.sleep(0.1)
+
+        self.shutdown()
+        self.thread.join()
+        self.socket.close()
+
+    def base_url(self):
+        port = self.server_port
+        return f'http://127.0.0.1:{port}/setuptools/tests/indexes/'
+
+
+class RequestRecorder(http.server.BaseHTTPRequestHandler):
+    def do_GET(self):
+        requests = vars(self.server).setdefault('requests', [])
+        requests.append(self)
+        self.send_response(200, 'OK')
+
+
+class MockServer(http.server.HTTPServer, threading.Thread):
+    """
+    A simple HTTP Server that records the requests made to it.
+    """
+
+    def __init__(self, server_address=('', 0), RequestHandlerClass=RequestRecorder):
+        http.server.HTTPServer.__init__(self, server_address, RequestHandlerClass)
+        threading.Thread.__init__(self)
+        self.daemon = True
+        self.requests = []
+
+    def run(self):
+        self.serve_forever()
+
+    @property
+    def netloc(self):
+        return f'localhost:{self.server_port}'
+
+    @property
+    def url(self):
+        return f'http://{self.netloc}/'
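+
+
+# Typical use in a test (illustrative sketch, not a definitive API): since
+# MockServer is also a daemon thread, ``start()`` serves in the background
+# and the handler instances recorded by RequestRecorder accumulate on
+# ``server.requests``:
+#
+#     server = MockServer()
+#     server.start()
+#     # ... point the code under test at ``server.url`` ...
+#     assert server.requests  # one recorded handler per GET received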
+
+
+def path_to_url(path, authority=None):
+    """Convert a path to a file: URL."""
+    path = os.path.normpath(os.path.abspath(path))
+    base = 'file:'
+    if authority is not None:
+        base += '//' + authority
+    return urllib.parse.urljoin(base, urllib.request.pathname2url(path))
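+
+
+# Rough sketch of the expected behavior on a POSIX layout (illustrative
+# values only):
+#
+#     path_to_url('/tmp/idx')                        -> 'file:///tmp/idx'
+#     path_to_url('/tmp/idx', authority='localhost') -> 'file://localhost/tmp/idx'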
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/test_bdist_egg.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_bdist_egg.py
new file mode 100644
index 0000000000000000000000000000000000000000..036167dd951e70ad543775529d5ce3f6d6544c71
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_bdist_egg.py
@@ -0,0 +1,73 @@
+"""develop tests"""
+
+import os
+import re
+import zipfile
+
+import pytest
+
+from setuptools.dist import Distribution
+
+from . import contexts
+
+SETUP_PY = """\
+from setuptools import setup
+
+setup(py_modules=['hi'])
+"""
+
+
+@pytest.fixture
+def setup_context(tmpdir):
+    with (tmpdir / 'setup.py').open('w') as f:
+        f.write(SETUP_PY)
+    with (tmpdir / 'hi.py').open('w') as f:
+        f.write('1\n')
+    with tmpdir.as_cwd():
+        yield tmpdir
+
+
+class Test:
+    @pytest.mark.usefixtures("user_override")
+    @pytest.mark.usefixtures("setup_context")
+    def test_bdist_egg(self):
+        dist = Distribution(
+            dict(
+                script_name='setup.py',
+                script_args=['bdist_egg'],
+                name='foo',
+                py_modules=['hi'],
+            )
+        )
+        os.makedirs(os.path.join('build', 'src'))
+        with contexts.quiet():
+            dist.parse_command_line()
+            dist.run_commands()
+
+        # let's see if we got our egg at the right place
+        [content] = os.listdir('dist')
+        assert re.match(r'foo-0\.0\.0-py[23]\.\d+\.egg$', content)
+
+    @pytest.mark.xfail(
+        os.environ.get('PYTHONDONTWRITEBYTECODE', False),
+        reason="Byte code disabled",
+    )
+    @pytest.mark.usefixtures("user_override")
+    @pytest.mark.usefixtures("setup_context")
+    def test_exclude_source_files(self):
+        dist = Distribution(
+            dict(
+                script_name='setup.py',
+                script_args=['bdist_egg', '--exclude-source-files'],
+                py_modules=['hi'],
+            )
+        )
+        with contexts.quiet():
+            dist.parse_command_line()
+            dist.run_commands()
+        [dist_name] = os.listdir('dist')
+        dist_filename = os.path.join('dist', dist_name)
+        with zipfile.ZipFile(dist_filename) as zf:
+            names = [zi.filename for zi in zf.filelist]
+        assert 'hi.pyc' in names
+        assert 'hi.py' not in names
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/test_build.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_build.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0f1d9dcf21bafe9dc82a76d373b366ddeecfcec
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_build.py
@@ -0,0 +1,33 @@
+from setuptools import Command
+from setuptools.command.build import build
+from setuptools.dist import Distribution
+
+
+def test_distribution_gives_setuptools_build_obj(tmpdir_cwd):
+    """
+    Check that the setuptools Distribution uses the
+    setuptools-specific build object.
+    """
+
+    dist = Distribution(
+        dict(
+            script_name='setup.py',
+            script_args=['build'],
+            packages=[],
+            package_data={'': ['path/*']},
+        )
+    )
+    assert isinstance(dist.get_command_obj("build"), build)
+
+
+class Subcommand(Command):
+    """Dummy command to be used in tests"""
+
+    def initialize_options(self):
+        pass
+
+    def finalize_options(self):
+        pass
+
+    def run(self):
+        raise NotImplementedError("just to check if the command runs")
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/test_build_meta.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_build_meta.py
new file mode 100644
index 0000000000000000000000000000000000000000..121f409057b2ef84f388db3afa93ef613bef9a37
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_build_meta.py
@@ -0,0 +1,970 @@
+import contextlib
+import importlib
+import os
+import re
+import shutil
+import signal
+import sys
+import tarfile
+from concurrent import futures
+from pathlib import Path
+from typing import Any, Callable
+from zipfile import ZipFile
+
+import pytest
+from jaraco import path
+from packaging.requirements import Requirement
+
+from .textwrap import DALS
+
+SETUP_SCRIPT_STUB = "__import__('setuptools').setup()"
+
+
+TIMEOUT = int(os.getenv("TIMEOUT_BACKEND_TEST", "180"))  # in seconds
+IS_PYPY = '__pypy__' in sys.builtin_module_names
+
+
+pytestmark = pytest.mark.skipif(
+    sys.platform == "win32" and IS_PYPY,
+    reason="The combination of PyPy + Windows + pytest-xdist + ProcessPoolExecutor "
+    "is flaky and problematic",
+)
+
+
+class BuildBackendBase:
+    def __init__(self, cwd='.', env=None, backend_name='setuptools.build_meta'):
+        self.cwd = cwd
+        self.env = env or {}
+        self.backend_name = backend_name
+
+
+class BuildBackend(BuildBackendBase):
+    """PEP 517 Build Backend"""
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.pool = futures.ProcessPoolExecutor(max_workers=1)
+
+    def __getattr__(self, name: str) -> Callable[..., Any]:
+        """Handles arbitrary function invocations on the build backend."""
+
+        def method(*args, **kw):
+            root = os.path.abspath(self.cwd)
+            caller = BuildBackendCaller(root, self.env, self.backend_name)
+            pid = None
+            try:
+                pid = self.pool.submit(os.getpid).result(TIMEOUT)
+                return self.pool.submit(caller, name, *args, **kw).result(TIMEOUT)
+            except futures.TimeoutError:
+                self.pool.shutdown(wait=False)  # doesn't stop already running processes
+                self._kill(pid)
+                pytest.xfail(f"Backend did not respond before timeout ({TIMEOUT} s)")
+            except (futures.process.BrokenProcessPool, MemoryError, OSError):
+                if IS_PYPY:
+                    pytest.xfail("PyPy frequently fails tests with ProcessPoolExector")
+                raise
+
+        return method
+
+    def _kill(self, pid):
+        if pid is None:
+            return
+        with contextlib.suppress(ProcessLookupError, OSError):
+            os.kill(pid, signal.SIGTERM if os.name == "nt" else signal.SIGKILL)
+
+
+class BuildBackendCaller(BuildBackendBase):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        (self.backend_name, _, self.backend_obj) = self.backend_name.partition(':')
+
+    def __call__(self, name, *args, **kw):
+        """Handles arbitrary function invocations on the build backend."""
+        os.chdir(self.cwd)
+        os.environ.update(self.env)
+        mod = importlib.import_module(self.backend_name)
+
+        if self.backend_obj:
+            backend = getattr(mod, self.backend_obj)
+        else:
+            backend = mod
+
+        return getattr(backend, name)(*args, **kw)
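+
+
+# Usage sketch (illustrative; the tests below exercise the same calls via
+# fixtures): attribute access on BuildBackend returns a proxy that runs the
+# named PEP 517 hook in the worker process, e.g.
+#
+#     backend = BuildBackend(cwd='path/to/project')
+#     backend.get_requires_for_build_wheel()  # executed via ProcessPoolExecutor
+#     backend.build_wheel('dist')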
+
+
+defns = [
+    {  # simple setup.py script
+        'setup.py': DALS(
+            """
+            __import__('setuptools').setup(
+                name='foo',
+                version='0.0.0',
+                py_modules=['hello'],
+                setup_requires=['six'],
+            )
+            """
+        ),
+        'hello.py': DALS(
+            """
+            def run():
+                print('hello')
+            """
+        ),
+    },
+    {  # setup.py that relies on __name__
+        'setup.py': DALS(
+            """
+            assert __name__ == '__main__'
+            __import__('setuptools').setup(
+                name='foo',
+                version='0.0.0',
+                py_modules=['hello'],
+                setup_requires=['six'],
+            )
+            """
+        ),
+        'hello.py': DALS(
+            """
+            def run():
+                print('hello')
+            """
+        ),
+    },
+    {  # setup.py script that runs arbitrary code
+        'setup.py': DALS(
+            """
+            variable = True
+            def function():
+                return variable
+            assert variable
+            __import__('setuptools').setup(
+                name='foo',
+                version='0.0.0',
+                py_modules=['hello'],
+                setup_requires=['six'],
+            )
+            """
+        ),
+        'hello.py': DALS(
+            """
+            def run():
+                print('hello')
+            """
+        ),
+    },
+    {  # setup.py script that constructs temp files to be included in the distribution
+        'setup.py': DALS(
+            """
+            # Some packages construct files on the fly, include them in the package,
+            # and immediately remove them after `setup()` (e.g. pybind11==2.9.1).
+            # Therefore, we cannot use `distutils.core.run_setup(..., stop_after=...)`
+            # to obtain a distribution object first, and then run the distutils
+            # commands later, because these files will be removed in the meantime.
+
+            with open('world.py', 'w', encoding="utf-8") as f:
+                f.write('x = 42')
+
+            try:
+                __import__('setuptools').setup(
+                    name='foo',
+                    version='0.0.0',
+                    py_modules=['world'],
+                    setup_requires=['six'],
+                )
+            finally:
+                # Some packages will clean temporary files
+                __import__('os').unlink('world.py')
+            """
+        ),
+    },
+    {  # setup.cfg only
+        'setup.cfg': DALS(
+            """
+        [metadata]
+        name = foo
+        version = 0.0.0
+
+        [options]
+        py_modules=hello
+        setup_requires=six
+        """
+        ),
+        'hello.py': DALS(
+            """
+        def run():
+            print('hello')
+        """
+        ),
+    },
+    {  # setup.cfg and setup.py
+        'setup.cfg': DALS(
+            """
+        [metadata]
+        name = foo
+        version = 0.0.0
+
+        [options]
+        py_modules=hello
+        setup_requires=six
+        """
+        ),
+        'setup.py': "__import__('setuptools').setup()",
+        'hello.py': DALS(
+            """
+        def run():
+            print('hello')
+        """
+        ),
+    },
+]
+
+
+class TestBuildMetaBackend:
+    backend_name = 'setuptools.build_meta'
+
+    def get_build_backend(self):
+        return BuildBackend(backend_name=self.backend_name)
+
+    @pytest.fixture(params=defns)
+    def build_backend(self, tmpdir, request):
+        path.build(request.param, prefix=str(tmpdir))
+        with tmpdir.as_cwd():
+            yield self.get_build_backend()
+
+    def test_get_requires_for_build_wheel(self, build_backend):
+        actual = build_backend.get_requires_for_build_wheel()
+        expected = ['six']
+        assert sorted(actual) == sorted(expected)
+
+    def test_get_requires_for_build_sdist(self, build_backend):
+        actual = build_backend.get_requires_for_build_sdist()
+        expected = ['six']
+        assert sorted(actual) == sorted(expected)
+
+    def test_build_wheel(self, build_backend):
+        dist_dir = os.path.abspath('pip-wheel')
+        os.makedirs(dist_dir)
+        wheel_name = build_backend.build_wheel(dist_dir)
+
+        wheel_file = os.path.join(dist_dir, wheel_name)
+        assert os.path.isfile(wheel_file)
+
+        # Temporary files should be removed
+        assert not os.path.isfile('world.py')
+
+        with ZipFile(wheel_file) as zipfile:
+            wheel_contents = set(zipfile.namelist())
+
+        # Each one of the examples has a single module
+        # that should be included in the distribution
+        python_scripts = (f for f in wheel_contents if f.endswith('.py'))
+        modules = [f for f in python_scripts if not f.endswith('setup.py')]
+        assert len(modules) == 1
+
+    @pytest.mark.parametrize('build_type', ('wheel', 'sdist'))
+    def test_build_with_existing_file_present(self, build_type, tmpdir_cwd):
+        # Building an sdist/wheel should still succeed if there's
+        # already an sdist/wheel in the destination directory.
+        files = {
+            'setup.py': "from setuptools import setup\nsetup()",
+            'VERSION': "0.0.1",
+            'setup.cfg': DALS(
+                """
+                [metadata]
+                name = foo
+                version = file: VERSION
+                """
+            ),
+            'pyproject.toml': DALS(
+                """
+                [build-system]
+                requires = ["setuptools", "wheel"]
+                build-backend = "setuptools.build_meta"
+                """
+            ),
+        }
+
+        path.build(files)
+
+        dist_dir = os.path.abspath('preexisting-' + build_type)
+
+        build_backend = self.get_build_backend()
+        build_method = getattr(build_backend, 'build_' + build_type)
+
+        # Build a first sdist/wheel.
+        # Note: this also checks that the destination directory is
+        # successfully created if it does not exist already.
+        first_result = build_method(dist_dir)
+
+        # Change version.
+        with open("VERSION", "wt", encoding="utf-8") as version_file:
+            version_file.write("0.0.2")
+
+        # Build a *second* sdist/wheel.
+        second_result = build_method(dist_dir)
+
+        assert os.path.isfile(os.path.join(dist_dir, first_result))
+        assert first_result != second_result
+
+        # And if rebuilding the exact same sdist/wheel?
+        open(os.path.join(dist_dir, second_result), 'wb').close()
+        third_result = build_method(dist_dir)
+        assert third_result == second_result
+        assert os.path.getsize(os.path.join(dist_dir, third_result)) > 0
+
+    @pytest.mark.parametrize("setup_script", [None, SETUP_SCRIPT_STUB])
+    def test_build_with_pyproject_config(self, tmpdir, setup_script):
+        files = {
+            'pyproject.toml': DALS(
+                """
+                [build-system]
+                requires = ["setuptools", "wheel"]
+                build-backend = "setuptools.build_meta"
+
+                [project]
+                name = "foo"
+                license = {text = "MIT"}
+                description = "This is a Python package"
+                dynamic = ["version", "readme"]
+                classifiers = [
+                    "Development Status :: 5 - Production/Stable",
+                    "Intended Audience :: Developers"
+                ]
+                urls = {Homepage = "http://github.com"}
+                dependencies = [
+                    "appdirs",
+                ]
+
+                [project.optional-dependencies]
+                all = [
+                    "tomli>=1",
+                    "pyscaffold>=4,<5",
+                    'importlib; python_version == "2.6"',
+                ]
+
+                [project.scripts]
+                foo = "foo.cli:main"
+
+                [tool.setuptools]
+                zip-safe = false
+                package-dir = {"" = "src"}
+                packages = {find = {where = ["src"]}}
+                license-files = ["LICENSE*"]
+
+                [tool.setuptools.dynamic]
+                version = {attr = "foo.__version__"}
+                readme = {file = "README.rst"}
+
+                [tool.distutils.sdist]
+                formats = "gztar"
+                """
+            ),
+            "MANIFEST.in": DALS(
+                """
+                global-include *.py *.txt
+                global-exclude *.py[cod]
+                """
+            ),
+            "README.rst": "This is a ``README``",
+            "LICENSE.txt": "---- placeholder MIT license ----",
+            "src": {
+                "foo": {
+                    "__init__.py": "__version__ = '0.1'",
+                    "__init__.pyi": "__version__: str",
+                    "cli.py": "def main(): print('hello world')",
+                    "data.txt": "def main(): print('hello world')",
+                    "py.typed": "",
+                }
+            },
+        }
+        if setup_script:
+            files["setup.py"] = setup_script
+
+        build_backend = self.get_build_backend()
+        with tmpdir.as_cwd():
+            path.build(files)
+            sdist_path = build_backend.build_sdist("temp")
+            wheel_file = build_backend.build_wheel("temp")
+
+        with tarfile.open(os.path.join(tmpdir, "temp", sdist_path)) as tar:
+            sdist_contents = set(tar.getnames())
+
+        with ZipFile(os.path.join(tmpdir, "temp", wheel_file)) as zipfile:
+            wheel_contents = set(zipfile.namelist())
+            metadata = str(zipfile.read("foo-0.1.dist-info/METADATA"), "utf-8")
+            license = str(zipfile.read("foo-0.1.dist-info/LICENSE.txt"), "utf-8")
+            epoints = str(zipfile.read("foo-0.1.dist-info/entry_points.txt"), "utf-8")
+
+        assert sdist_contents - {"foo-0.1/setup.py"} == {
+            'foo-0.1',
+            'foo-0.1/LICENSE.txt',
+            'foo-0.1/MANIFEST.in',
+            'foo-0.1/PKG-INFO',
+            'foo-0.1/README.rst',
+            'foo-0.1/pyproject.toml',
+            'foo-0.1/setup.cfg',
+            'foo-0.1/src',
+            'foo-0.1/src/foo',
+            'foo-0.1/src/foo/__init__.py',
+            'foo-0.1/src/foo/__init__.pyi',
+            'foo-0.1/src/foo/cli.py',
+            'foo-0.1/src/foo/data.txt',
+            'foo-0.1/src/foo/py.typed',
+            'foo-0.1/src/foo.egg-info',
+            'foo-0.1/src/foo.egg-info/PKG-INFO',
+            'foo-0.1/src/foo.egg-info/SOURCES.txt',
+            'foo-0.1/src/foo.egg-info/dependency_links.txt',
+            'foo-0.1/src/foo.egg-info/entry_points.txt',
+            'foo-0.1/src/foo.egg-info/requires.txt',
+            'foo-0.1/src/foo.egg-info/top_level.txt',
+            'foo-0.1/src/foo.egg-info/not-zip-safe',
+        }
+        assert wheel_contents == {
+            "foo/__init__.py",
+            "foo/__init__.pyi",  # include type information by default
+            "foo/cli.py",
+            "foo/data.txt",  # include_package_data defaults to True
+            "foo/py.typed",  # include type information by default
+            "foo-0.1.dist-info/LICENSE.txt",
+            "foo-0.1.dist-info/METADATA",
+            "foo-0.1.dist-info/WHEEL",
+            "foo-0.1.dist-info/entry_points.txt",
+            "foo-0.1.dist-info/top_level.txt",
+            "foo-0.1.dist-info/RECORD",
+        }
+        assert license == "---- placeholder MIT license ----"
+
+        for line in (
+            "Summary: This is a Python package",
+            "License: MIT",
+            "Classifier: Intended Audience :: Developers",
+            "Requires-Dist: appdirs",
+            "Requires-Dist: " + str(Requirement('tomli>=1 ; extra == "all"')),
+            "Requires-Dist: "
+            + str(Requirement('importlib; python_version=="2.6" and extra =="all"')),
+        ):
+            assert line in metadata, (line, metadata)
+
+        assert metadata.strip().endswith("This is a ``README``")
+        assert epoints.strip() == "[console_scripts]\nfoo = foo.cli:main"
+
+    def test_static_metadata_in_pyproject_config(self, tmpdir):
+        # Make sure static metadata in pyproject.toml is not overwritten by setup.py
+        # as required by PEP 621
+        files = {
+            'pyproject.toml': DALS(
+                """
+                [build-system]
+                requires = ["setuptools", "wheel"]
+                build-backend = "setuptools.build_meta"
+
+                [project]
+                name = "foo"
+                description = "This is a Python package"
+                version = "42"
+                dependencies = ["six"]
+                """
+            ),
+            'hello.py': DALS(
+                """
+                def run():
+                    print('hello')
+                """
+            ),
+            'setup.py': DALS(
+                """
+                __import__('setuptools').setup(
+                    name='bar',
+                    version='13',
+                )
+                """
+            ),
+        }
+        build_backend = self.get_build_backend()
+        with tmpdir.as_cwd():
+            path.build(files)
+            sdist_path = build_backend.build_sdist("temp")
+            wheel_file = build_backend.build_wheel("temp")
+
+        assert (tmpdir / "temp/foo-42.tar.gz").exists()
+        assert (tmpdir / "temp/foo-42-py3-none-any.whl").exists()
+        assert not (tmpdir / "temp/bar-13.tar.gz").exists()
+        assert not (tmpdir / "temp/bar-42.tar.gz").exists()
+        assert not (tmpdir / "temp/foo-13.tar.gz").exists()
+        assert not (tmpdir / "temp/bar-13-py3-none-any.whl").exists()
+        assert not (tmpdir / "temp/bar-42-py3-none-any.whl").exists()
+        assert not (tmpdir / "temp/foo-13-py3-none-any.whl").exists()
+
+        with tarfile.open(os.path.join(tmpdir, "temp", sdist_path)) as tar:
+            pkg_info = str(tar.extractfile('foo-42/PKG-INFO').read(), "utf-8")
+            members = tar.getnames()
+            assert "bar-13/PKG-INFO" not in members
+
+        with ZipFile(os.path.join(tmpdir, "temp", wheel_file)) as zipfile:
+            metadata = str(zipfile.read("foo-42.dist-info/METADATA"), "utf-8")
+            members = zipfile.namelist()
+            assert "bar-13.dist-info/METADATA" not in members
+
+        for file in pkg_info, metadata:
+            for line in ("Name: foo", "Version: 42"):
+                assert line in file
+            for line in ("Name: bar", "Version: 13"):
+                assert line not in file
+
+    def test_build_sdist(self, build_backend):
+        dist_dir = os.path.abspath('pip-sdist')
+        os.makedirs(dist_dir)
+        sdist_name = build_backend.build_sdist(dist_dir)
+
+        assert os.path.isfile(os.path.join(dist_dir, sdist_name))
+
+    def test_prepare_metadata_for_build_wheel(self, build_backend):
+        dist_dir = os.path.abspath('pip-dist-info')
+        os.makedirs(dist_dir)
+
+        dist_info = build_backend.prepare_metadata_for_build_wheel(dist_dir)
+
+        assert os.path.isfile(os.path.join(dist_dir, dist_info, 'METADATA'))
+
+    def test_prepare_metadata_inplace(self, build_backend):
+        """
+        Some users might pass metadata_directory pre-populated with `.tox` or `.venv`.
+        See issue #3523.
+        """
+        for pre_existing in [
+            ".tox/python/lib/python3.10/site-packages/attrs-22.1.0.dist-info",
+            ".tox/python/lib/python3.10/site-packages/autocommand-2.2.1.dist-info",
+            ".nox/python/lib/python3.10/site-packages/build-0.8.0.dist-info",
+            ".venv/python3.10/site-packages/click-8.1.3.dist-info",
+            "venv/python3.10/site-packages/distlib-0.3.5.dist-info",
+            "env/python3.10/site-packages/docutils-0.19.dist-info",
+        ]:
+            os.makedirs(pre_existing, exist_ok=True)
+        dist_info = build_backend.prepare_metadata_for_build_wheel(".")
+        assert os.path.isfile(os.path.join(dist_info, 'METADATA'))
+
+    def test_build_sdist_explicit_dist(self, build_backend):
+        # Explicitly specifying the dist folder should work;
+        # the sdist_directory folder and ``--dist-dir`` can be the same.
+        dist_dir = os.path.abspath('dist')
+        sdist_name = build_backend.build_sdist(dist_dir)
+        assert os.path.isfile(os.path.join(dist_dir, sdist_name))
+
+    def test_build_sdist_version_change(self, build_backend):
+        sdist_into_directory = os.path.abspath("out_sdist")
+        os.makedirs(sdist_into_directory)
+
+        sdist_name = build_backend.build_sdist(sdist_into_directory)
+        assert os.path.isfile(os.path.join(sdist_into_directory, sdist_name))
+
+        # If setup.py changes, a subsequent call to the build meta backend
+        # should still succeed, given that the sdist_directory the frontend
+        # specifies is empty.
+        setup_loc = os.path.abspath("setup.py")
+        if not os.path.exists(setup_loc):
+            setup_loc = os.path.abspath("setup.cfg")
+
+        with open(setup_loc, 'rt', encoding="utf-8") as file_handler:
+            content = file_handler.read()
+        with open(setup_loc, 'wt', encoding="utf-8") as file_handler:
+            file_handler.write(content.replace("version='0.0.0'", "version='0.0.1'"))
+
+        shutil.rmtree(sdist_into_directory)
+        os.makedirs(sdist_into_directory)
+
+        sdist_name = build_backend.build_sdist("out_sdist")
+        assert os.path.isfile(os.path.join(os.path.abspath("out_sdist"), sdist_name))
+
+    def test_build_sdist_pyproject_toml_exists(self, tmpdir_cwd):
+        files = {
+            'setup.py': DALS(
+                """
+                __import__('setuptools').setup(
+                    name='foo',
+                    version='0.0.0',
+                    py_modules=['hello']
+                )"""
+            ),
+            'hello.py': '',
+            'pyproject.toml': DALS(
+                """
+                [build-system]
+                requires = ["setuptools", "wheel"]
+                build-backend = "setuptools.build_meta"
+                """
+            ),
+        }
+        path.build(files)
+        build_backend = self.get_build_backend()
+        targz_path = build_backend.build_sdist("temp")
+        with tarfile.open(os.path.join("temp", targz_path)) as tar:
+            assert any('pyproject.toml' in name for name in tar.getnames())
+
+    def test_build_sdist_setup_py_exists(self, tmpdir_cwd):
+        # If build_sdist is called from a script other than setup.py,
+        # ensure setup.py is included
+        path.build(defns[0])
+
+        build_backend = self.get_build_backend()
+        targz_path = build_backend.build_sdist("temp")
+        with tarfile.open(os.path.join("temp", targz_path)) as tar:
+            assert any('setup.py' in name for name in tar.getnames())
+
+    def test_build_sdist_setup_py_manifest_excluded(self, tmpdir_cwd):
+        # Ensure that MANIFEST.in can exclude setup.py
+        files = {
+            'setup.py': DALS(
+                """
+        __import__('setuptools').setup(
+            name='foo',
+            version='0.0.0',
+            py_modules=['hello']
+        )"""
+            ),
+            'hello.py': '',
+            'MANIFEST.in': DALS(
+                """
+        exclude setup.py
+        """
+            ),
+        }
+
+        path.build(files)
+
+        build_backend = self.get_build_backend()
+        targz_path = build_backend.build_sdist("temp")
+        with tarfile.open(os.path.join("temp", targz_path)) as tar:
+            assert not any('setup.py' in name for name in tar.getnames())
+
+    def test_build_sdist_builds_targz_even_if_zip_indicated(self, tmpdir_cwd):
+        files = {
+            'setup.py': DALS(
+                """
+                __import__('setuptools').setup(
+                    name='foo',
+                    version='0.0.0',
+                    py_modules=['hello']
+                )"""
+            ),
+            'hello.py': '',
+            'setup.cfg': DALS(
+                """
+                [sdist]
+                formats=zip
+                """
+            ),
+        }
+
+        path.build(files)
+
+        build_backend = self.get_build_backend()
+        build_backend.build_sdist("temp")
+
+    _relative_path_import_files = {
+        'setup.py': DALS(
+            """
+            __import__('setuptools').setup(
+                name='foo',
+                version=__import__('hello').__version__,
+                py_modules=['hello']
+            )"""
+        ),
+        'hello.py': '__version__ = "0.0.0"',
+        'setup.cfg': DALS(
+            """
+            [sdist]
+            formats=zip
+            """
+        ),
+    }
+
+    def test_build_sdist_relative_path_import(self, tmpdir_cwd):
+        path.build(self._relative_path_import_files)
+        build_backend = self.get_build_backend()
+        with pytest.raises(ImportError, match="^No module named 'hello'$"):
+            build_backend.build_sdist("temp")
+
+    _simple_pyproject_example = {
+        "pyproject.toml": DALS(
+            """
+            [project]
+            name = "proj"
+            version = "42"
+            """
+        ),
+        "src": {"proj": {"__init__.py": ""}},
+    }
+
+    def _assert_link_tree(self, parent_dir):
+        """All files in the directory should be either symlinks or hard links"""
+        files = list(Path(parent_dir).glob("**/*"))
+        assert files  # Should not be empty
+        for file in files:
+            # A hard link has st_nlink > 1; every file has st_nlink >= 1,
+            # so comparing against 0 would make this assertion vacuous.
+            assert file.is_symlink() or os.stat(file).st_nlink > 1
+
+    def test_editable_without_config_settings(self, tmpdir_cwd):
+        """
+        Sanity check to ensure tests with --mode=strict are different from the ones
+        without --mode.
+
+        --mode=strict should create a local directory with a package tree.
+        The directory should not get created otherwise.
+        """
+        path.build(self._simple_pyproject_example)
+        build_backend = self.get_build_backend()
+        assert not Path("build").exists()
+        build_backend.build_editable("temp")
+        assert not Path("build").exists()
+
+    def test_build_wheel_inplace(self, tmpdir_cwd):
+        config_settings = {"--build-option": ["build_ext", "--inplace"]}
+        path.build(self._simple_pyproject_example)
+        build_backend = self.get_build_backend()
+        assert not Path("build").exists()
+        Path("build").mkdir()
+        build_backend.prepare_metadata_for_build_wheel("build", config_settings)
+        build_backend.build_wheel("build", config_settings)
+        assert Path("build/proj-42-py3-none-any.whl").exists()
+
+    @pytest.mark.parametrize("config_settings", [{"editable-mode": "strict"}])
+    def test_editable_with_config_settings(self, tmpdir_cwd, config_settings):
+        path.build({**self._simple_pyproject_example, '_meta': {}})
+        assert not Path("build").exists()
+        build_backend = self.get_build_backend()
+        build_backend.prepare_metadata_for_build_editable("_meta", config_settings)
+        build_backend.build_editable("temp", config_settings, "_meta")
+        self._assert_link_tree(next(Path("build").glob("__editable__.*")))
+
+    @pytest.mark.parametrize(
+        ("setup_literal", "requirements"),
+        [
+            ("'foo'", ['foo']),
+            ("['foo']", ['foo']),
+            (r"'foo\n'", ['foo']),
+            (r"'foo\n\n'", ['foo']),
+            ("['foo', 'bar']", ['foo', 'bar']),
+            (r"'# Has a comment line\nfoo'", ['foo']),
+            (r"'foo # Has an inline comment'", ['foo']),
+            (r"'foo \\\n >=3.0'", ['foo>=3.0']),
+            (r"'foo\nbar'", ['foo', 'bar']),
+            (r"'foo\nbar\n'", ['foo', 'bar']),
+            (r"['foo\n', 'bar\n']", ['foo', 'bar']),
+        ],
+    )
+    @pytest.mark.parametrize('use_wheel', [True, False])
+    def test_setup_requires(self, setup_literal, requirements, use_wheel, tmpdir_cwd):
+        files = {
+            'setup.py': DALS(
+                """
+                from setuptools import setup
+
+                setup(
+                    name="qux",
+                    version="0.0.0",
+                    py_modules=["hello"],
+                    setup_requires={setup_literal},
+                )
+            """
+            ).format(setup_literal=setup_literal),
+            'hello.py': DALS(
+                """
+            def run():
+                print('hello')
+            """
+            ),
+        }
+
+        path.build(files)
+
+        build_backend = self.get_build_backend()
+
+        if use_wheel:
+            get_requires = build_backend.get_requires_for_build_wheel
+        else:
+            get_requires = build_backend.get_requires_for_build_sdist
+
+        # Ensure that the build requirements are properly parsed
+        expected = sorted(requirements)
+        actual = get_requires()
+
+        assert expected == sorted(actual)
+
+    def test_setup_requires_with_auto_discovery(self, tmpdir_cwd):
+        # Make sure patches introduced to retrieve setup_requires don't accidentally
+        # activate auto-discovery and cause problems due to the incomplete set of
+        # attributes passed to MinimalDistribution
+        files = {
+            'pyproject.toml': DALS(
+                """
+                [project]
+                name = "proj"
+                version = "42"
+            """
+            ),
+            "setup.py": DALS(
+                """
+                __import__('setuptools').setup(
+                    setup_requires=["foo"],
+                    py_modules = ["hello", "world"]
+                )
+            """
+            ),
+            'hello.py': "'hello'",
+            'world.py': "'world'",
+        }
+        path.build(files)
+        build_backend = self.get_build_backend()
+        setup_requires = build_backend.get_requires_for_build_wheel()
+        assert setup_requires == ["foo"]
+
+    def test_dont_install_setup_requires(self, tmpdir_cwd):
+        files = {
+            'setup.py': DALS(
+                """
+                from setuptools import setup
+
+                setup(
+                    name="qux",
+                    version="0.0.0",
+                    py_modules=["hello"],
+                    setup_requires=["does-not-exist >99"],
+                )
+                """
+            ),
+            'hello.py': DALS(
+                """
+                def run():
+                    print('hello')
+                """
+            ),
+        }
+
+        path.build(files)
+
+        build_backend = self.get_build_backend()
+
+        dist_dir = os.path.abspath('pip-dist-info')
+        os.makedirs(dist_dir)
+
+        # does-not-exist can't be satisfied, so if it attempts to install
+        # setup_requires, it will fail.
+        build_backend.prepare_metadata_for_build_wheel(dist_dir)
+
+    _sys_argv_0_passthrough = {
+        'setup.py': DALS(
+            """
+            import os
+            import sys
+
+            __import__('setuptools').setup(
+                name='foo',
+                version='0.0.0',
+            )
+
+            sys_argv = os.path.abspath(sys.argv[0])
+            file_path = os.path.abspath('setup.py')
+            assert sys_argv == file_path
+            """
+        )
+    }
+
+    def test_sys_argv_passthrough(self, tmpdir_cwd):
+        path.build(self._sys_argv_0_passthrough)
+        build_backend = self.get_build_backend()
+        with pytest.raises(AssertionError):
+            build_backend.build_sdist("temp")
+
+    _setup_py_file_abspath = {
+        'setup.py': DALS(
+            """
+            import os
+            assert os.path.isabs(__file__)
+            __import__('setuptools').setup(
+                name='foo',
+                version='0.0.0',
+                py_modules=['hello'],
+                setup_requires=['six'],
+            )
+            """
+        )
+    }
+
+    def test_setup_py_file_abspath(self, tmpdir_cwd):
+        path.build(self._setup_py_file_abspath)
+        build_backend = self.get_build_backend()
+        build_backend.build_sdist("temp")
+
+    @pytest.mark.parametrize('build_hook', ('build_sdist', 'build_wheel'))
+    def test_build_with_empty_setuppy(self, build_backend, build_hook):
+        files = {'setup.py': ''}
+        path.build(files)
+
+        msg = re.escape('No distribution was found.')
+        with pytest.raises(ValueError, match=msg):
+            getattr(build_backend, build_hook)("temp")
+
+
+class TestBuildMetaLegacyBackend(TestBuildMetaBackend):
+    backend_name = 'setuptools.build_meta:__legacy__'
+
+    # build_meta_legacy-specific tests
+    def test_build_sdist_relative_path_import(self, tmpdir_cwd):
+        # This must fail in build_meta, but must pass in build_meta_legacy
+        path.build(self._relative_path_import_files)
+
+        build_backend = self.get_build_backend()
+        build_backend.build_sdist("temp")
+
+    def test_sys_argv_passthrough(self, tmpdir_cwd):
+        path.build(self._sys_argv_0_passthrough)
+
+        build_backend = self.get_build_backend()
+        build_backend.build_sdist("temp")
+
+
+def test_legacy_editable_install(venv, tmpdir, tmpdir_cwd):
+    pyproject = """
+    [build-system]
+    requires = ["setuptools"]
+    build-backend = "setuptools.build_meta"
+    [project]
+    name = "myproj"
+    version = "42"
+    """
+    path.build({"pyproject.toml": DALS(pyproject), "mymod.py": ""})
+
+    # First: sanity check
+    cmd = ["pip", "install", "--no-build-isolation", "-e", "."]
+    output = venv.run(cmd, cwd=tmpdir).lower()
+    assert "running setup.py develop for myproj" not in output
+    assert "created wheel for myproj" in output
+
+    # Then: real test
+    env = {**os.environ, "SETUPTOOLS_ENABLE_FEATURES": "legacy-editable"}
+    cmd = ["pip", "install", "--no-build-isolation", "-e", "."]
+    output = venv.run(cmd, cwd=tmpdir, env=env).lower()
+    assert "running setup.py develop for myproj" in output
+
+
+@pytest.mark.filterwarnings("ignore::setuptools.SetuptoolsDeprecationWarning")
+def test_sys_exit_0_in_setuppy(monkeypatch, tmp_path):
+    """Setuptools should be resilient to setup.py with ``sys.exit(0)`` (#3973)."""
+    monkeypatch.chdir(tmp_path)
+    setuppy = """
+        import sys, setuptools
+        setuptools.setup(name='foo', version='0.0.0')
+        sys.exit(0)
+        """
+    (tmp_path / "setup.py").write_text(DALS(setuppy), encoding="utf-8")
+    backend = BuildBackend(backend_name="setuptools.build_meta")
+    assert backend.get_requires_for_build_wheel() == []
+
+
+def test_system_exit_in_setuppy(monkeypatch, tmp_path):
+    monkeypatch.chdir(tmp_path)
+    setuppy = "import sys; sys.exit('some error')"
+    (tmp_path / "setup.py").write_text(setuppy, encoding="utf-8")
+    with pytest.raises(SystemExit, match="some error"):
+        backend = BuildBackend(backend_name="setuptools.build_meta")
+        backend.get_requires_for_build_wheel()
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/test_build_py.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_build_py.py
new file mode 100644
index 0000000000000000000000000000000000000000..e64cfa2e4bee38371c0e9194c4dc41457d492f7b
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_build_py.py
@@ -0,0 +1,480 @@
+import os
+import shutil
+import stat
+import warnings
+from pathlib import Path
+from unittest.mock import Mock
+
+import jaraco.path
+import pytest
+
+from setuptools import SetuptoolsDeprecationWarning
+from setuptools.dist import Distribution
+
+from .textwrap import DALS
+
+
+def test_directories_in_package_data_glob(tmpdir_cwd):
+    """
+    Directories matching the glob in package_data should
+    not be included in the package data.
+
+    Regression test for #261.
+    """
+    dist = Distribution(
+        dict(
+            script_name='setup.py',
+            script_args=['build_py'],
+            packages=[''],
+            package_data={'': ['path/*']},
+        )
+    )
+    os.makedirs('path/subpath')
+    dist.parse_command_line()
+    dist.run_commands()
+
+
+def test_recursive_in_package_data_glob(tmpdir_cwd):
+    """
+    Files matching recursive globs (**) in package_data should
+    be included in the package data.
+
+    #1806
+    """
+    dist = Distribution(
+        dict(
+            script_name='setup.py',
+            script_args=['build_py'],
+            packages=[''],
+            package_data={'': ['path/**/data']},
+        )
+    )
+    os.makedirs('path/subpath/subsubpath')
+    open('path/subpath/subsubpath/data', 'wb').close()
+
+    dist.parse_command_line()
+    dist.run_commands()
+
+    assert stat.S_ISREG(os.stat('build/lib/path/subpath/subsubpath/data').st_mode), (
+        "File is not included"
+    )
+
+
+def test_read_only(tmpdir_cwd):
+    """
+    Ensure read-only flag is not preserved in copy
+    for package modules and package data, as that
+    causes problems with deleting read-only files on
+    Windows.
+
+    #1451
+    """
+    dist = Distribution(
+        dict(
+            script_name='setup.py',
+            script_args=['build_py'],
+            packages=['pkg'],
+            package_data={'pkg': ['data.dat']},
+        )
+    )
+    os.makedirs('pkg')
+    open('pkg/__init__.py', 'wb').close()
+    open('pkg/data.dat', 'wb').close()
+    os.chmod('pkg/__init__.py', stat.S_IREAD)
+    os.chmod('pkg/data.dat', stat.S_IREAD)
+    dist.parse_command_line()
+    dist.run_commands()
+    shutil.rmtree('build')
+
+
+@pytest.mark.xfail(
+    'platform.system() == "Windows"',
+    reason="On Windows, files do not have executable bits",
+    raises=AssertionError,
+    strict=True,
+)
+def test_executable_data(tmpdir_cwd):
+    """
+    Ensure executable bit is preserved in copy for
+    package data, as users rely on it for scripts.
+
+    #2041
+    """
+    dist = Distribution(
+        dict(
+            script_name='setup.py',
+            script_args=['build_py'],
+            packages=['pkg'],
+            package_data={'pkg': ['run-me']},
+        )
+    )
+    os.makedirs('pkg')
+    open('pkg/__init__.py', 'wb').close()
+    open('pkg/run-me', 'wb').close()
+    os.chmod('pkg/run-me', 0o700)
+
+    dist.parse_command_line()
+    dist.run_commands()
+
+    assert os.stat('build/lib/pkg/run-me').st_mode & stat.S_IEXEC, (
+        "Script is not executable"
+    )
+
+
+EXAMPLE_WITH_MANIFEST = {
+    "setup.cfg": DALS(
+        """
+        [metadata]
+        name = mypkg
+        version = 42
+
+        [options]
+        include_package_data = True
+        packages = find:
+
+        [options.packages.find]
+        exclude = *.tests*
+        """
+    ),
+    "mypkg": {
+        "__init__.py": "",
+        "resource_file.txt": "",
+        "tests": {
+            "__init__.py": "",
+            "test_mypkg.py": "",
+            "test_file.txt": "",
+        },
+    },
+    "MANIFEST.in": DALS(
+        """
+        global-include *.py *.txt
+        global-exclude *.py[cod]
+        prune dist
+        prune build
+        prune *.egg-info
+        """
+    ),
+}
+
+
+def test_excluded_subpackages(tmpdir_cwd):
+    jaraco.path.build(EXAMPLE_WITH_MANIFEST)
+    dist = Distribution({"script_name": "%PEP 517%"})
+    dist.parse_config_files()
+
+    build_py = dist.get_command_obj("build_py")
+
+    msg = r"Python recognizes 'mypkg\.tests' as an importable package"
+    with pytest.warns(SetuptoolsDeprecationWarning, match=msg):
+        # TODO: To fix #3260 we need some transition period to deprecate the
+        # existing behavior of `include_package_data`. After the transition, we
+        # should remove the warning and fix the behaviour.
+
+        if os.getenv("SETUPTOOLS_USE_DISTUTILS") == "stdlib":
+            # pytest.warns reset the warning filter temporarily
+            # https://github.com/pytest-dev/pytest/issues/4011#issuecomment-423494810
+            warnings.filterwarnings(
+                "ignore",
+                "'encoding' argument not specified",
+                module="distutils.text_file",
+                # This warning is already fixed in pypa/distutils but not in stdlib
+            )
+
+        build_py.finalize_options()
+        build_py.run()
+
+    build_dir = Path(dist.get_command_obj("build_py").build_lib)
+    assert (build_dir / "mypkg/__init__.py").exists()
+    assert (build_dir / "mypkg/resource_file.txt").exists()
+
+    # Setuptools is configured to ignore `mypkg.tests`, therefore the following
+    # files/dirs should not be included in the distribution.
+    for f in [
+        "mypkg/tests/__init__.py",
+        "mypkg/tests/test_mypkg.py",
+        "mypkg/tests/test_file.txt",
+        "mypkg/tests",
+    ]:
+        with pytest.raises(AssertionError):
+            # TODO: Enforce the following assertion once #3260 is fixed
+            # (remove context manager and the following xfail).
+            assert not (build_dir / f).exists()
+
+    pytest.xfail("#3260")
+
+
+@pytest.mark.filterwarnings("ignore::setuptools.SetuptoolsDeprecationWarning")
+def test_existing_egg_info(tmpdir_cwd, monkeypatch):
+    """When provided with the ``existing_egg_info_dir`` attribute, build_py should not
+    attempt to run egg_info again.
+    """
+    # == Pre-condition ==
+    # Generate an egg-info dir
+    jaraco.path.build(EXAMPLE_WITH_MANIFEST)
+    dist = Distribution({"script_name": "%PEP 517%"})
+    dist.parse_config_files()
+    assert dist.include_package_data
+
+    egg_info = dist.get_command_obj("egg_info")
+    dist.run_command("egg_info")
+    egg_info_dir = next(Path(egg_info.egg_base).glob("*.egg-info"))
+    assert egg_info_dir.is_dir()
+
+    # == Setup ==
+    build_py = dist.get_command_obj("build_py")
+    build_py.finalize_options()
+    egg_info = dist.get_command_obj("egg_info")
+    egg_info_run = Mock(side_effect=egg_info.run)
+    monkeypatch.setattr(egg_info, "run", egg_info_run)
+
+    # == Remove caches ==
+    # egg_info is called when build_py looks for data_files, which gets cached.
+    # We need to ensure it is not cached yet, otherwise it may impact the tests
+    build_py.__dict__.pop('data_files', None)
+    dist.reinitialize_command(egg_info)
+
+    # == Sanity check ==
+    # Ensure that if existing_egg_info is not given, build_py attempts to run egg_info
+    build_py.existing_egg_info_dir = None
+    build_py.run()
+    egg_info_run.assert_called()
+
+    # == Remove caches ==
+    egg_info_run.reset_mock()
+    build_py.__dict__.pop('data_files', None)
+    dist.reinitialize_command(egg_info)
+
+    # == Actual test ==
+    # Ensure that if existing_egg_info_dir is given, egg_info doesn't run
+    build_py.existing_egg_info_dir = egg_info_dir
+    build_py.run()
+    egg_info_run.assert_not_called()
+    assert build_py.data_files
+
+    # Make sure the list of outputs is actually OK
+    outputs = [x.replace(os.sep, "/") for x in build_py.get_outputs()]
+    assert outputs  # a ``map`` object would be truthy even when empty
+    example = str(Path(build_py.build_lib, "mypkg/__init__.py")).replace(os.sep, "/")
+    assert example in outputs
+
+
+EXAMPLE_ARBITRARY_MAPPING = {
+    "pyproject.toml": DALS(
+        """
+        [project]
+        name = "mypkg"
+        version = "42"
+
+        [tool.setuptools]
+        packages = ["mypkg", "mypkg.sub1", "mypkg.sub2", "mypkg.sub2.nested"]
+
+        [tool.setuptools.package-dir]
+        "" = "src"
+        "mypkg.sub2" = "src/mypkg/_sub2"
+        "mypkg.sub2.nested" = "other"
+        """
+    ),
+    "src": {
+        "mypkg": {
+            "__init__.py": "",
+            "resource_file.txt": "",
+            "sub1": {
+                "__init__.py": "",
+                "mod1.py": "",
+            },
+            "_sub2": {
+                "mod2.py": "",
+            },
+        },
+    },
+    "other": {
+        "__init__.py": "",
+        "mod3.py": "",
+    },
+    "MANIFEST.in": DALS(
+        """
+        global-include *.py *.txt
+        global-exclude *.py[cod]
+        """
+    ),
+}
+
+
+def test_get_outputs(tmpdir_cwd):
+    jaraco.path.build(EXAMPLE_ARBITRARY_MAPPING)
+    dist = Distribution({"script_name": "%test%"})
+    dist.parse_config_files()
+
+    build_py = dist.get_command_obj("build_py")
+    build_py.editable_mode = True
+    build_py.ensure_finalized()
+    build_lib = build_py.build_lib.replace(os.sep, "/")
+    outputs = {x.replace(os.sep, "/") for x in build_py.get_outputs()}
+    assert outputs == {
+        f"{build_lib}/mypkg/__init__.py",
+        f"{build_lib}/mypkg/resource_file.txt",
+        f"{build_lib}/mypkg/sub1/__init__.py",
+        f"{build_lib}/mypkg/sub1/mod1.py",
+        f"{build_lib}/mypkg/sub2/mod2.py",
+        f"{build_lib}/mypkg/sub2/nested/__init__.py",
+        f"{build_lib}/mypkg/sub2/nested/mod3.py",
+    }
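+    # In editable mode, get_output_mapping() should map each build output back
+    # to its source file, resolving the custom package-dir aliases declared in
+    # the pyproject.toml above (e.g. mypkg.sub2 -> src/mypkg/_sub2):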
+    mapping = {
+        k.replace(os.sep, "/"): v.replace(os.sep, "/")
+        for k, v in build_py.get_output_mapping().items()
+    }
+    assert mapping == {
+        f"{build_lib}/mypkg/__init__.py": "src/mypkg/__init__.py",
+        f"{build_lib}/mypkg/resource_file.txt": "src/mypkg/resource_file.txt",
+        f"{build_lib}/mypkg/sub1/__init__.py": "src/mypkg/sub1/__init__.py",
+        f"{build_lib}/mypkg/sub1/mod1.py": "src/mypkg/sub1/mod1.py",
+        f"{build_lib}/mypkg/sub2/mod2.py": "src/mypkg/_sub2/mod2.py",
+        f"{build_lib}/mypkg/sub2/nested/__init__.py": "other/__init__.py",
+        f"{build_lib}/mypkg/sub2/nested/mod3.py": "other/mod3.py",
+    }
+
+
+class TestTypeInfoFiles:
+    PYPROJECTS = {
+        "default_pyproject": DALS(
+            """
+            [project]
+            name = "foo"
+            version = "1"
+            """
+        ),
+        "dont_include_package_data": DALS(
+            """
+            [project]
+            name = "foo"
+            version = "1"
+
+            [tool.setuptools]
+            include-package-data = false
+            """
+        ),
+        "exclude_type_info": DALS(
+            """
+            [project]
+            name = "foo"
+            version = "1"
+
+            [tool.setuptools]
+            include-package-data = false
+
+            [tool.setuptools.exclude-package-data]
+            "*" = ["py.typed", "*.pyi"]
+            """
+        ),
+    }
+
+    EXAMPLES = {
+        "simple_namespace": {
+            "directory_structure": {
+                "foo": {
+                    "bar.pyi": "",
+                    "py.typed": "",
+                    "__init__.py": "",
+                }
+            },
+            "expected_type_files": {"foo/bar.pyi", "foo/py.typed"},
+        },
+        "nested_inside_namespace": {
+            "directory_structure": {
+                "foo": {
+                    "bar": {
+                        "py.typed": "",
+                        "mod.pyi": "",
+                    }
+                }
+            },
+            "expected_type_files": {"foo/bar/mod.pyi", "foo/bar/py.typed"},
+        },
+        "namespace_nested_inside_regular": {
+            "directory_structure": {
+                "foo": {
+                    "namespace": {
+                        "foo.pyi": "",
+                    },
+                    "__init__.pyi": "",
+                    "py.typed": "",
+                }
+            },
+            "expected_type_files": {
+                "foo/namespace/foo.pyi",
+                "foo/__init__.pyi",
+                "foo/py.typed",
+            },
+        },
+    }
+
+    @pytest.mark.parametrize(
+        "pyproject",
+        [
+            "default_pyproject",
+            pytest.param(
+                "dont_include_package_data",
+                marks=pytest.mark.xfail(reason="pypa/setuptools#4350"),
+            ),
+        ],
+    )
+    @pytest.mark.parametrize("example", EXAMPLES.keys())
+    def test_type_files_included_by_default(self, tmpdir_cwd, pyproject, example):
+        structure = {
+            **self.EXAMPLES[example]["directory_structure"],
+            "pyproject.toml": self.PYPROJECTS[pyproject],
+        }
+        expected_type_files = self.EXAMPLES[example]["expected_type_files"]
+        jaraco.path.build(structure)
+
+        build_py = get_finalized_build_py()
+        outputs = get_outputs(build_py)
+        assert expected_type_files <= outputs
+
+    @pytest.mark.parametrize("pyproject", ["exclude_type_info"])
+    @pytest.mark.parametrize("example", EXAMPLES.keys())
+    def test_type_files_can_be_excluded(self, tmpdir_cwd, pyproject, example):
+        structure = {
+            **self.EXAMPLES[example]["directory_structure"],
+            "pyproject.toml": self.PYPROJECTS[pyproject],
+        }
+        expected_type_files = self.EXAMPLES[example]["expected_type_files"]
+        jaraco.path.build(structure)
+
+        build_py = get_finalized_build_py()
+        outputs = get_outputs(build_py)
+        assert expected_type_files.isdisjoint(outputs)
+
+    def test_stub_only_package(self, tmpdir_cwd):
+        structure = {
+            "pyproject.toml": DALS(
+                """
+                [project]
+                name = "foo-stubs"
+                version = "1"
+                """
+            ),
+            "foo-stubs": {"__init__.pyi": "", "bar.pyi": ""},
+        }
+        expected_type_files = {"foo-stubs/__init__.pyi", "foo-stubs/bar.pyi"}
+        jaraco.path.build(structure)
+
+        build_py = get_finalized_build_py()
+        outputs = get_outputs(build_py)
+        assert expected_type_files <= outputs
+
+
+def get_finalized_build_py(script_name="%build_py-test%"):
+    dist = Distribution({"script_name": script_name})
+    dist.parse_config_files()
+    build_py = dist.get_command_obj("build_py")
+    build_py.finalize_options()
+    return build_py
+
+
+def get_outputs(build_py):
+    build_dir = Path(build_py.build_lib)
+    return {
+        os.path.relpath(x, build_dir).replace(os.sep, "/")
+        for x in build_py.get_outputs()
+    }
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/test_core_metadata.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_core_metadata.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1edb79b404dd85a38b074cf52b4299049f304c7
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_core_metadata.py
@@ -0,0 +1,577 @@
+from __future__ import annotations
+
+import functools
+import importlib
+import io
+from email import message_from_string
+from email.generator import Generator
+from email.message import EmailMessage, Message
+from email.parser import Parser
+from email.policy import EmailPolicy
+from inspect import cleandoc
+from pathlib import Path
+from unittest.mock import Mock
+
+import pytest
+from packaging.metadata import Metadata
+from packaging.requirements import Requirement
+
+from setuptools import _reqs, sic
+from setuptools._core_metadata import rfc822_escape, rfc822_unescape
+from setuptools.command.egg_info import egg_info, write_requirements
+from setuptools.config import expand, setupcfg
+from setuptools.dist import Distribution
+
+from .config.downloads import retrieve_file, urls_from_file
+
+EXAMPLE_BASE_INFO = dict(
+    name="package",
+    version="0.0.1",
+    author="Foo Bar",
+    author_email="foo@bar.net",
+    long_description="Long\ndescription",
+    description="Short description",
+    keywords=["one", "two"],
+)
+
+
+@pytest.mark.parametrize(
+    ("content", "result"),
+    (
+        pytest.param(
+            "Just a single line",
+            None,
+            id="single_line",
+        ),
+        pytest.param(
+            "Multiline\nText\nwithout\nextra indents\n",
+            None,
+            id="multiline",
+        ),
+        pytest.param(
+            "Multiline\n    With\n\nadditional\n  indentation",
+            None,
+            id="multiline_with_indentation",
+        ),
+        pytest.param(
+            "  Leading whitespace",
+            "Leading whitespace",
+            id="remove_leading_whitespace",
+        ),
+        pytest.param(
+            "  Leading whitespace\nIn\n    Multiline comment",
+            "Leading whitespace\nIn\n    Multiline comment",
+            id="remove_leading_whitespace_multiline",
+        ),
+    ),
+)
+def test_rfc822_unescape(content, result):
+    assert (result or content) == rfc822_unescape(rfc822_escape(content))
+
+
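+# A standalone, worked instance of the round-trip exercised above (it mirrors
+# the "remove_leading_whitespace_multiline" case): escaping indents the
+# continuation lines so a multi-line value survives RFC 822 header folding,
+# and unescaping drops that indentation plus any leading whitespace on the
+# first line. The exact indent width is an implementation detail, so the
+# assertions below avoid depending on it.
+def test_rfc822_escape_roundtrip_example():
+    text = "  Leading whitespace\nIn\n    Multiline comment"
+    escaped = rfc822_escape(text)
+    # The first line is kept as-is; only the line separators gain indentation.
+    assert escaped.splitlines()[0] == "  Leading whitespace"
+    assert rfc822_unescape(escaped) == "Leading whitespace\nIn\n    Multiline comment"
+
+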
+def __read_test_cases():
+    base = EXAMPLE_BASE_INFO
+
+    params = functools.partial(dict, base)
+
+    return [
+        ('Metadata Version 1.0', params()),
+        (
+            'Metadata Version 1.0: Short long description',
+            params(
+                long_description='Short long description',
+            ),
+        ),
+        (
+            'Metadata Version 1.1: Classifiers',
+            params(
+                classifiers=[
+                    'Programming Language :: Python :: 3',
+                    'Programming Language :: Python :: 3.7',
+                    'License :: OSI Approved :: MIT License',
+                ],
+            ),
+        ),
+        (
+            'Metadata Version 1.1: Download URL',
+            params(
+                download_url='https://example.com',
+            ),
+        ),
+        (
+            'Metadata Version 1.2: Requires-Python',
+            params(
+                python_requires='>=3.7',
+            ),
+        ),
+        pytest.param(
+            'Metadata Version 1.2: Project-URL',
+            params(project_urls=dict(Foo='https://example.bar')),
+            marks=pytest.mark.xfail(
+                reason="Issue #1578: project_urls not read",
+            ),
+        ),
+        (
+            'Metadata Version 2.1: Long Description Content Type',
+            params(
+                long_description_content_type='text/x-rst; charset=UTF-8',
+            ),
+        ),
+        (
+            'License',
+            params(
+                license='MIT',
+            ),
+        ),
+        (
+            'License multiline',
+            params(
+                license='This is a long license \nover multiple lines',
+            ),
+        ),
+        pytest.param(
+            'Metadata Version 2.1: Provides Extra',
+            params(provides_extras=['foo', 'bar']),
+            marks=pytest.mark.xfail(reason="provides_extras not read"),
+        ),
+        (
+            'Missing author',
+            dict(
+                name='foo',
+                version='1.0.0',
+                author_email='snorri@sturluson.name',
+            ),
+        ),
+        (
+            'Missing author e-mail',
+            dict(
+                name='foo',
+                version='1.0.0',
+                author='Snorri Sturluson',
+            ),
+        ),
+        (
+            'Missing author and e-mail',
+            dict(
+                name='foo',
+                version='1.0.0',
+            ),
+        ),
+        (
+            'Bypass normalized version',
+            dict(
+                name='foo',
+                version=sic('1.0.0a'),
+            ),
+        ),
+    ]
+
+
+@pytest.mark.parametrize(("name", "attrs"), __read_test_cases())
+def test_read_metadata(name, attrs):
+    dist = Distribution(attrs)
+    metadata_out = dist.metadata
+    dist_class = metadata_out.__class__
+
+    # Write to PKG_INFO and then load into a new metadata object
+    PKG_INFO = io.StringIO()
+
+    metadata_out.write_pkg_file(PKG_INFO)
+    PKG_INFO.seek(0)
+    pkg_info = PKG_INFO.read()
+    assert _valid_metadata(pkg_info)
+
+    PKG_INFO.seek(0)
+    metadata_in = dist_class()
+    metadata_in.read_pkg_file(PKG_INFO)
+
+    tested_attrs = [
+        ('name', dist_class.get_name),
+        ('version', dist_class.get_version),
+        ('author', dist_class.get_contact),
+        ('author_email', dist_class.get_contact_email),
+        ('metadata_version', dist_class.get_metadata_version),
+        ('provides', dist_class.get_provides),
+        ('description', dist_class.get_description),
+        ('long_description', dist_class.get_long_description),
+        ('download_url', dist_class.get_download_url),
+        ('keywords', dist_class.get_keywords),
+        ('platforms', dist_class.get_platforms),
+        ('obsoletes', dist_class.get_obsoletes),
+        ('requires', dist_class.get_requires),
+        ('classifiers', dist_class.get_classifiers),
+        ('project_urls', lambda s: getattr(s, 'project_urls', {})),
+        ('provides_extras', lambda s: getattr(s, 'provides_extras', {})),
+    ]
+
+    for attr, getter in tested_attrs:
+        assert getter(metadata_in) == getter(metadata_out)
+
+
+def __maintainer_test_cases():
+    attrs = {"name": "package", "version": "1.0", "description": "xxx"}
+
+    def merge_dicts(d1, d2):
+        d1 = d1.copy()
+        d1.update(d2)
+
+        return d1
+
+    return [
+        ('No author, no maintainer', attrs.copy()),
+        (
+            'Author (no e-mail), no maintainer',
+            merge_dicts(attrs, {'author': 'Author Name'}),
+        ),
+        (
+            'Author (e-mail), no maintainer',
+            merge_dicts(
+                attrs, {'author': 'Author Name', 'author_email': 'author@name.com'}
+            ),
+        ),
+        (
+            'No author, maintainer (no e-mail)',
+            merge_dicts(attrs, {'maintainer': 'Maintainer Name'}),
+        ),
+        (
+            'No author, maintainer (e-mail)',
+            merge_dicts(
+                attrs,
+                {
+                    'maintainer': 'Maintainer Name',
+                    'maintainer_email': 'maintainer@name.com',
+                },
+            ),
+        ),
+        (
+            'Author (no e-mail), Maintainer (no-email)',
+            merge_dicts(
+                attrs, {'author': 'Author Name', 'maintainer': 'Maintainer Name'}
+            ),
+        ),
+        (
+            'Author (e-mail), Maintainer (e-mail)',
+            merge_dicts(
+                attrs,
+                {
+                    'author': 'Author Name',
+                    'author_email': 'author@name.com',
+                    'maintainer': 'Maintainer Name',
+                    'maintainer_email': 'maintainer@name.com',
+                },
+            ),
+        ),
+        (
+            'No author (e-mail), no maintainer (e-mail)',
+            merge_dicts(
+                attrs,
+                {
+                    'author_email': 'author@name.com',
+                    'maintainer_email': 'maintainer@name.com',
+                },
+            ),
+        ),
+        ('Author unicode', merge_dicts(attrs, {'author': '鉄沢寛'})),
+        ('Maintainer unicode', merge_dicts(attrs, {'maintainer': 'Jan Łukasiewicz'})),
+    ]
+
+
+@pytest.mark.parametrize(("name", "attrs"), __maintainer_test_cases())
+def test_maintainer_author(name, attrs, tmpdir):
+    tested_keys = {
+        'author': 'Author',
+        'author_email': 'Author-email',
+        'maintainer': 'Maintainer',
+        'maintainer_email': 'Maintainer-email',
+    }
+
+    # Generate a PKG-INFO file
+    dist = Distribution(attrs)
+    fn = tmpdir.mkdir('pkg_info')
+    fn_s = str(fn)
+
+    dist.metadata.write_pkg_info(fn_s)
+
+    with open(str(fn.join('PKG-INFO')), 'r', encoding='utf-8') as f:
+        pkg_info = f.read()
+
+    assert _valid_metadata(pkg_info)
+
+    # Drop blank lines and strip lines from default description
+    raw_pkg_lines = pkg_info.splitlines()
+    pkg_lines = list(filter(None, raw_pkg_lines[:-2]))
+
+    pkg_lines_set = set(pkg_lines)
+
+    # Duplicate lines should not be generated
+    assert len(pkg_lines) == len(pkg_lines_set)
+
+    for fkey, dkey in tested_keys.items():
+        val = attrs.get(fkey, None)
+        if val is None:
+            for line in pkg_lines:
+                assert not line.startswith(dkey + ':')
+        else:
+            line = f'{dkey}: {val}'
+            assert line in pkg_lines_set
+
+
+class TestParityWithMetadataFromPyPaWheel:
+    def base_example(self):
+        attrs = dict(
+            **EXAMPLE_BASE_INFO,
+            # Example with complex requirement definition
+            python_requires=">=3.8",
+            install_requires="""
+            packaging==23.2
+            more-itertools==8.8.0; extra == "other"
+            jaraco.text==3.7.0
+            importlib-resources==5.10.2; python_version<"3.8"
+            importlib-metadata==6.0.0 ; python_version<"3.8"
+            colorama>=0.4.4; sys_platform == "win32"
+            """,
+            extras_require={
+                "testing": """
+                    pytest >= 6
+                    pytest-checkdocs >= 2.4
+                    tomli ; \\
+                            # Using stdlib when possible
+                            python_version < "3.11"
+                    ini2toml[lite]>=0.9
+                    """,
+                "other": [],
+            },
+        )
+        # A PKG-INFO file is generated from this distribution later on (see _get_pkginfo)
+        return Distribution(attrs)
+
+    def test_requires_dist(self, tmp_path):
+        dist = self.base_example()
+        pkg_info = _get_pkginfo(dist)
+        assert _valid_metadata(pkg_info)
+
+        # Ensure Requires-Dist is present
+        expected = [
+            'Metadata-Version:',
+            'Requires-Python: >=3.8',
+            'Provides-Extra: other',
+            'Provides-Extra: testing',
+            'Requires-Dist: tomli; python_version < "3.11" and extra == "testing"',
+            'Requires-Dist: more-itertools==8.8.0; extra == "other"',
+            'Requires-Dist: ini2toml[lite]>=0.9; extra == "testing"',
+        ]
+        for line in expected:
+            assert line in pkg_info
+
+    HERE = Path(__file__).parent
+    EXAMPLES_FILE = HERE / "config/setupcfg_examples.txt"
+
+    @pytest.fixture(params=[None, *urls_from_file(EXAMPLES_FILE)])
+    def dist(self, request, monkeypatch, tmp_path):
+        """Example of distribution with arbitrary configuration"""
+        monkeypatch.chdir(tmp_path)
+        monkeypatch.setattr(expand, "read_attr", Mock(return_value="0.42"))
+        monkeypatch.setattr(expand, "read_files", Mock(return_value="hello world"))
+        if request.param is None:
+            yield self.base_example()
+        else:
+            # Real-world usage
+            config = retrieve_file(request.param)
+            yield setupcfg.apply_configuration(Distribution({}), config)
+
+    @pytest.mark.uses_network
+    def test_equivalent_output(self, tmp_path, dist):
+        """Ensure output from setuptools is equivalent to the one from `pypa/wheel`"""
+        # Generate a METADATA file using pypa/wheel for comparison
+        wheel_metadata = importlib.import_module("wheel.metadata")
+        pkginfo_to_metadata = getattr(wheel_metadata, "pkginfo_to_metadata", None)
+
+        if pkginfo_to_metadata is None:  # pragma: nocover
+            pytest.xfail(
+                "wheel.metadata.pkginfo_to_metadata is undefined, "
+                "(this is likely to be caused by API changes in pypa/wheel"
+            )
+
+        # Generate a simplified "egg-info" dir for pypa/wheel to convert
+        pkg_info = _get_pkginfo(dist)
+        egg_info_dir = tmp_path / "pkg.egg-info"
+        egg_info_dir.mkdir(parents=True)
+        (egg_info_dir / "PKG-INFO").write_text(pkg_info, encoding="utf-8")
+        write_requirements(egg_info(dist), egg_info_dir, egg_info_dir / "requires.txt")
+
+        # Get pypa/wheel generated METADATA but normalize requirements formatting
+        metadata_msg = pkginfo_to_metadata(egg_info_dir, egg_info_dir / "PKG-INFO")
+        metadata_str = _normalize_metadata(metadata_msg)
+        pkg_info_msg = message_from_string(pkg_info)
+        pkg_info_str = _normalize_metadata(pkg_info_msg)
+
+        # Compare setuptools PKG-INFO x pypa/wheel METADATA
+        assert metadata_str == pkg_info_str
+
+        # Make sure it parses/serializes well in pypa/wheel
+        _assert_roundtrip_message(pkg_info)
+
+
+class TestPEP643:
+    STATIC_CONFIG = {
+        "setup.cfg": cleandoc(
+            """
+            [metadata]
+            name = package
+            version = 0.0.1
+            author = Foo Bar
+            author_email = foo@bar.net
+            long_description = Long
+                               description
+            description = Short description
+            keywords = one, two
+            platforms = abcd
+            [options]
+            install_requires = requests
+            """
+        ),
+        "pyproject.toml": cleandoc(
+            """
+            [project]
+            name = "package"
+            version = "0.0.1"
+            authors = [
+              {name = "Foo Bar", email = "foo@bar.net"}
+            ]
+            description = "Short description"
+            readme = {text = "Long\\ndescription", content-type = "text/plain"}
+            keywords = ["one", "two"]
+            dependencies = ["requests"]
+            [tool.setuptools]
+            provides = ["abcd"]
+            obsoletes = ["abcd"]
+            """
+        ),
+    }
+
+    @pytest.mark.parametrize("file", STATIC_CONFIG.keys())
+    def test_static_config_has_no_dynamic(self, file, tmpdir_cwd):
+        Path(file).write_text(self.STATIC_CONFIG[file], encoding="utf-8")
+        metadata = _get_metadata()
+        assert metadata.get_all("Dynamic") is None
+        assert metadata.get_all("dynamic") is None
+
+    @pytest.mark.parametrize("file", STATIC_CONFIG.keys())
+    @pytest.mark.parametrize(
+        "fields",
+        [
+            # Single dynamic field
+            {"requires-python": ("python_requires", ">=3.12")},
+            {"author-email": ("author_email", "snoopy@peanuts.com")},
+            {"keywords": ("keywords", ["hello", "world"])},
+            {"platform": ("platforms", ["abcd"])},
+            # Multiple dynamic fields
+            {
+                "summary": ("description", "hello world"),
+                "description": ("long_description", "bla bla bla bla"),
+                "requires-dist": ("install_requires", ["hello-world"]),
+            },
+        ],
+    )
+    def test_modified_fields_marked_as_dynamic(self, file, fields, tmpdir_cwd):
+        # We start with a static config
+        Path(file).write_text(self.STATIC_CONFIG[file], encoding="utf-8")
+        dist = _makedist()
+
+        # ... but then we simulate the effects of a plugin modifying the distribution
+        for attr, value in fields.values():
+            # `dist` and `dist.metadata` are complicated...
+            # Some attributes work when set on `dist`, others on `dist.metadata`...
+            # Here we set in both just in case (this also avoids calling `_finalize_*`)
+            setattr(dist, attr, value)
+            setattr(dist.metadata, attr, value)
+
+        # Then we should be able to list the modified fields as Dynamic
+        metadata = _get_metadata(dist)
+        assert set(metadata.get_all("Dynamic")) == set(fields)
+
+
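+# Background for TestPEP643 above: PEP 643 requires that any core-metadata
+# field whose value is not guaranteed to be static at sdist-build time be
+# listed under a ``Dynamic:`` header, e.g. (illustrative):
+#
+#     Dynamic: requires-python
+#     Dynamic: author-email
+
+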
+def _makedist(**attrs):
+    dist = Distribution(attrs)
+    dist.parse_config_files()
+    return dist
+
+
+def _assert_roundtrip_message(metadata: str) -> None:
+    """Emulate the way wheel.bdist_wheel parses and regenerates the message,
+    then ensure the metadata generated by setuptools is compatible.
+    """
+    with io.StringIO(metadata) as buffer:
+        msg = Parser(EmailMessage).parse(buffer)
+
+    serialization_policy = EmailPolicy(
+        utf8=True,
+        mangle_from_=False,
+        max_line_length=0,
+    )
+    with io.BytesIO() as buffer:
+        out = io.TextIOWrapper(buffer, encoding="utf-8")
+        Generator(out, policy=serialization_policy).flatten(msg)
+        out.flush()
+        regenerated = buffer.getvalue()
+
+    raw_metadata = bytes(metadata, "utf-8")
+    # Normalise newlines to avoid test errors on Windows:
+    raw_metadata = b"\n".join(raw_metadata.splitlines())
+    regenerated = b"\n".join(regenerated.splitlines())
+    assert regenerated == raw_metadata
+
+
+def _normalize_metadata(msg: Message) -> str:
+    """Allow equivalent metadata to be compared directly"""
+    # The main challenge concerns the requirements and extras.
+    # Both setuptools and wheel already apply some level of normalization,
+    # but they differ in which separator character is chosen; according to the
+    # following spec it should be "-":
+    # https://packaging.python.org/en/latest/specifications/name-normalization/
+
+    # Related issues:
+    # https://github.com/pypa/packaging/issues/845
+    # https://github.com/pypa/packaging/issues/644#issuecomment-2429813968
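+    # For illustration: an extra declared as "my_extra" may surface as
+    # "my_extra" in one tool's output and "my-extra" in the other's; both are
+    # mapped to the normalized "my-extra" below before comparison.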
+
+    extras = {x.replace("_", "-"): x for x in msg.get_all("Provides-Extra", [])}
+    reqs = [
+        _normalize_req(req, extras)
+        for req in _reqs.parse(msg.get_all("Requires-Dist", []))
+    ]
+    del msg["Requires-Dist"]
+    del msg["Provides-Extra"]
+
+    # Ensure a consistent ordering
+    for req in sorted(reqs):
+        msg["Requires-Dist"] = req
+    for extra in sorted(extras):
+        msg["Provides-Extra"] = extra
+
+    # TODO: Handle lack of PEP 643 implementation in pypa/wheel?
+    del msg["Metadata-Version"]
+
+    return msg.as_string()
+
+
+def _normalize_req(req: Requirement, extras: dict[str, str]) -> str:
+    """Allow equivalent requirement objects to be compared directly"""
+    as_str = str(req).replace(req.name, req.name.replace("_", "-"))
+    for norm, orig in extras.items():
+        as_str = as_str.replace(orig, norm)
+    return as_str
+
+
+def _get_pkginfo(dist: Distribution):
+    with io.StringIO() as fp:
+        dist.metadata.write_pkg_file(fp)
+        return fp.getvalue()
+
+
+def _get_metadata(dist: Distribution | None = None):
+    return message_from_string(_get_pkginfo(dist or _makedist()))
+
+
+def _valid_metadata(text: str) -> bool:
+    metadata = Metadata.from_email(text, validate=True)  # can raise exceptions
+    return metadata is not None
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/test_depends.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_depends.py
new file mode 100644
index 0000000000000000000000000000000000000000..1714c041f7a23e1ecbfc3245bf964f75c13734ca
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_depends.py
@@ -0,0 +1,15 @@
+import sys
+
+from setuptools import depends
+
+
+class TestGetModuleConstant:
+    def test_basic(self):
+        """
+        Invoke get_module_constant on a module in
+        the test package.
+        """
+        mod_name = 'setuptools.tests.mod_with_constant'
+        val = depends.get_module_constant(mod_name, 'value')
+        assert val == 'three, sir!'
+        assert 'setuptools.tests.mod_with_constant' not in sys.modules
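+        # Rationale: get_module_constant extracts the value by scanning the
+        # module's compiled code rather than importing the module, which is
+        # why sys.modules is expected to stay untouched (asserted above).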
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/test_develop.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_develop.py
new file mode 100644
index 0000000000000000000000000000000000000000..929fa9c285eb4d11e646dab2864be6d5fa023e2b
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_develop.py
@@ -0,0 +1,175 @@
+"""develop tests"""
+
+import os
+import pathlib
+import platform
+import subprocess
+import sys
+
+import pytest
+
+from setuptools._path import paths_on_pythonpath
+from setuptools.command.develop import develop
+from setuptools.dist import Distribution
+
+from . import contexts, namespaces
+
+SETUP_PY = """\
+from setuptools import setup
+
+setup(name='foo',
+    packages=['foo'],
+)
+"""
+
+INIT_PY = """print "foo"
+"""
+
+
+@pytest.fixture
+def temp_user(monkeypatch):
+    with contexts.tempdir() as user_base:
+        with contexts.tempdir() as user_site:
+            monkeypatch.setattr('site.USER_BASE', user_base)
+            monkeypatch.setattr('site.USER_SITE', user_site)
+            yield
+
+
+@pytest.fixture
+def test_env(tmpdir, temp_user):
+    target = tmpdir
+    foo = target.mkdir('foo')
+    setup = target / 'setup.py'
+    if setup.isfile():
+        raise ValueError(dir(target))
+    with setup.open('w') as f:
+        f.write(SETUP_PY)
+    init = foo / '__init__.py'
+    with init.open('w') as f:
+        f.write(INIT_PY)
+    with target.as_cwd():
+        yield target
+
+
+class TestDevelop:
+    in_virtualenv = hasattr(sys, 'real_prefix')
+    in_venv = hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix
+
+    def test_console_scripts(self, tmpdir):
+        """
+        Test that console scripts are installed and that they reference
+        only the project by name and not the current version.
+        """
+        pytest.skip(
+            "TODO: needs a fixture to cause 'develop' "
+            "to be invoked without mutating environment."
+        )
+        settings = dict(
+            name='foo',
+            packages=['foo'],
+            version='0.0',
+            entry_points={
+                'console_scripts': [
+                    'foocmd = foo:foo',
+                ],
+            },
+        )
+        dist = Distribution(settings)
+        dist.script_name = 'setup.py'
+        cmd = develop(dist)
+        cmd.ensure_finalized()
+        cmd.install_dir = tmpdir
+        cmd.run()
+        # assert '0.0' not in foocmd_text
+
+    @pytest.mark.xfail(reason="legacy behavior retained for compatibility #4167")
+    def test_egg_link_filename(self):
+        settings = dict(
+            name='Foo $$$ Bar_baz-bing',
+        )
+        dist = Distribution(settings)
+        cmd = develop(dist)
+        cmd.ensure_finalized()
+        link = pathlib.Path(cmd.egg_link)
+        assert link.suffix == '.egg-link'
+        assert link.stem == 'Foo_Bar_baz_bing'
+
+
+class TestResolver:
+    """
+    TODO: These tests were written with a minimal understanding
+    of what _resolve_setup_path is intending to do. Come up with
+    more meaningful cases that look like real-world scenarios.
+    """
+
+    def test_resolve_setup_path_cwd(self):
+        assert develop._resolve_setup_path('.', '.', '.') == '.'
+
+    def test_resolve_setup_path_one_dir(self):
+        assert develop._resolve_setup_path('pkgs', '.', 'pkgs') == '../'
+
+    def test_resolve_setup_path_one_dir_trailing_slash(self):
+        assert develop._resolve_setup_path('pkgs/', '.', 'pkgs') == '../'
+
+
+class TestNamespaces:
+    @staticmethod
+    def install_develop(src_dir, target):
+        develop_cmd = [
+            sys.executable,
+            'setup.py',
+            'develop',
+            '--install-dir',
+            str(target),
+        ]
+        with src_dir.as_cwd():
+            with paths_on_pythonpath([str(target)]):
+                subprocess.check_call(develop_cmd)
+
+    @pytest.mark.skipif(
+        bool(os.environ.get("APPVEYOR")),
+        reason="https://github.com/pypa/setuptools/issues/851",
+    )
+    @pytest.mark.skipif(
+        platform.python_implementation() == 'PyPy',
+        reason="https://github.com/pypa/setuptools/issues/1202",
+    )
+    def test_namespace_package_importable(self, tmpdir):
+        """
+        Installing two packages sharing the same namespace, one installed
+        normally using pip or `--single-version-externally-managed`
+        and the other installed using `develop`, should leave the namespace
+        intact and both packages reachable by import.
+        """
+        pkg_A = namespaces.build_namespace_package(tmpdir, 'myns.pkgA')
+        pkg_B = namespaces.build_namespace_package(tmpdir, 'myns.pkgB')
+        target = tmpdir / 'packages'
+        # use pip to install to the target directory
+        install_cmd = [
+            sys.executable,
+            '-m',
+            'pip',
+            'install',
+            str(pkg_A),
+            '-t',
+            str(target),
+        ]
+        subprocess.check_call(install_cmd)
+        self.install_develop(pkg_B, target)
+        namespaces.make_site_dir(target)
+        try_import = [
+            sys.executable,
+            '-c',
+            'import myns.pkgA; import myns.pkgB',
+        ]
+        with paths_on_pythonpath([str(target)]):
+            subprocess.check_call(try_import)
+
+        # additionally ensure that pkg_resources import works
+        pkg_resources_imp = [
+            sys.executable,
+            '-c',
+            'import pkg_resources',
+        ]
+        with paths_on_pythonpath([str(target)]):
+            subprocess.check_call(pkg_resources_imp)
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/test_dist_info.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_dist_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..31e6e95a68445e952db9ee5510dd01d3c961bb93
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_dist_info.py
@@ -0,0 +1,210 @@
+"""Test .dist-info style distributions."""
+
+import pathlib
+import re
+import shutil
+import subprocess
+import sys
+from functools import partial
+
+import pytest
+
+import pkg_resources
+from setuptools.archive_util import unpack_archive
+
+from .textwrap import DALS
+
+read = partial(pathlib.Path.read_text, encoding="utf-8")
+
+
+class TestDistInfo:
+    metadata_base = DALS(
+        """
+        Metadata-Version: 1.2
+        Requires-Dist: splort (==4)
+        Provides-Extra: baz
+        Requires-Dist: quux (>=1.1); extra == 'baz'
+        """
+    )
+
+    @classmethod
+    def build_metadata(cls, **kwargs):
+        lines = (f'{key}: {value}\n' for key, value in kwargs.items())
+        return cls.metadata_base + ''.join(lines)
+
+    @pytest.fixture
+    def metadata(self, tmpdir):
+        dist_info_name = 'VersionedDistribution-2.718.dist-info'
+        versioned = tmpdir / dist_info_name
+        versioned.mkdir()
+        filename = versioned / 'METADATA'
+        content = self.build_metadata(
+            Name='VersionedDistribution',
+        )
+        filename.write_text(content, encoding='utf-8')
+
+        dist_info_name = 'UnversionedDistribution.dist-info'
+        unversioned = tmpdir / dist_info_name
+        unversioned.mkdir()
+        filename = unversioned / 'METADATA'
+        content = self.build_metadata(
+            Name='UnversionedDistribution',
+            Version='0.3',
+        )
+        filename.write_text(content, encoding='utf-8')
+
+        return str(tmpdir)
+
+    def test_distinfo(self, metadata):
+        dists = {
+            d.project_name: d for d in pkg_resources.find_distributions(metadata)
+        }
+
+        assert len(dists) == 2, dists
+
+        unversioned = dists['UnversionedDistribution']
+        versioned = dists['VersionedDistribution']
+
+        assert versioned.version == '2.718'  # from filename
+        assert unversioned.version == '0.3'  # from METADATA
+
+    def test_conditional_dependencies(self, metadata):
+        specs = 'splort==4', 'quux>=1.1'
+        requires = list(map(pkg_resources.Requirement.parse, specs))
+
+        for d in pkg_resources.find_distributions(metadata):
+            assert d.requires() == requires[:1]
+            assert d.requires(extras=('baz',)) == [
+                requires[0],
+                pkg_resources.Requirement.parse('quux>=1.1;extra=="baz"'),
+            ]
+            assert d.extras == ['baz']
+
+    def test_invalid_version(self, tmp_path):
+        """
+        Supplying an invalid version crashes dist_info.
+        """
+        config = "[metadata]\nname=proj\nversion=42\n[egg_info]\ntag_build=invalid!!!\n"
+        (tmp_path / "setup.cfg").write_text(config, encoding="utf-8")
+        msg = re.compile("invalid version", re.M | re.I)
+        proc = run_command_inner("dist_info", cwd=tmp_path, check=False)
+        assert proc.returncode
+        assert msg.search(proc.stdout)
+        assert not list(tmp_path.glob("*.dist-info"))
+
+    def test_tag_arguments(self, tmp_path):
+        config = """
+        [metadata]
+        name=proj
+        version=42
+        [egg_info]
+        tag_date=1
+        tag_build=.post
+        """
+        (tmp_path / "setup.cfg").write_text(config, encoding="utf-8")
+
+        print(run_command("dist_info", "--no-date", cwd=tmp_path))
+        dist_info = next(tmp_path.glob("*.dist-info"))
+        assert dist_info.name.startswith("proj-42")
+        shutil.rmtree(dist_info)
+
+        print(run_command("dist_info", "--tag-build", ".a", cwd=tmp_path))
+        dist_info = next(tmp_path.glob("*.dist-info"))
+        assert dist_info.name.startswith("proj-42a")
+
+    @pytest.mark.parametrize("keep_egg_info", (False, True))
+    def test_output_dir(self, tmp_path, keep_egg_info):
+        config = "[metadata]\nname=proj\nversion=42\n"
+        (tmp_path / "setup.cfg").write_text(config, encoding="utf-8")
+        out = tmp_path / "__out"
+        out.mkdir()
+        opts = ["--keep-egg-info"] if keep_egg_info else []
+        run_command("dist_info", "--output-dir", out, *opts, cwd=tmp_path)
+        assert len(list(out.glob("*.dist-info"))) == 1
+        assert len(list(tmp_path.glob("*.dist-info"))) == 0
+        expected_egg_info = int(keep_egg_info)
+        assert len(list(out.glob("*.egg-info"))) == expected_egg_info
+        assert len(list(tmp_path.glob("*.egg-info"))) == 0
+        assert len(list(out.glob("*.__bkp__"))) == 0
+        assert len(list(tmp_path.glob("*.__bkp__"))) == 0
+
+
+class TestWheelCompatibility:
+    """Make sure the .dist-info directory produced with the ``dist_info`` command
+    is the same as the one produced by ``bdist_wheel``.
+    """
+
+    SETUPCFG = DALS(
+        """
+    [metadata]
+    name = {name}
+    version = {version}
+
+    [options]
+    install_requires =
+        foo>=12; sys_platform != "linux"
+
+    [options.extras_require]
+    test = pytest
+
+    [options.entry_points]
+    console_scripts =
+        executable-name = my_package.module:function
+    discover =
+        myproj = my_package.other_module:function
+    """
+    )
+
+    EGG_INFO_OPTS = [
+        # Related: #3088 #2872
+        ("", ""),
+        (".post", "[egg_info]\ntag_build = post\n"),
+        (".post", "[egg_info]\ntag_build = .post\n"),
+        (".post", "[egg_info]\ntag_build = post\ntag_date = 1\n"),
+        (".dev", "[egg_info]\ntag_build = .dev\n"),
+        (".dev", "[egg_info]\ntag_build = .dev\ntag_date = 1\n"),
+        ("a1", "[egg_info]\ntag_build = .a1\n"),
+        ("+local", "[egg_info]\ntag_build = +local\n"),
+    ]
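+    # Reading the pairs above: egg_info normalizes ``tag_build`` into a valid
+    # PEP 440 suffix, so e.g. both ``tag_build = post`` and ``tag_build = .post``
+    # yield version "0.42.13.post" (the expected shared ".post" suffix).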
+
+    @pytest.mark.parametrize("name", "my-proj my_proj my.proj My.Proj".split())
+    @pytest.mark.parametrize("version", ["0.42.13"])
+    @pytest.mark.parametrize(("suffix", "cfg"), EGG_INFO_OPTS)
+    def test_dist_info_is_the_same_as_in_wheel(
+        self, name, version, tmp_path, suffix, cfg
+    ):
+        config = self.SETUPCFG.format(name=name, version=version) + cfg
+
+        for i in "dir_wheel", "dir_dist":
+            (tmp_path / i).mkdir()
+            (tmp_path / i / "setup.cfg").write_text(config, encoding="utf-8")
+
+        run_command("bdist_wheel", cwd=tmp_path / "dir_wheel")
+        wheel = next(tmp_path.glob("dir_wheel/dist/*.whl"))
+        unpack_archive(wheel, tmp_path / "unpack")
+        wheel_dist_info = next(tmp_path.glob("unpack/*.dist-info"))
+
+        run_command("dist_info", cwd=tmp_path / "dir_dist")
+        dist_info = next(tmp_path.glob("dir_dist/*.dist-info"))
+
+        assert dist_info.name == wheel_dist_info.name
+        assert dist_info.name.startswith(f"{name.replace('-', '_')}-{version}{suffix}")
+        for file in "METADATA", "entry_points.txt":
+            assert read(dist_info / file) == read(wheel_dist_info / file)
+
+
+def run_command_inner(*cmd, **kwargs):
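+    # Runs a setuptools command in a subprocess via a stub in-line script that
+    # is equivalent to a minimal ``setup.py`` (see ``cmd`` below), capturing
+    # combined stdout/stderr so callers can assert on messages.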
+    opts = {
+        "stderr": subprocess.STDOUT,
+        "stdout": subprocess.PIPE,
+        "text": True,
+        "encoding": "utf-8",
+        "check": True,
+        **kwargs,
+    }
+    cmd = [sys.executable, "-c", "__import__('setuptools').setup()", *map(str, cmd)]
+    return subprocess.run(cmd, **opts)
+
+
+def run_command(*args, **kwargs):
+    return run_command_inner(*args, **kwargs).stdout
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/test_editable_install.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_editable_install.py
new file mode 100644
index 0000000000000000000000000000000000000000..038dcadf934af20dbe11dcb13ba324ceb30bf90d
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_editable_install.py
@@ -0,0 +1,1289 @@
+from __future__ import annotations
+
+import os
+import platform
+import stat
+import subprocess
+import sys
+from copy import deepcopy
+from importlib import import_module
+from importlib.machinery import EXTENSION_SUFFIXES
+from pathlib import Path
+from textwrap import dedent
+from typing import Any
+from unittest.mock import Mock
+from uuid import uuid4
+
+import jaraco.envs
+import jaraco.path
+import pytest
+from path import Path as _Path
+
+from setuptools._importlib import resources as importlib_resources
+from setuptools.command.editable_wheel import (
+    _DebuggingTips,
+    _encode_pth,
+    _find_namespaces,
+    _find_package_roots,
+    _find_virtual_namespaces,
+    _finder_template,
+    _LinkTree,
+    _TopLevelFinder,
+    editable_wheel,
+)
+from setuptools.dist import Distribution
+from setuptools.extension import Extension
+from setuptools.warnings import SetuptoolsDeprecationWarning
+
+from . import contexts, namespaces
+
+from distutils.core import run_setup
+
+
+@pytest.fixture(params=["strict", "lenient"])
+def editable_opts(request):
+    if request.param == "strict":
+        return ["--config-settings", "editable-mode=strict"]
+    return []
+
+
+EXAMPLE = {
+    'pyproject.toml': dedent(
+        """\
+        [build-system]
+        requires = ["setuptools"]
+        build-backend = "setuptools.build_meta"
+
+        [project]
+        name = "mypkg"
+        version = "3.14159"
+        license = {text = "MIT"}
+        description = "This is a Python package"
+        dynamic = ["readme"]
+        classifiers = [
+            "Development Status :: 5 - Production/Stable",
+            "Intended Audience :: Developers"
+        ]
+        urls = {Homepage = "https://github.com"}
+
+        [tool.setuptools]
+        package-dir = {"" = "src"}
+        packages = {find = {where = ["src"]}}
+        license-files = ["LICENSE*"]
+
+        [tool.setuptools.dynamic]
+        readme = {file = "README.rst"}
+
+        [tool.distutils.egg_info]
+        tag-build = ".post0"
+        """
+    ),
+    "MANIFEST.in": dedent(
+        """\
+        global-include *.py *.txt
+        global-exclude *.py[cod]
+        prune dist
+        prune build
+        """
+    ).strip(),
+    "README.rst": "This is a ``README``",
+    "LICENSE.txt": "---- placeholder MIT license ----",
+    "src": {
+        "mypkg": {
+            "__init__.py": dedent(
+                """\
+                import sys
+                from importlib.metadata import PackageNotFoundError, version
+
+                try:
+                    __version__ = version(__name__)
+                except PackageNotFoundError:
+                    __version__ = "unknown"
+                """
+            ),
+            "__main__.py": dedent(
+                """\
+                from importlib.resources import read_text
+                from . import __version__, __name__ as parent
+                from .mod import x
+
+                data = read_text(parent, "data.txt")
+                print(__version__, data, x)
+                """
+            ),
+            "mod.py": "x = ''",
+            "data.txt": "Hello World",
+        }
+    },
+}
+
+
+SETUP_SCRIPT_STUB = "__import__('setuptools').setup()"
+
+
+@pytest.mark.xfail(sys.platform == "darwin", reason="pypa/setuptools#4328")
+@pytest.mark.parametrize(
+    "files",
+    [
+        {**EXAMPLE, "setup.py": SETUP_SCRIPT_STUB},
+        EXAMPLE,  # No setup.py script
+    ],
+)
+def test_editable_with_pyproject(tmp_path, venv, files, editable_opts):
+    project = tmp_path / "mypkg"
+    project.mkdir()
+    jaraco.path.build(files, prefix=project)
+
+    cmd = [
+        "python",
+        "-m",
+        "pip",
+        "install",
+        "--no-build-isolation",  # required to force current version of setuptools
+        "-e",
+        str(project),
+        *editable_opts,
+    ]
+    print(venv.run(cmd))
+
+    cmd = ["python", "-m", "mypkg"]
+    assert venv.run(cmd).strip() == "3.14159.post0 Hello World"
+
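+    # The core promise of an editable install: in-place edits to the sources
+    # must be picked up on the next run, without reinstalling.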
+    (project / "src/mypkg/data.txt").write_text("foobar", encoding="utf-8")
+    (project / "src/mypkg/mod.py").write_text("x = 42", encoding="utf-8")
+    assert venv.run(cmd).strip() == "3.14159.post0 foobar 42"
+
+
+def test_editable_with_flat_layout(tmp_path, venv, editable_opts):
+    files = {
+        "mypkg": {
+            "pyproject.toml": dedent(
+                """\
+                [build-system]
+                requires = ["setuptools", "wheel"]
+                build-backend = "setuptools.build_meta"
+
+                [project]
+                name = "mypkg"
+                version = "3.14159"
+
+                [tool.setuptools]
+                packages = ["pkg"]
+                py-modules = ["mod"]
+                """
+            ),
+            "pkg": {"__init__.py": "a = 4"},
+            "mod.py": "b = 2",
+        },
+    }
+    jaraco.path.build(files, prefix=tmp_path)
+    project = tmp_path / "mypkg"
+
+    cmd = [
+        "python",
+        "-m",
+        "pip",
+        "install",
+        "--no-build-isolation",  # required to force current version of setuptools
+        "-e",
+        str(project),
+        *editable_opts,
+    ]
+    print(venv.run(cmd))
+    cmd = ["python", "-c", "import pkg, mod; print(pkg.a, mod.b)"]
+    assert venv.run(cmd).strip() == "4 2"
+
+
+def test_editable_with_single_module(tmp_path, venv, editable_opts):
+    files = {
+        "mypkg": {
+            "pyproject.toml": dedent(
+                """\
+                [build-system]
+                requires = ["setuptools", "wheel"]
+                build-backend = "setuptools.build_meta"
+
+                [project]
+                name = "mod"
+                version = "3.14159"
+
+                [tool.setuptools]
+                py-modules = ["mod"]
+                """
+            ),
+            "mod.py": "b = 2",
+        },
+    }
+    jaraco.path.build(files, prefix=tmp_path)
+    project = tmp_path / "mypkg"
+
+    cmd = [
+        "python",
+        "-m",
+        "pip",
+        "install",
+        "--no-build-isolation",  # required to force current version of setuptools
+        "-e",
+        str(project),
+        *editable_opts,
+    ]
+    print(venv.run(cmd))
+    cmd = ["python", "-c", "import mod; print(mod.b)"]
+    assert venv.run(cmd).strip() == "2"
+
+
+class TestLegacyNamespaces:
+    # legacy => pkg_resources.declare_namespace(...) + setup(namespace_packages=...)
+
+    def test_nspkg_file_is_unique(self, tmp_path, monkeypatch):
+        deprecation = pytest.warns(
+            SetuptoolsDeprecationWarning, match=".*namespace_packages parameter.*"
+        )
+        installation_dir = tmp_path / ".installation_dir"
+        installation_dir.mkdir()
+        examples = (
+            "myns.pkgA",
+            "myns.pkgB",
+            "myns.n.pkgA",
+            "myns.n.pkgB",
+        )
+
+        for name in examples:
+            pkg = namespaces.build_namespace_package(tmp_path, name, version="42")
+            with deprecation, monkeypatch.context() as ctx:
+                ctx.chdir(pkg)
+                dist = run_setup("setup.py", stop_after="config")
+                cmd = editable_wheel(dist)
+                cmd.finalize_options()
+                editable_name = cmd.get_finalized_command("dist_info").name
+                cmd._install_namespaces(installation_dir, editable_name)
+
+        files = list(installation_dir.glob("*-nspkg.pth"))
+        assert len(files) == len(examples)
+
+    @pytest.mark.parametrize(
+        "impl",
+        (
+            "pkg_resources",
+            #  "pkgutil",  => does not work
+        ),
+    )
+    @pytest.mark.parametrize("ns", ("myns.n",))
+    def test_namespace_package_importable(
+        self, venv, tmp_path, ns, impl, editable_opts
+    ):
+        """
+        Installing two packages sharing the same namespace, one installed
+        normally using pip or `--single-version-externally-managed`
+        and the other installed in editable mode, should leave the namespace
+        intact and both packages reachable by import.
+        (Ported from test_develop).
+        """
+        build_system = """\
+        [build-system]
+        requires = ["setuptools"]
+        build-backend = "setuptools.build_meta"
+        """
+        pkg_A = namespaces.build_namespace_package(tmp_path, f"{ns}.pkgA", impl=impl)
+        pkg_B = namespaces.build_namespace_package(tmp_path, f"{ns}.pkgB", impl=impl)
+        (pkg_A / "pyproject.toml").write_text(build_system, encoding="utf-8")
+        (pkg_B / "pyproject.toml").write_text(build_system, encoding="utf-8")
+        # use pip to install to the target directory
+        opts = editable_opts[:]
+        opts.append("--no-build-isolation")  # force current version of setuptools
+        venv.run(["python", "-m", "pip", "install", str(pkg_A), *opts])
+        venv.run(["python", "-m", "pip", "install", "-e", str(pkg_B), *opts])
+        venv.run(["python", "-c", f"import {ns}.pkgA; import {ns}.pkgB"])
+        # additionally ensure that pkg_resources import works
+        venv.run(["python", "-c", "import pkg_resources"])
+
+
+class TestPep420Namespaces:
+    def test_namespace_package_importable(self, venv, tmp_path, editable_opts):
+        """
+        Installing two packages sharing the same namespace, one installed
+        normally using pip and the other installed in editable mode
+        should allow importing both packages.
+        """
+        pkg_A = namespaces.build_pep420_namespace_package(tmp_path, 'myns.n.pkgA')
+        pkg_B = namespaces.build_pep420_namespace_package(tmp_path, 'myns.n.pkgB')
+        # use pip to install to the target directory
+        opts = editable_opts[:]
+        opts.append("--no-build-isolation")  # force current version of setuptools
+        venv.run(["python", "-m", "pip", "install", str(pkg_A), *opts])
+        venv.run(["python", "-m", "pip", "install", "-e", str(pkg_B), *opts])
+        venv.run(["python", "-c", "import myns.n.pkgA; import myns.n.pkgB"])
+
+    def test_namespace_created_via_package_dir(self, venv, tmp_path, editable_opts):
+        """Currently users can create a namespace by tweaking `package_dir`"""
+        files = {
+            "pkgA": {
+                "pyproject.toml": dedent(
+                    """\
+                    [build-system]
+                    requires = ["setuptools", "wheel"]
+                    build-backend = "setuptools.build_meta"
+
+                    [project]
+                    name = "pkgA"
+                    version = "3.14159"
+
+                    [tool.setuptools]
+                    package-dir = {"myns.n.pkgA" = "src"}
+                    """
+                ),
+                "src": {"__init__.py": "a = 1"},
+            },
+        }
+        jaraco.path.build(files, prefix=tmp_path)
+        pkg_A = tmp_path / "pkgA"
+        pkg_B = namespaces.build_pep420_namespace_package(tmp_path, 'myns.n.pkgB')
+        pkg_C = namespaces.build_pep420_namespace_package(tmp_path, 'myns.n.pkgC')
+
+        # use pip to install to the target directory
+        opts = editable_opts[:]
+        opts.append("--no-build-isolation")  # force current version of setuptools
+        venv.run(["python", "-m", "pip", "install", str(pkg_A), *opts])
+        venv.run(["python", "-m", "pip", "install", "-e", str(pkg_B), *opts])
+        venv.run(["python", "-m", "pip", "install", "-e", str(pkg_C), *opts])
+        venv.run(["python", "-c", "from myns.n import pkgA, pkgB, pkgC"])
+
+    def test_namespace_accidental_config_in_lenient_mode(self, venv, tmp_path):
+        """Sometimes users might specify an ``include`` pattern that ignores parent
+        packages. In a normal installation this would ignore all modules inside the
+        parent packages and turn them into implicit namespace packages
+        (reported in issue #3504), so editable mode should preserve this behaviour.
+        """
+        files = {
+            "pkgA": {
+                "pyproject.toml": dedent(
+                    """\
+                    [build-system]
+                    requires = ["setuptools", "wheel"]
+                    build-backend = "setuptools.build_meta"
+
+                    [project]
+                    name = "pkgA"
+                    version = "3.14159"
+
+                    [tool.setuptools]
+                    packages.find.include = ["mypkg.*"]
+                    """
+                ),
+                "mypkg": {
+                    "__init__.py": "",
+                    "other.py": "b = 1",
+                    "n": {
+                        "__init__.py": "",
+                        "pkgA.py": "a = 1",
+                    },
+                },
+                "MANIFEST.in": EXAMPLE["MANIFEST.in"],
+            },
+        }
+        jaraco.path.build(files, prefix=tmp_path)
+        pkg_A = tmp_path / "pkgA"
+
+        # use pip to install to the target directory
+        opts = ["--no-build-isolation"]  # force current version of setuptools
+        venv.run(["python", "-m", "pip", "-v", "install", "-e", str(pkg_A), *opts])
+        out = venv.run(["python", "-c", "from mypkg.n import pkgA; print(pkgA.a)"])
+        assert out.strip() == "1"
+        cmd = """\
+        try:
+            import mypkg.other
+        except ImportError:
+            print("mypkg.other not defined")
+        """
+        out = venv.run(["python", "-c", dedent(cmd)])
+        assert "mypkg.other not defined" in out
+
+
+def test_editable_with_prefix(tmp_path, sample_project, editable_opts):
+    """
+    Editable install to a prefix should be discoverable.
+    """
+    prefix = tmp_path / 'prefix'
+
+    # figure out where pip will likely install the package
+    site_packages_all = [
+        prefix / Path(path).relative_to(sys.prefix)
+        for path in sys.path
+        if 'site-packages' in path and path.startswith(sys.prefix)
+    ]
+
+    for sp in site_packages_all:
+        sp.mkdir(parents=True)
+
+    # install workaround
+    _addsitedirs(site_packages_all)
+
+    env = dict(os.environ, PYTHONPATH=os.pathsep.join(map(str, site_packages_all)))
+    cmd = [
+        sys.executable,
+        '-m',
+        'pip',
+        'install',
+        '--editable',
+        str(sample_project),
+        '--prefix',
+        str(prefix),
+        '--no-build-isolation',
+        *editable_opts,
+    ]
+    subprocess.check_call(cmd, env=env)
+
+    # now run 'sample' with the prefix on the PYTHONPATH
+    bin = 'Scripts' if platform.system() == 'Windows' else 'bin'
+    exe = prefix / bin / 'sample'
+    subprocess.check_call([exe], env=env)
+
+
+class TestFinderTemplate:
+    """This test focus in getting a particular implementation detail right.
+    If at some point in time the implementation is changed for something different,
+    this test can be modified or even excluded.
+    """
+
+    def install_finder(self, finder):
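+        # ``finder`` is the generated source of the editable finder module;
+        # executing it and calling its ``install()`` hook registers the finder
+        # with the import machinery (sys.meta_path), much like importing the
+        # generated ``__editable__.*`` module would at interpreter startup.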
+        loc = {}
+        exec(finder, loc, loc)
+        loc["install"]()
+
+    def test_packages(self, tmp_path):
+        files = {
+            "src1": {
+                "pkg1": {
+                    "__init__.py": "",
+                    "subpkg": {"mod1.py": "a = 42"},
+                },
+            },
+            "src2": {"mod2.py": "a = 43"},
+        }
+        jaraco.path.build(files, prefix=tmp_path)
+
+        mapping = {
+            "pkg1": str(tmp_path / "src1/pkg1"),
+            "mod2": str(tmp_path / "src2/mod2"),
+        }
+        template = _finder_template(str(uuid4()), mapping, {})
+
+        with contexts.save_paths(), contexts.save_sys_modules():
+            for mod in ("pkg1", "pkg1.subpkg", "pkg1.subpkg.mod1", "mod2"):
+                sys.modules.pop(mod, None)
+
+            self.install_finder(template)
+            mod1 = import_module("pkg1.subpkg.mod1")
+            mod2 = import_module("mod2")
+            subpkg = import_module("pkg1.subpkg")
+
+            assert mod1.a == 42
+            assert mod2.a == 43
+            expected = str((tmp_path / "src1/pkg1/subpkg").resolve())
+            assert_path(subpkg, expected)
+
+    def test_namespace(self, tmp_path):
+        files = {"pkg": {"__init__.py": "a = 13", "text.txt": "abc"}}
+        jaraco.path.build(files, prefix=tmp_path)
+
+        mapping = {"ns.othername": str(tmp_path / "pkg")}
+        namespaces = {"ns": []}
+
+        template = _finder_template(str(uuid4()), mapping, namespaces)
+        with contexts.save_paths(), contexts.save_sys_modules():
+            for mod in ("ns", "ns.othername"):
+                sys.modules.pop(mod, None)
+
+            self.install_finder(template)
+            pkg = import_module("ns.othername")
+            text = importlib_resources.files(pkg) / "text.txt"
+
+            expected = str((tmp_path / "pkg").resolve())
+            assert_path(pkg, expected)
+            assert pkg.a == 13
+
+            # Make sure resources can also be found
+            assert text.read_text(encoding="utf-8") == "abc"
+
+    def test_combine_namespaces(self, tmp_path):
+        files = {
+            "src1": {"ns": {"pkg1": {"__init__.py": "a = 13"}}},
+            "src2": {"ns": {"mod2.py": "b = 37"}},
+        }
+        jaraco.path.build(files, prefix=tmp_path)
+
+        mapping = {
+            "ns.pkgA": str(tmp_path / "src1/ns/pkg1"),
+            "ns": str(tmp_path / "src2/ns"),
+        }
+        namespaces_ = {"ns": [str(tmp_path / "src1"), str(tmp_path / "src2")]}
+        template = _finder_template(str(uuid4()), mapping, namespaces_)
+
+        with contexts.save_paths(), contexts.save_sys_modules():
+            for mod in ("ns", "ns.pkgA", "ns.mod2"):
+                sys.modules.pop(mod, None)
+
+            self.install_finder(template)
+            pkgA = import_module("ns.pkgA")
+            mod2 = import_module("ns.mod2")
+
+            expected = str((tmp_path / "src1/ns/pkg1").resolve())
+            assert_path(pkgA, expected)
+            assert pkgA.a == 13
+            assert mod2.b == 37
+
+    def test_combine_namespaces_nested(self, tmp_path):
+        """
+        Users may attempt to combine namespace packages in a nested way via
+        ``package_dir`` as shown in pypa/setuptools#4248.
+        """
+
+        files = {
+            "src": {"my_package": {"my_module.py": "a = 13"}},
+            "src2": {"my_package2": {"my_module2.py": "b = 37"}},
+        }
+
+        stack = jaraco.path.DirectoryStack()
+        with stack.context(tmp_path):
+            jaraco.path.build(files)
+            attrs = {
+                "script_name": "%PEP 517%",
+                "package_dir": {
+                    "different_name": "src/my_package",
+                    "different_name.subpkg": "src2/my_package2",
+                },
+                "packages": ["different_name", "different_name.subpkg"],
+            }
+            dist = Distribution(attrs)
+            finder = _TopLevelFinder(dist, str(uuid4()))
+            code = next(v for k, v in finder.get_implementation() if k.endswith(".py"))
+
+        with contexts.save_paths(), contexts.save_sys_modules():
+            for mod in attrs["packages"]:
+                sys.modules.pop(mod, None)
+
+            self.install_finder(code)
+            mod1 = import_module("different_name.my_module")
+            mod2 = import_module("different_name.subpkg.my_module2")
+
+            expected = str((tmp_path / "src/my_package/my_module.py").resolve())
+            assert str(Path(mod1.__file__).resolve()) == expected
+
+            expected = str((tmp_path / "src2/my_package2/my_module2.py").resolve())
+            assert str(Path(mod2.__file__).resolve()) == expected
+
+            assert mod1.a == 13
+            assert mod2.b == 37
+
+    def test_dynamic_path_computation(self, tmp_path):
+        # Follows the example in PEP 420
+        files = {
+            "project1": {"parent": {"child": {"one.py": "x = 1"}}},
+            "project2": {"parent": {"child": {"two.py": "x = 2"}}},
+            "project3": {"parent": {"child": {"three.py": "x = 3"}}},
+        }
+        jaraco.path.build(files, prefix=tmp_path)
+        mapping = {}
+        namespaces_ = {"parent": [str(tmp_path / "project1/parent")]}
+        template = _finder_template(str(uuid4()), mapping, namespaces_)
+
+        mods = (f"parent.child.{name}" for name in ("one", "two", "three"))
+        with contexts.save_paths(), contexts.save_sys_modules():
+            for mod in ("parent", "parent.child", "parent.child", *mods):
+                sys.modules.pop(mod, None)
+
+            self.install_finder(template)
+
+            one = import_module("parent.child.one")
+            assert one.x == 1
+
+            with pytest.raises(ImportError):
+                import_module("parent.child.two")
+
+            sys.path.append(str(tmp_path / "project2"))
+            two = import_module("parent.child.two")
+            assert two.x == 2
+
+            with pytest.raises(ImportError):
+                import_module("parent.child.three")
+
+            sys.path.append(str(tmp_path / "project3"))
+            three = import_module("parent.child.three")
+            assert three.x == 3
+
+    def test_no_recursion(self, tmp_path):
+        # See issue #3550
+        files = {
+            "pkg": {
+                "__init__.py": "from . import pkg",
+            },
+        }
+        jaraco.path.build(files, prefix=tmp_path)
+
+        mapping = {
+            "pkg": str(tmp_path / "pkg"),
+        }
+        template = _finder_template(str(uuid4()), mapping, {})
+
+        with contexts.save_paths(), contexts.save_sys_modules():
+            sys.modules.pop("pkg", None)
+
+            self.install_finder(template)
+            with pytest.raises(ImportError, match="pkg"):
+                import_module("pkg")
+
+    def test_similar_name(self, tmp_path):
+        files = {
+            "foo": {
+                "__init__.py": "",
+                "bar": {
+                    "__init__.py": "",
+                },
+            },
+        }
+        jaraco.path.build(files, prefix=tmp_path)
+
+        mapping = {
+            "foo": str(tmp_path / "foo"),
+        }
+        template = _finder_template(str(uuid4()), mapping, {})
+
+        with contexts.save_paths(), contexts.save_sys_modules():
+            sys.modules.pop("foo", None)
+            sys.modules.pop("foo.bar", None)
+
+            self.install_finder(template)
+            with pytest.raises(ImportError, match="foobar"):
+                import_module("foobar")
+
+    def test_case_sensitivity(self, tmp_path):
+        files = {
+            "foo": {
+                "__init__.py": "",
+                "lowercase.py": "x = 1",
+                "bar": {
+                    "__init__.py": "",
+                    "lowercase.py": "x = 2",
+                },
+            },
+        }
+        jaraco.path.build(files, prefix=tmp_path)
+        mapping = {
+            "foo": str(tmp_path / "foo"),
+        }
+        template = _finder_template(str(uuid4()), mapping, {})
+        with contexts.save_paths(), contexts.save_sys_modules():
+            sys.modules.pop("foo", None)
+
+            self.install_finder(template)
+            with pytest.raises(ImportError, match="'FOO'"):
+                import_module("FOO")
+
+            with pytest.raises(ImportError, match="'foo\\.LOWERCASE'"):
+                import_module("foo.LOWERCASE")
+
+            with pytest.raises(ImportError, match="'foo\\.bar\\.Lowercase'"):
+                import_module("foo.bar.Lowercase")
+
+            with pytest.raises(ImportError, match="'foo\\.BAR'"):
+                import_module("foo.BAR.lowercase")
+
+            with pytest.raises(ImportError, match="'FOO'"):
+                import_module("FOO.bar.lowercase")
+
+            mod = import_module("foo.lowercase")
+            assert mod.x == 1
+
+            mod = import_module("foo.bar.lowercase")
+            assert mod.x == 2
+
+    def test_namespace_case_sensitivity(self, tmp_path):
+        files = {
+            "pkg": {
+                "__init__.py": "a = 13",
+                "foo": {
+                    "__init__.py": "b = 37",
+                    "bar.py": "c = 42",
+                },
+            },
+        }
+        jaraco.path.build(files, prefix=tmp_path)
+
+        mapping = {"ns.othername": str(tmp_path / "pkg")}
+        namespaces = {"ns": []}
+
+        template = _finder_template(str(uuid4()), mapping, namespaces)
+        with contexts.save_paths(), contexts.save_sys_modules():
+            for mod in ("ns", "ns.othername"):
+                sys.modules.pop(mod, None)
+
+            self.install_finder(template)
+            pkg = import_module("ns.othername")
+            expected = str((tmp_path / "pkg").resolve())
+            assert_path(pkg, expected)
+            assert pkg.a == 13
+
+            foo = import_module("ns.othername.foo")
+            assert foo.b == 37
+
+            bar = import_module("ns.othername.foo.bar")
+            assert bar.c == 42
+
+            with pytest.raises(ImportError, match="'NS'"):
+                import_module("NS.othername.foo")
+
+            with pytest.raises(ImportError, match="'ns\\.othername\\.FOO'"):
+                import_module("ns.othername.FOO")
+
+            with pytest.raises(ImportError, match="'ns\\.othername\\.foo\\.BAR'"):
+                import_module("ns.othername.foo.BAR")
+
+    def test_intermediate_packages(self, tmp_path):
+        """
+        The finder should not import ``fullname`` if the intermediate segments
+        don't exist (see pypa/setuptools#4019).
+        """
+        files = {
+            "src": {
+                "mypkg": {
+                    "__init__.py": "",
+                    "config.py": "a = 13",
+                    "helloworld.py": "b = 13",
+                    "components": {
+                        "config.py": "a = 37",
+                    },
+                },
+            }
+        }
+        jaraco.path.build(files, prefix=tmp_path)
+
+        mapping = {"mypkg": str(tmp_path / "src/mypkg")}
+        template = _finder_template(str(uuid4()), mapping, {})
+
+        with contexts.save_paths(), contexts.save_sys_modules():
+            for mod in (
+                "mypkg",
+                "mypkg.config",
+                "mypkg.helloworld",
+                "mypkg.components",
+                "mypkg.components.config",
+                "mypkg.components.helloworld",
+            ):
+                sys.modules.pop(mod, None)
+
+            self.install_finder(template)
+
+            config = import_module("mypkg.components.config")
+            assert config.a == 37
+
+            helloworld = import_module("mypkg.helloworld")
+            assert helloworld.b == 13
+
+            with pytest.raises(ImportError):
+                import_module("mypkg.components.helloworld")
+
+
+def test_pkg_roots(tmp_path):
+    """This test focus in getting a particular implementation detail right.
+    If at some point in time the implementation is changed for something different,
+    this test can be modified or even excluded.
+    """
+    files = {
+        "a": {"b": {"__init__.py": "ab = 1"}, "__init__.py": "a = 1"},
+        "d": {"__init__.py": "d = 1", "e": {"__init__.py": "de = 1"}},
+        "f": {"g": {"h": {"__init__.py": "fgh = 1"}}},
+        "other": {"__init__.py": "abc = 1"},
+        "another": {"__init__.py": "abcxyz = 1"},
+        "yet_another": {"__init__.py": "mnopq = 1"},
+    }
+    jaraco.path.build(files, prefix=tmp_path)
+    package_dir = {
+        "a.b.c": "other",
+        "a.b.c.x.y.z": "another",
+        "m.n.o.p.q": "yet_another",
+    }
+    packages = [
+        "a",
+        "a.b",
+        "a.b.c",
+        "a.b.c.x.y",
+        "a.b.c.x.y.z",
+        "d",
+        "d.e",
+        "f",
+        "f.g",
+        "f.g.h",
+        "m.n.o.p.q",
+    ]
+    roots = _find_package_roots(packages, package_dir, tmp_path)
+    assert roots == {
+        "a": str(tmp_path / "a"),
+        "a.b.c": str(tmp_path / "other"),
+        "a.b.c.x.y.z": str(tmp_path / "another"),
+        "d": str(tmp_path / "d"),
+        "f": str(tmp_path / "f"),
+        "m.n.o.p.q": str(tmp_path / "yet_another"),
+    }
+
+    ns = set(dict(_find_namespaces(packages, roots)))
+    assert ns == {"f", "f.g"}
+
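+    # The "virtual" namespaces below are the ancestors of each mapped root
+    # that have no mapped directory of their own: mapping "a.b.c" forces
+    # "a.b" to act as a namespace, and "m.n.o.p.q" forces "m", "m.n",
+    # "m.n.o" and "m.n.o.p".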
+    ns = set(_find_virtual_namespaces(roots))
+    assert ns == {"a.b", "a.b.c.x", "a.b.c.x.y", "m", "m.n", "m.n.o", "m.n.o.p"}
+
+
+class TestOverallBehaviour:
+    PYPROJECT = """\
+        [build-system]
+        requires = ["setuptools"]
+        build-backend = "setuptools.build_meta"
+
+        [project]
+        name = "mypkg"
+        version = "3.14159"
+        """
+
+    # Any: a fully precise type would need a (recursive) TypedDict; keep it simple for tests
+    FLAT_LAYOUT: dict[str, Any] = {
+        "pyproject.toml": dedent(PYPROJECT),
+        "MANIFEST.in": EXAMPLE["MANIFEST.in"],
+        "otherfile.py": "",
+        "mypkg": {
+            "__init__.py": "",
+            "mod1.py": "var = 42",
+            "subpackage": {
+                "__init__.py": "",
+                "mod2.py": "var = 13",
+                "resource_file.txt": "resource 39",
+            },
+        },
+    }
+
+    EXAMPLES = {
+        "flat-layout": FLAT_LAYOUT,
+        "src-layout": {
+            "pyproject.toml": dedent(PYPROJECT),
+            "MANIFEST.in": EXAMPLE["MANIFEST.in"],
+            "otherfile.py": "",
+            "src": {"mypkg": FLAT_LAYOUT["mypkg"]},
+        },
+        "custom-layout": {
+            "pyproject.toml": dedent(PYPROJECT)
+            + dedent(
+                """\
+                [tool.setuptools]
+                packages = ["mypkg", "mypkg.subpackage"]
+
+                [tool.setuptools.package-dir]
+                "mypkg.subpackage" = "other"
+                """
+            ),
+            "MANIFEST.in": EXAMPLE["MANIFEST.in"],
+            "otherfile.py": "",
+            "mypkg": {
+                "__init__.py": "",
+                "mod1.py": FLAT_LAYOUT["mypkg"]["mod1.py"],
+            },
+            "other": FLAT_LAYOUT["mypkg"]["subpackage"],
+        },
+        "namespace": {
+            "pyproject.toml": dedent(PYPROJECT),
+            "MANIFEST.in": EXAMPLE["MANIFEST.in"],
+            "otherfile.py": "",
+            "src": {
+                "mypkg": {
+                    "mod1.py": FLAT_LAYOUT["mypkg"]["mod1.py"],
+                    "subpackage": FLAT_LAYOUT["mypkg"]["subpackage"],
+                },
+            },
+        },
+    }
+
+    @pytest.mark.xfail(sys.platform == "darwin", reason="pypa/setuptools#4328")
+    @pytest.mark.parametrize("layout", EXAMPLES.keys())
+    def test_editable_install(self, tmp_path, venv, layout, editable_opts):
+        project, _ = install_project(
+            "mypkg", venv, tmp_path, self.EXAMPLES[layout], *editable_opts
+        )
+
+        # Ensure stray files are not importable
+        cmd_import_error = """\
+        try:
+            import otherfile
+        except ImportError as ex:
+            print(ex)
+        """
+        out = venv.run(["python", "-c", dedent(cmd_import_error)])
+        assert "No module named 'otherfile'" in out
+
+        # Ensure the modules are importable
+        cmd_get_vars = """\
+        import mypkg, mypkg.mod1, mypkg.subpackage.mod2
+        print(mypkg.mod1.var, mypkg.subpackage.mod2.var)
+        """
+        out = venv.run(["python", "-c", dedent(cmd_get_vars)])
+        assert "42 13" in out
+
+        # Ensure resources are reachable
+        cmd_get_resource = """\
+        import mypkg.subpackage
+        from setuptools._importlib import resources as importlib_resources
+        text = importlib_resources.files(mypkg.subpackage) / "resource_file.txt"
+        print(text.read_text(encoding="utf-8"))
+        """
+        out = venv.run(["python", "-c", dedent(cmd_get_resource)])
+        assert "resource 39" in out
+
+        # Ensure files are editable
+        mod1 = next(project.glob("**/mod1.py"))
+        mod2 = next(project.glob("**/mod2.py"))
+        resource_file = next(project.glob("**/resource_file.txt"))
+
+        mod1.write_text("var = 17", encoding="utf-8")
+        mod2.write_text("var = 781", encoding="utf-8")
+        resource_file.write_text("resource 374", encoding="utf-8")
+
+        out = venv.run(["python", "-c", dedent(cmd_get_vars)])
+        assert "42 13" not in out
+        assert "17 781" in out
+
+        out = venv.run(["python", "-c", dedent(cmd_get_resource)])
+        assert "resource 39" not in out
+        assert "resource 374" in out
+
+
+class TestLinkTree:
+    FILES = deepcopy(TestOverallBehaviour.EXAMPLES["src-layout"])
+    FILES["pyproject.toml"] += dedent(
+        """\
+        [tool.setuptools]
+        # Temporary workaround: both `include-package-data` and `package-data` configs
+        # can be removed after #3260 is fixed.
+        include-package-data = false
+        package-data = {"*" = ["*.txt"]}
+
+        [tool.setuptools.packages.find]
+        where = ["src"]
+        exclude = ["*.subpackage*"]
+        """
+    )
+    FILES["src"]["mypkg"]["resource.not_in_manifest"] = "abc"
+
+    def test_generated_tree(self, tmp_path):
+        jaraco.path.build(self.FILES, prefix=tmp_path)
+
+        with _Path(tmp_path):
+            name = "mypkg-3.14159"
+            dist = Distribution({"script_name": "%PEP 517%"})
+            dist.parse_config_files()
+
+            wheel = Mock()
+            aux = tmp_path / ".aux"
+            build = tmp_path / ".build"
+            aux.mkdir()
+            build.mkdir()
+
+            build_py = dist.get_command_obj("build_py")
+            build_py.editable_mode = True
+            build_py.build_lib = str(build)
+            build_py.ensure_finalized()
+            outputs = build_py.get_outputs()
+            output_mapping = build_py.get_output_mapping()
+
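+            # _LinkTree should populate the auxiliary dir with links back to
+            # the original sources, so edits show up without reinstalling,
+            # while excluded files must not be linked at all.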
+            make_tree = _LinkTree(dist, name, aux, build)
+            make_tree(wheel, outputs, output_mapping)
+
+            mod1 = next(aux.glob("**/mod1.py"))
+            expected = tmp_path / "src/mypkg/mod1.py"
+            assert_link_to(mod1, expected)
+
+            assert next(aux.glob("**/subpackage"), None) is None
+            assert next(aux.glob("**/mod2.py"), None) is None
+            assert next(aux.glob("**/resource_file.txt"), None) is None
+
+            assert next(aux.glob("**/resource.not_in_manifest"), None) is None
+
+    def test_strict_install(self, tmp_path, venv):
+        opts = ["--config-settings", "editable-mode=strict"]
+        install_project("mypkg", venv, tmp_path, self.FILES, *opts)
+
+        out = venv.run(["python", "-c", "import mypkg.mod1; print(mypkg.mod1.var)"])
+        assert "42" in out
+
+        # Ensure packages excluded from distribution are not importable
+        cmd_import_error = """\
+        try:
+            from mypkg import subpackage
+        except ImportError as ex:
+            print(ex)
+        """
+        out = venv.run(["python", "-c", dedent(cmd_import_error)])
+        assert "cannot import name 'subpackage'" in out
+
+        # Ensure resource files excluded from distribution are not reachable
+        cmd_get_resource = """\
+        import mypkg
+        from setuptools._importlib import resources as importlib_resources
+        try:
+            text = importlib_resources.files(mypkg) / "resource.not_in_manifest"
+            print(text.read_text(encoding="utf-8"))
+        except FileNotFoundError as ex:
+            print(ex)
+        """
+        out = venv.run(["python", "-c", dedent(cmd_get_resource)])
+        assert "No such file or directory" in out
+        assert "resource.not_in_manifest" in out
+
+
+@pytest.mark.filterwarnings("ignore:.*compat.*:setuptools.SetuptoolsDeprecationWarning")
+def test_compat_install(tmp_path, venv):
+    # TODO: Remove `compat` after Dec/2022.
+    opts = ["--config-settings", "editable-mode=compat"]
+    files = TestOverallBehaviour.EXAMPLES["custom-layout"]
+    install_project("mypkg", venv, tmp_path, files, *opts)
+
+    out = venv.run(["python", "-c", "import mypkg.mod1; print(mypkg.mod1.var)"])
+    assert "42" in out
+
+    expected_path = comparable_path(str(tmp_path))
+
+    # Compatible behaviour will make spurious modules and excluded
+    # files importable directly from the original path
+    for cmd in (
+        "import otherfile; print(otherfile)",
+        "import other; print(other)",
+        "import mypkg; print(mypkg)",
+    ):
+        out = comparable_path(venv.run(["python", "-c", cmd]))
+        assert expected_path in out
+
+    # Compatible behaviour will not consider custom mappings
+    cmd = """\
+    try:
+        from mypkg import subpackage
+    except ImportError as ex:
+        print(ex)
+    """
+    out = venv.run(["python", "-c", dedent(cmd)])
+    assert "cannot import name 'subpackage'" in out
+
+
+def test_pbr_integration(tmp_path, venv, editable_opts):
+    """Ensure editable installs work with pbr, issue #3500"""
+    files = {
+        "pyproject.toml": dedent(
+            """\
+            [build-system]
+            requires = ["setuptools"]
+            build-backend = "setuptools.build_meta"
+            """
+        ),
+        "setup.py": dedent(
+            """\
+            __import__('setuptools').setup(
+                pbr=True,
+                setup_requires=["pbr"],
+            )
+            """
+        ),
+        "setup.cfg": dedent(
+            """\
+            [metadata]
+            name = mypkg
+
+            [files]
+            packages =
+                mypkg
+            """
+        ),
+        "mypkg": {
+            "__init__.py": "",
+            "hello.py": "print('Hello world!')",
+        },
+        "other": {"test.txt": "Another file in here."},
+    }
+    venv.run(["python", "-m", "pip", "install", "pbr"])
+
+    with contexts.environment(PBR_VERSION="0.42"):
+        install_project("mypkg", venv, tmp_path, files, *editable_opts)
+
+    out = venv.run(["python", "-c", "import mypkg.hello"])
+    assert "Hello world!" in out
+
+
+class TestCustomBuildPy:
+    """
+    Issue #3501 indicates that some plugins/customizations might rely on:
+
+    1. ``build_py`` not running
+    2. ``build_py`` always copying files to ``build_lib``
+
+    During the transition period setuptools should prevent potential errors from
+    happening due to those assumptions.
+    """
+
+    # TODO: Remove tests after _run_build_steps is removed.
+
+    FILES = {
+        **TestOverallBehaviour.EXAMPLES["flat-layout"],
+        "setup.py": dedent(
+            """\
+            import pathlib
+            from setuptools import setup
+            from setuptools.command.build_py import build_py as orig
+
+            class my_build_py(orig):
+                def run(self):
+                    super().run()
+                    raise ValueError("TEST_RAISE")
+
+            setup(cmdclass={"build_py": my_build_py})
+            """
+        ),
+    }
+
+    def test_safeguarded_from_errors(self, tmp_path, venv):
+        """Ensure that errors in custom build_py are reported as warnings"""
+        # Warnings should show up
+        _, out = install_project("mypkg", venv, tmp_path, self.FILES)
+        assert "SetuptoolsDeprecationWarning" in out
+        assert "ValueError: TEST_RAISE" in out
+        # but installation should be successful
+        out = venv.run(["python", "-c", "import mypkg.mod1; print(mypkg.mod1.var)"])
+        assert "42" in out
+
+
+class TestCustomBuildWheel:
+    def install_custom_build_wheel(self, dist):
+        bdist_wheel_cls = dist.get_command_class("bdist_wheel")
+
+        class MyBdistWheel(bdist_wheel_cls):
+            def get_tag(self):
+                # In issue #3513, we can see that some extensions may try to access
+                # the `plat_name` property in bdist_wheel
+                if self.plat_name.startswith("macosx-"):
+                    _ = "macOS platform"
+                return super().get_tag()
+
+        dist.cmdclass["bdist_wheel"] = MyBdistWheel
+
+    def test_access_plat_name(self, tmpdir_cwd):
+        # Even when a custom bdist_wheel tries to access plat_name the build should
+        # be successful
+        jaraco.path.build({"module.py": "x = 42"})
+        dist = Distribution()
+        dist.script_name = "setup.py"
+        dist.set_defaults()
+        self.install_custom_build_wheel(dist)
+        cmd = editable_wheel(dist)
+        cmd.ensure_finalized()
+        cmd.run()
+        wheel_file = str(next(Path().glob('dist/*.whl')))
+        assert "editable" in wheel_file
+
+
+class TestCustomBuildExt:
+    def install_custom_build_ext_distutils(self, dist):
+        from distutils.command.build_ext import build_ext as build_ext_cls
+
+        class MyBuildExt(build_ext_cls):
+            pass
+
+        dist.cmdclass["build_ext"] = MyBuildExt
+
+    @pytest.mark.skipif(
+        sys.platform != "linux", reason="compilers may fail without correct setup"
+    )
+    def test_distutils_leave_inplace_files(self, tmpdir_cwd):
+        jaraco.path.build({"module.c": ""})
+        attrs = {
+            "ext_modules": [Extension("module", ["module.c"])],
+        }
+        dist = Distribution(attrs)
+        dist.script_name = "setup.py"
+        dist.set_defaults()
+        self.install_custom_build_ext_distutils(dist)
+        cmd = editable_wheel(dist)
+        cmd.ensure_finalized()
+        cmd.run()
+        wheel_file = str(next(Path().glob('dist/*.whl')))
+        assert "editable" in wheel_file
+        files = [p for p in Path().glob("module.*") if p.suffix != ".c"]
+        assert len(files) == 1
+        name = files[0].name
+        assert any(name.endswith(ext) for ext in EXTENSION_SUFFIXES)
+
+
+def test_debugging_tips(tmpdir_cwd, monkeypatch):
+    """Make sure to display useful debugging tips to the user."""
+    jaraco.path.build({"module.py": "x = 42"})
+    dist = Distribution()
+    dist.script_name = "setup.py"
+    dist.set_defaults()
+    cmd = editable_wheel(dist)
+    cmd.ensure_finalized()
+
+    SimulatedErr = type("SimulatedErr", (Exception,), {})
+    simulated_failure = Mock(side_effect=SimulatedErr())
+    monkeypatch.setattr(cmd, "get_finalized_command", simulated_failure)
+
+    expected_msg = "following steps are recommended to help debug"
+    with pytest.raises(SimulatedErr), pytest.warns(_DebuggingTips, match=expected_msg):
+        cmd.run()
+
+
+@pytest.mark.filterwarnings("error")
+def test_encode_pth():
+    """Ensure _encode_pth function does not produce encoding warnings"""
+    content = _encode_pth("tkmilan_ç_utf8")  # no warnings (would be turned into errors)
+    assert isinstance(content, bytes)
+
+
+def install_project(name, venv, tmp_path, files, *opts):
+    project = tmp_path / name
+    project.mkdir()
+    jaraco.path.build(files, prefix=project)
+    opts = [*opts, "--no-build-isolation"]  # force current version of setuptools
+    out = venv.run(
+        ["python", "-m", "pip", "-v", "install", "-e", str(project), *opts],
+        stderr=subprocess.STDOUT,
+    )
+    return project, out
+
+
+def _addsitedirs(new_dirs):
+    """To use this function, it is necessary to insert new_dir in front of sys.path.
+    The Python process will try to import a ``sitecustomize`` module on startup.
+    If we manipulate sys.path/PYTHONPATH, we can force it to run our code,
+    which invokes ``addsitedir`` and ensure ``.pth`` files are loaded.
+    """
+    content = '\n'.join(
+        ("import site",)
+        + tuple(f"site.addsitedir({os.fspath(new_dir)!r})" for new_dir in new_dirs)
+    )
+    (new_dirs[0] / "sitecustomize.py").write_text(content, encoding="utf-8")
+
+
+# ---- Assertion Helpers ----
+
+
+def assert_path(pkg, expected):
+    # __path__ is not guaranteed to exist, so we have to account for that
+    if pkg.__path__:
+        path = next(iter(pkg.__path__), None)
+        if path:
+            assert str(Path(path).resolve()) == expected
+
+
+def assert_link_to(file: Path, other: Path) -> None:
+    if file.is_symlink():
+        assert str(file.resolve()) == str(other.resolve())
+    else:
+        file_stat = file.stat()
+        other_stat = other.stat()
+        assert file_stat[stat.ST_INO] == other_stat[stat.ST_INO]
+        assert file_stat[stat.ST_DEV] == other_stat[stat.ST_DEV]
+
+
+def comparable_path(str_with_path: str) -> str:
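+    """Lower-case a path and normalize separators for fuzzy comparisons.
+
+    e.g. comparable_path("/Tmp//pkg") -> "/tmp/pkg"
+    """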
+    return str_with_path.lower().replace(os.sep, "/").replace("//", "/")
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/test_glob.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_glob.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d225a44610163c7d56d65b07c06f0f598ccfe84
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_glob.py
@@ -0,0 +1,45 @@
+import pytest
+from jaraco import path
+
+from setuptools.glob import glob
+
+
+@pytest.mark.parametrize(
+    ('tree', 'pattern', 'matches'),
+    (
+        ('', b'', []),
+        ('', '', []),
+        (
+            """
+     appveyor.yml
+     CHANGES.rst
+     LICENSE
+     MANIFEST.in
+     pyproject.toml
+     README.rst
+     setup.cfg
+     setup.py
+     """,
+            '*.rst',
+            ('CHANGES.rst', 'README.rst'),
+        ),
+        (
+            """
+     appveyor.yml
+     CHANGES.rst
+     LICENSE
+     MANIFEST.in
+     pyproject.toml
+     README.rst
+     setup.cfg
+     setup.py
+     """,
+            b'*.rst',
+            (b'CHANGES.rst', b'README.rst'),
+        ),
+    ),
+)
+def test_glob(monkeypatch, tmpdir, tree, pattern, matches):
+    monkeypatch.chdir(tmpdir)
+    path.build({name: '' for name in tree.split()})
+    assert sorted(glob(pattern)) == sorted(matches)
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/test_namespaces.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_namespaces.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0f4120bf7900b2118cc066034e036ab7af1798b
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_namespaces.py
@@ -0,0 +1,138 @@
+import subprocess
+import sys
+
+from setuptools._path import paths_on_pythonpath
+
+from . import namespaces
+
+
+class TestNamespaces:
+    def test_mixed_site_and_non_site(self, tmpdir):
+        """
+        Installing two packages that share a namespace, one into a site
+        dir and the other just onto a path on PYTHONPATH, should leave the
+        namespace intact with both packages reachable by import.
+        """
+        pkg_A = namespaces.build_namespace_package(tmpdir, 'myns.pkgA')
+        pkg_B = namespaces.build_namespace_package(tmpdir, 'myns.pkgB')
+        site_packages = tmpdir / 'site-packages'
+        path_packages = tmpdir / 'path-packages'
+        targets = site_packages, path_packages
+        # use pip to install to the target directory
+        install_cmd = [
+            sys.executable,
+            '-m',
+            'pip.__main__',
+            'install',
+            str(pkg_A),
+            '-t',
+            str(site_packages),
+        ]
+        subprocess.check_call(install_cmd)
+        namespaces.make_site_dir(site_packages)
+        install_cmd = [
+            sys.executable,
+            '-m',
+            'pip.__main__',
+            'install',
+            str(pkg_B),
+            '-t',
+            str(path_packages),
+        ]
+        subprocess.check_call(install_cmd)
+        try_import = [
+            sys.executable,
+            '-c',
+            'import myns.pkgA; import myns.pkgB',
+        ]
+        with paths_on_pythonpath(map(str, targets)):
+            subprocess.check_call(try_import)
+
+    def test_pkg_resources_import(self, tmpdir):
+        """
+        Ensure that a namespace package doesn't break on import
+        of pkg_resources.
+        """
+        pkg = namespaces.build_namespace_package(tmpdir, 'myns.pkgA')
+        target = tmpdir / 'packages'
+        target.mkdir()
+        install_cmd = [
+            sys.executable,
+            '-m',
+            'pip',
+            'install',
+            '-t',
+            str(target),
+            str(pkg),
+        ]
+        with paths_on_pythonpath([str(target)]):
+            subprocess.check_call(install_cmd)
+        namespaces.make_site_dir(target)
+        try_import = [
+            sys.executable,
+            '-c',
+            'import pkg_resources',
+        ]
+        with paths_on_pythonpath([str(target)]):
+            subprocess.check_call(try_import)
+
+    def test_namespace_package_installed_and_cwd(self, tmpdir):
+        """
+        Installing a namespace package while also having it in the current
+        working directory: only one version should take precedence.
+        """
+        pkg_A = namespaces.build_namespace_package(tmpdir, 'myns.pkgA')
+        target = tmpdir / 'packages'
+        # use pip to install to the target directory
+        install_cmd = [
+            sys.executable,
+            '-m',
+            'pip.__main__',
+            'install',
+            str(pkg_A),
+            '-t',
+            str(target),
+        ]
+        subprocess.check_call(install_cmd)
+        namespaces.make_site_dir(target)
+
+        # ensure that the package and pkg_resources both import
+        pkg_resources_imp = [
+            sys.executable,
+            '-c',
+            'import pkg_resources; import myns.pkgA',
+        ]
+        with paths_on_pythonpath([str(target)]):
+            subprocess.check_call(pkg_resources_imp, cwd=str(pkg_A))
+
+    def test_packages_in_the_same_namespace_installed_and_cwd(self, tmpdir):
+        """
+        Installing one namespace package while also having another package
+        in the same namespace in the current working directory: both of
+        them must be importable.
+        """
+        pkg_A = namespaces.build_namespace_package(tmpdir, 'myns.pkgA')
+        pkg_B = namespaces.build_namespace_package(tmpdir, 'myns.pkgB')
+        target = tmpdir / 'packages'
+        # use pip to install to the target directory
+        install_cmd = [
+            sys.executable,
+            '-m',
+            'pip.__main__',
+            'install',
+            str(pkg_A),
+            '-t',
+            str(target),
+        ]
+        subprocess.check_call(install_cmd)
+        namespaces.make_site_dir(target)
+
+        # ensure that all packages and pkg_resources import
+        pkg_resources_imp = [
+            sys.executable,
+            '-c',
+            'import pkg_resources; import myns.pkgA; import myns.pkgB',
+        ]
+        with paths_on_pythonpath([str(target)]):
+            subprocess.check_call(pkg_resources_imp, cwd=str(pkg_B))
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/test_sandbox.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_sandbox.py
new file mode 100644
index 0000000000000000000000000000000000000000..a476b7c93d7441834f415223cb386859581724f8
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_sandbox.py
@@ -0,0 +1,134 @@
+"""develop tests"""
+
+import os
+import types
+
+import pytest
+
+import pkg_resources
+import setuptools.sandbox
+
+
+class TestSandbox:
+    def test_devnull(self, tmpdir):
+        with setuptools.sandbox.DirectorySandbox(str(tmpdir)):
+            self._file_writer(os.devnull)
+
+    @staticmethod
+    def _file_writer(path):
+        def do_write():
+            with open(path, 'w', encoding="utf-8") as f:
+                f.write('xxx')
+
+        return do_write
+
+    def test_setup_py_with_BOM(self):
+        """
+        It should be possible to execute a setup.py with a Byte Order Mark
+        """
+        target = pkg_resources.resource_filename(__name__, 'script-with-bom.py')
+        namespace = types.ModuleType('namespace')
+        setuptools.sandbox._execfile(target, vars(namespace))
+        assert namespace.result == 'passed'
+
+    def test_setup_py_with_CRLF(self, tmpdir):
+        setup_py = tmpdir / 'setup.py'
+        with setup_py.open('wb') as stream:
+            stream.write(b'"degenerate script"\r\n')
+        setuptools.sandbox._execfile(str(setup_py), globals())
+
+
+class TestExceptionSaver:
+    def test_exception_trapped(self):
+        with setuptools.sandbox.ExceptionSaver():
+            raise ValueError("details")
+
+    def test_exception_resumed(self):
+        with setuptools.sandbox.ExceptionSaver() as saved_exc:
+            raise ValueError("details")
+
+        with pytest.raises(ValueError) as caught:
+            saved_exc.resume()
+
+        assert isinstance(caught.value, ValueError)
+        assert str(caught.value) == 'details'
+
+    def test_exception_reconstructed(self):
+        orig_exc = ValueError("details")
+
+        with setuptools.sandbox.ExceptionSaver() as saved_exc:
+            raise orig_exc
+
+        with pytest.raises(ValueError) as caught:
+            saved_exc.resume()
+
+        assert isinstance(caught.value, ValueError)
+        assert caught.value is not orig_exc
+
+    def test_no_exception_passes_quietly(self):
+        with setuptools.sandbox.ExceptionSaver() as saved_exc:
+            pass
+
+        saved_exc.resume()
+
+    def test_unpickleable_exception(self):
+        class CantPickleThis(Exception):
+            "This Exception is unpickleable because it's not in globals"
+
+            def __repr__(self) -> str:
+                return f'CantPickleThis{self.args!r}'
+
+        with setuptools.sandbox.ExceptionSaver() as saved_exc:
+            raise CantPickleThis('detail')
+
+        with pytest.raises(setuptools.sandbox.UnpickleableException) as caught:
+            saved_exc.resume()
+
+        assert str(caught.value) == "CantPickleThis('detail',)"
+
+    def test_unpickleable_exception_when_hiding_setuptools(self):
+        """
+        As revealed in #440, an infinite recursion can occur if an unpickleable
+        exception is raised while setuptools is hidden. Ensure this doesn't happen.
+        """
+
+        class ExceptionUnderTest(Exception):
+            """
+            An unpickleable exception (not in globals).
+            """
+
+        with pytest.raises(setuptools.sandbox.UnpickleableException) as caught:
+            with setuptools.sandbox.save_modules():
+                setuptools.sandbox.hide_setuptools()
+                raise ExceptionUnderTest
+
+        (msg,) = caught.value.args
+        assert msg == 'ExceptionUnderTest()'
+
+    def test_sandbox_violation_raised_hiding_setuptools(self, tmpdir):
+        """
+        When in a sandbox with setuptools hidden, a SandboxViolation
+        should reflect a proper exception and not be wrapped in
+        an UnpickleableException.
+        """
+
+        def write_file():
+            "Trigger a SandboxViolation by writing outside the sandbox"
+            with open('/etc/foo', 'w', encoding="utf-8"):
+                pass
+
+        with pytest.raises(setuptools.sandbox.SandboxViolation) as caught:
+            with setuptools.sandbox.save_modules():
+                setuptools.sandbox.hide_setuptools()
+                with setuptools.sandbox.DirectorySandbox(str(tmpdir)):
+                    write_file()
+
+        cmd, args, kwargs = caught.value.args
+        assert cmd == 'open'
+        assert args == ('/etc/foo', 'w')
+        assert kwargs == {"encoding": "utf-8"}
+
+        msg = str(caught.value)
+        assert 'open' in msg
+        assert "('/etc/foo', 'w')" in msg
+        assert "{'encoding': 'utf-8'}" in msg
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/test_shutil_wrapper.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_shutil_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..74ff7e9a896328a3d57ca3639658e3b9d538585f
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_shutil_wrapper.py
@@ -0,0 +1,23 @@
+import stat
+import sys
+from unittest.mock import Mock
+
+from setuptools import _shutil
+
+
+def test_rmtree_readonly(monkeypatch, tmp_path):
+    """Verify onerr works as expected"""
+
+    tmp_dir = tmp_path / "with_readonly"
+    tmp_dir.mkdir()
+    some_file = tmp_dir.joinpath("file.txt")
+    some_file.touch()
+    some_file.chmod(stat.S_IREAD)
+
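+    # On Windows, unlinking a read-only file fails, so the chmod handler
+    # should run exactly once; on POSIX, deletion is governed by the parent
+    # directory's permissions, so the handler is never invoked.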
+    expected_count = 1 if sys.platform.startswith("win") else 0
+    chmod_fn = Mock(wraps=_shutil.attempt_chmod_verbose)
+    monkeypatch.setattr(_shutil, "attempt_chmod_verbose", chmod_fn)
+
+    _shutil.rmtree(tmp_dir)
+    assert chmod_fn.call_count == expected_count
+    assert not tmp_dir.is_dir()
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/textwrap.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/textwrap.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e39618dca4ad6c3f0d4c8cb20af59ab85fb0eba
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/textwrap.py
@@ -0,0 +1,6 @@
+import textwrap
+
+
+def DALS(s):
+    "dedent and left-strip"
+    return textwrap.dedent(s).lstrip()
diff --git a/videollama2/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/data-v1-dl-21552912.arff.gz b/videollama2/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/data-v1-dl-21552912.arff.gz
new file mode 100644
index 0000000000000000000000000000000000000000..010258ddd3f64ab3d63665f106946a34b241d68e
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/data-v1-dl-21552912.arff.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f623e777c0a36ae6c82fae10a7c2088cb383298ea244595bf8dc95449c9be4c4
+size 2326
diff --git a/videollama2/lib/python3.10/site-packages/websockets/server.py b/videollama2/lib/python3.10/site-packages/websockets/server.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecb0f74a692d76c7d225fb6f7319e23e7c6b25be
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/websockets/server.py
@@ -0,0 +1,575 @@
+from __future__ import annotations
+
+import base64
+import binascii
+import email.utils
+import http
+import warnings
+from typing import Any, Callable, Generator, List, Optional, Sequence, Tuple, cast
+
+from .datastructures import Headers, MultipleValuesError
+from .exceptions import (
+    InvalidHandshake,
+    InvalidHeader,
+    InvalidHeaderValue,
+    InvalidOrigin,
+    InvalidStatus,
+    InvalidUpgrade,
+    NegotiationError,
+)
+from .extensions import Extension, ServerExtensionFactory
+from .headers import (
+    build_extension,
+    parse_connection,
+    parse_extension,
+    parse_subprotocol,
+    parse_upgrade,
+)
+from .http11 import Request, Response
+from .protocol import CONNECTING, OPEN, SERVER, Protocol, State
+from .typing import (
+    ConnectionOption,
+    ExtensionHeader,
+    LoggerLike,
+    Origin,
+    Subprotocol,
+    UpgradeProtocol,
+)
+from .utils import accept_key
+
+
+# See #940 for why lazy_import isn't used here for backwards compatibility.
+from .legacy.server import *  # isort:skip  # noqa: I001
+
+
+__all__ = ["ServerProtocol"]
+
+
+class ServerProtocol(Protocol):
+    """
+    Sans-I/O implementation of a WebSocket server connection.
+
+    Args:
+        origins: acceptable values of the ``Origin`` header; include
+            :obj:`None` in the list if the lack of an origin is acceptable.
+            This is useful for defending against Cross-Site WebSocket
+            Hijacking attacks.
+        extensions: list of supported extensions, in order in which they
+            should be tried.
+        subprotocols: list of supported subprotocols, in order of decreasing
+            preference.
+        select_subprotocol: Callback for selecting a subprotocol among
+            those supported by the client and the server. It has the same
+            signature as the :meth:`select_subprotocol` method, including a
+            :class:`ServerProtocol` instance as first argument.
+        state: initial state of the WebSocket connection.
+        max_size: maximum size of incoming messages in bytes;
+            :obj:`None` disables the limit.
+        logger: logger for this connection;
+            defaults to ``logging.getLogger("websockets.server")``;
+            see the :doc:`logging guide <../../topics/logging>` for details.
+
+    """
+
+    def __init__(
+        self,
+        *,
+        origins: Optional[Sequence[Optional[Origin]]] = None,
+        extensions: Optional[Sequence[ServerExtensionFactory]] = None,
+        subprotocols: Optional[Sequence[Subprotocol]] = None,
+        select_subprotocol: Optional[
+            Callable[
+                [ServerProtocol, Sequence[Subprotocol]],
+                Optional[Subprotocol],
+            ]
+        ] = None,
+        state: State = CONNECTING,
+        max_size: Optional[int] = 2**20,
+        logger: Optional[LoggerLike] = None,
+    ):
+        super().__init__(
+            side=SERVER,
+            state=state,
+            max_size=max_size,
+            logger=logger,
+        )
+        self.origins = origins
+        self.available_extensions = extensions
+        self.available_subprotocols = subprotocols
+        if select_subprotocol is not None:
+            # Bind select_subprotocol then shadow self.select_subprotocol.
+            # Use setattr to work around https://github.com/python/mypy/issues/2427.
+            setattr(
+                self,
+                "select_subprotocol",
+                select_subprotocol.__get__(self, self.__class__),
+            )
+
+    def accept(self, request: Request) -> Response:
+        """
+        Create a handshake response to accept the connection.
+
+        If the connection cannot be established, the handshake response
+        actually rejects the handshake.
+
+        You must send the handshake response with :meth:`send_response`.
+
+        You may modify it before sending it, for example to add HTTP headers.
+
+        Args:
+            request: WebSocket handshake request event received from the client.
+
+        Returns:
+            WebSocket handshake response event to send to the client.
+
+        """
+        try:
+            (
+                accept_header,
+                extensions_header,
+                protocol_header,
+            ) = self.process_request(request)
+        except InvalidOrigin as exc:
+            request._exception = exc
+            self.handshake_exc = exc
+            if self.debug:
+                self.logger.debug("! invalid origin", exc_info=True)
+            return self.reject(
+                http.HTTPStatus.FORBIDDEN,
+                f"Failed to open a WebSocket connection: {exc}.\n",
+            )
+        except InvalidUpgrade as exc:
+            request._exception = exc
+            self.handshake_exc = exc
+            if self.debug:
+                self.logger.debug("! invalid upgrade", exc_info=True)
+            response = self.reject(
+                http.HTTPStatus.UPGRADE_REQUIRED,
+                (
+                    f"Failed to open a WebSocket connection: {exc}.\n"
+                    f"\n"
+                    f"You cannot access a WebSocket server directly "
+                    f"with a browser. You need a WebSocket client.\n"
+                ),
+            )
+            response.headers["Upgrade"] = "websocket"
+            return response
+        except InvalidHandshake as exc:
+            request._exception = exc
+            self.handshake_exc = exc
+            if self.debug:
+                self.logger.debug("! invalid handshake", exc_info=True)
+            return self.reject(
+                http.HTTPStatus.BAD_REQUEST,
+                f"Failed to open a WebSocket connection: {exc}.\n",
+            )
+        except Exception as exc:
+            # Handle exceptions raised by user-provided select_subprotocol and
+            # unexpected errors.
+            request._exception = exc
+            self.handshake_exc = exc
+            self.logger.error("opening handshake failed", exc_info=True)
+            return self.reject(
+                http.HTTPStatus.INTERNAL_SERVER_ERROR,
+                (
+                    "Failed to open a WebSocket connection.\n"
+                    "See server log for more information.\n"
+                ),
+            )
+
+        headers = Headers()
+
+        headers["Date"] = email.utils.formatdate(usegmt=True)
+
+        headers["Upgrade"] = "websocket"
+        headers["Connection"] = "Upgrade"
+        headers["Sec-WebSocket-Accept"] = accept_header
+
+        if extensions_header is not None:
+            headers["Sec-WebSocket-Extensions"] = extensions_header
+
+        if protocol_header is not None:
+            headers["Sec-WebSocket-Protocol"] = protocol_header
+
+        self.logger.info("connection open")
+        return Response(101, "Switching Protocols", headers)
+
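+    # How ``accept`` fits into a typical sans-I/O driving loop (a sketch;
+    # moving the bytes over the network is the embedding server's job):
+    #
+    #     protocol = ServerProtocol()
+    #     protocol.receive_data(handshake_bytes)
+    #     request = protocol.events_received()[0]
+    #     response = protocol.accept(request)
+    #     protocol.send_response(response)
+    #     for data in protocol.data_to_send():
+    #         ...  # write data to the transport
+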
+    def process_request(
+        self,
+        request: Request,
+    ) -> Tuple[str, Optional[str], Optional[str]]:
+        """
+        Check a handshake request and negotiate extensions and subprotocol.
+
+        This function doesn't verify that the request is an HTTP/1.1 or higher
+        GET request and doesn't check the ``Host`` header. These controls are
+        usually performed earlier in the HTTP request handling code. They're
+        the responsibility of the caller.
+
+        Args:
+            request: WebSocket handshake request received from the client.
+
+        Returns:
+            Tuple[str, Optional[str], Optional[str]]:
+            ``Sec-WebSocket-Accept``, ``Sec-WebSocket-Extensions``, and
+            ``Sec-WebSocket-Protocol`` headers for the handshake response.
+
+        Raises:
+            InvalidHandshake: if the handshake request is invalid;
+                then the server must return a 400 Bad Request error.
+
+        """
+        headers = request.headers
+
+        connection: List[ConnectionOption] = sum(
+            [parse_connection(value) for value in headers.get_all("Connection")], []
+        )
+
+        if not any(value.lower() == "upgrade" for value in connection):
+            raise InvalidUpgrade(
+                "Connection", ", ".join(connection) if connection else None
+            )
+
+        upgrade: List[UpgradeProtocol] = sum(
+            [parse_upgrade(value) for value in headers.get_all("Upgrade")], []
+        )
+
+        # For compatibility with non-strict implementations, ignore case when
+        # checking the Upgrade header. The RFC always uses "websocket", except
+        # in section 11.2. (IANA registration) where it uses "WebSocket".
+        if not (len(upgrade) == 1 and upgrade[0].lower() == "websocket"):
+            raise InvalidUpgrade("Upgrade", ", ".join(upgrade) if upgrade else None)
+
+        try:
+            key = headers["Sec-WebSocket-Key"]
+        except KeyError as exc:
+            raise InvalidHeader("Sec-WebSocket-Key") from exc
+        except MultipleValuesError as exc:
+            raise InvalidHeader(
+                "Sec-WebSocket-Key", "more than one Sec-WebSocket-Key header found"
+            ) from exc
+
+        try:
+            raw_key = base64.b64decode(key.encode(), validate=True)
+        except binascii.Error as exc:
+            raise InvalidHeaderValue("Sec-WebSocket-Key", key) from exc
+        if len(raw_key) != 16:
+            raise InvalidHeaderValue("Sec-WebSocket-Key", key)
+
+        try:
+            version = headers["Sec-WebSocket-Version"]
+        except KeyError as exc:
+            raise InvalidHeader("Sec-WebSocket-Version") from exc
+        except MultipleValuesError as exc:
+            raise InvalidHeader(
+                "Sec-WebSocket-Version",
+                "more than one Sec-WebSocket-Version header found",
+            ) from exc
+
+        if version != "13":
+            raise InvalidHeaderValue("Sec-WebSocket-Version", version)
+
+        accept_header = accept_key(key)
+
+        self.origin = self.process_origin(headers)
+
+        extensions_header, self.extensions = self.process_extensions(headers)
+
+        protocol_header = self.subprotocol = self.process_subprotocol(headers)
+
+        return (
+            accept_header,
+            extensions_header,
+            protocol_header,
+        )
+
+    def process_origin(self, headers: Headers) -> Optional[Origin]:
+        """
+        Handle the Origin HTTP request header.
+
+        Args:
+            headers: WebSocket handshake request headers.
+
+        Returns:
+           Optional[Origin]: origin, if it is acceptable.
+
+        Raises:
+            InvalidHandshake: if the Origin header is invalid.
+            InvalidOrigin: if the origin isn't acceptable.
+
+        """
+        # "The user agent MUST NOT include more than one Origin header field"
+        # per https://www.rfc-editor.org/rfc/rfc6454.html#section-7.3.
+        try:
+            origin = cast(Optional[Origin], headers.get("Origin"))
+        except MultipleValuesError as exc:
+            raise InvalidHeader("Origin", "more than one Origin header found") from exc
+        if self.origins is not None:
+            if origin not in self.origins:
+                raise InvalidOrigin(origin)
+        return origin
+
+    def process_extensions(
+        self,
+        headers: Headers,
+    ) -> Tuple[Optional[str], List[Extension]]:
+        """
+        Handle the Sec-WebSocket-Extensions HTTP request header.
+
+        Accept or reject each extension proposed in the client request.
+        Negotiate parameters for accepted extensions.
+
+        Per :rfc:`6455`, negotiation rules are defined by the specification of
+        each extension.
+
+        To provide this level of flexibility, for each extension proposed by
+        the client, we check for a match with each extension available in the
+        server configuration. If no match is found, the extension is ignored.
+
+        If several variants of the same extension are proposed by the client,
+        it may be accepted several times, which won't make sense in general.
+        Extensions must implement their own requirements. For this purpose,
+        the list of previously accepted extensions is provided.
+
+        This process doesn't allow the server to reorder extensions. It can
+        only select a subset of the extensions proposed by the client.
+
+        Other requirements, for example related to mandatory extensions or the
+        order of extensions, may be implemented by overriding this method.
+
+        Args:
+            headers: WebSocket handshake request headers.
+
+        Returns:
+            Tuple[Optional[str], List[Extension]]: ``Sec-WebSocket-Extensions``
+            HTTP response header and list of accepted extensions.
+
+        Raises:
+            InvalidHandshake: if the Sec-WebSocket-Extensions header is invalid.
+
+        """
+        response_header_value: Optional[str] = None
+
+        extension_headers: List[ExtensionHeader] = []
+        accepted_extensions: List[Extension] = []
+
+        header_values = headers.get_all("Sec-WebSocket-Extensions")
+
+        if header_values and self.available_extensions:
+            parsed_header_values: List[ExtensionHeader] = sum(
+                [parse_extension(header_value) for header_value in header_values], []
+            )
+
+            for name, request_params in parsed_header_values:
+                for ext_factory in self.available_extensions:
+                    # Skip non-matching extensions based on their name.
+                    if ext_factory.name != name:
+                        continue
+
+                    # Skip non-matching extensions based on their params.
+                    try:
+                        response_params, extension = ext_factory.process_request_params(
+                            request_params, accepted_extensions
+                        )
+                    except NegotiationError:
+                        continue
+
+                    # Add matching extension to the final list.
+                    extension_headers.append((name, response_params))
+                    accepted_extensions.append(extension)
+
+                    # Break out of the loop once we have a match.
+                    break
+
+                # If we didn't break from the loop, no extension in our list
+                # matched what the client sent. The extension is declined.
+
+        # Serialize extension header.
+        if extension_headers:
+            response_header_value = build_extension(extension_headers)
+
+        return response_header_value, accepted_extensions
+
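+    # The docstring above notes that mandatory-extension policies can be
+    # implemented by overriding this method. A sketch (RequiredExtension is
+    # a placeholder name):
+    #
+    #     class MyServerProtocol(ServerProtocol):
+    #         def process_extensions(self, headers):
+    #             header, extensions = super().process_extensions(headers)
+    #             if not any(isinstance(e, RequiredExtension) for e in extensions):
+    #                 raise NegotiationError("missing required extension")
+    #             return header, extensions
+    #
+    # NegotiationError is an InvalidHandshake subclass, so ``accept`` will
+    # turn it into an HTTP 400 rejection.
+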
+    def process_subprotocol(self, headers: Headers) -> Optional[Subprotocol]:
+        """
+        Handle the Sec-WebSocket-Protocol HTTP request header.
+
+        Args:
+            headers: WebSocket handshake request headers.
+
+        Returns:
+           Optional[Subprotocol]: Subprotocol, if one was selected; this is
+           also the value of the ``Sec-WebSocket-Protocol`` response header.
+
+        Raises:
+            InvalidHandshake: if the Sec-WebSocket-Protocol header is invalid.
+
+        """
+        subprotocols: Sequence[Subprotocol] = sum(
+            [
+                parse_subprotocol(header_value)
+                for header_value in headers.get_all("Sec-WebSocket-Protocol")
+            ],
+            [],
+        )
+
+        return self.select_subprotocol(subprotocols)
+
+    def select_subprotocol(
+        self,
+        subprotocols: Sequence[Subprotocol],
+    ) -> Optional[Subprotocol]:
+        """
+        Pick a subprotocol among those offered by the client.
+
+        If several subprotocols are supported by both the client and the server,
+        pick the first one in the list declared by the server.
+
+        If the server doesn't support any subprotocols, continue without a
+        subprotocol, regardless of what the client offers.
+
+        If the server supports at least one subprotocol and the client doesn't
+        offer any, abort the handshake with an HTTP 400 error.
+
+        You can provide a ``select_subprotocol`` argument to
+        :class:`ServerProtocol` to override this logic. For example, you could
+        accept the connection even if the client doesn't offer a subprotocol,
+        rather than reject it.
+
+        Here's how to negotiate the ``chat`` subprotocol if the client supports
+        it and continue without a subprotocol otherwise::
+
+            def select_subprotocol(protocol, subprotocols):
+                if "chat" in subprotocols:
+                    return "chat"
+
+        Args:
+            subprotocols: list of subprotocols offered by the client.
+
+        Returns:
+            Optional[Subprotocol]: Selected subprotocol, if a common subprotocol
+            was found.
+
+            :obj:`None` to continue without a subprotocol.
+
+        Raises:
+            NegotiationError: custom implementations may raise this exception
+                to abort the handshake with an HTTP 400 error.
+
+        """
+        # Server doesn't offer any subprotocols.
+        if not self.available_subprotocols:  # None or empty list
+            return None
+
+        # Server offers at least one subprotocol but client doesn't offer any.
+        if not subprotocols:
+            raise NegotiationError("missing subprotocol")
+
+        # Server and client both offer subprotocols. Look for a shared one.
+        proposed_subprotocols = set(subprotocols)
+        for subprotocol in self.available_subprotocols:
+            if subprotocol in proposed_subprotocols:
+                return subprotocol
+
+        # No common subprotocol was found.
+        raise NegotiationError(
+            "invalid subprotocol; expected one of "
+            + ", ".join(self.available_subprotocols)
+        )
+
+    def reject(
+        self,
+        status: http.HTTPStatus,
+        text: str,
+    ) -> Response:
+        """
+        Create a handshake response to reject the connection.
+
+        A short plain text response is the best fallback when failing to
+        establish a WebSocket connection.
+
+        You must send the handshake response with :meth:`send_response`.
+
+        You can modify it before sending it, for example to alter HTTP headers.
+
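+        Here's a sketch of rejecting a connection after a failed check
+        (``protocol`` and the status code are illustrative)::
+
+            response = protocol.reject(http.HTTPStatus.UNAUTHORIZED, "Sorry!\n")
+            protocol.send_response(response)
+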
+        Args:
+            status: HTTP status code.
+            text: HTTP response body; will be encoded to UTF-8.
+
+        Returns:
+            Response: WebSocket handshake response event to send to the client.
+
+        """
+        body = text.encode()
+        headers = Headers(
+            [
+                ("Date", email.utils.formatdate(usegmt=True)),
+                ("Connection", "close"),
+                ("Content-Length", str(len(body))),
+                ("Content-Type", "text/plain; charset=utf-8"),
+            ]
+        )
+        response = Response(status.value, status.phrase, headers, body)
+        # When reject() is called from accept(), handshake_exc is already set.
+        # If a user calls reject(), set handshake_exc to guarantee invariant:
+        # "handshake_exc is None if and only if opening handshake succeeded."
+        if self.handshake_exc is None:
+            self.handshake_exc = InvalidStatus(response)
+        self.logger.info("connection failed (%d %s)", status.value, status.phrase)
+        return response
+
+    def send_response(self, response: Response) -> None:
+        """
+        Send a handshake response to the client.
+
+        Args:
+            response: WebSocket handshake response event to send.
+
+        """
+        if self.debug:
+            code, phrase = response.status_code, response.reason_phrase
+            self.logger.debug("> HTTP/1.1 %d %s", code, phrase)
+            for key, value in response.headers.raw_items():
+                self.logger.debug("> %s: %s", key, value)
+            if response.body is not None:
+                self.logger.debug("> [body] (%d bytes)", len(response.body))
+
+        self.writes.append(response.serialize())
+
+        if response.status_code == 101:
+            assert self.state is CONNECTING
+            self.state = OPEN
+        else:
+            self.send_eof()
+            self.parser = self.discard()
+            next(self.parser)  # start coroutine
+
+    def parse(self) -> Generator[None, None, None]:
+        if self.state is CONNECTING:
+            try:
+                request = yield from Request.parse(
+                    self.reader.read_line,
+                )
+            except Exception as exc:
+                self.handshake_exc = exc
+                self.send_eof()
+                self.parser = self.discard()
+                next(self.parser)  # start coroutine
+                yield
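+                # discard() is now fed incoming data instead of this
+                # generator, which is never resumed past this point.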
+
+            if self.debug:
+                self.logger.debug("< GET %s HTTP/1.1", request.path)
+                for key, value in request.headers.raw_items():
+                    self.logger.debug("< %s: %s", key, value)
+
+            self.events.append(request)
+
+        yield from super().parse()
+
+
+class ServerConnection(ServerProtocol):
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        warnings.warn(
+            "ServerConnection was renamed to ServerProtocol",
+            DeprecationWarning,
+        )
+        super().__init__(*args, **kwargs)
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/container.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/container.h
new file mode 100644
index 0000000000000000000000000000000000000000..3f3864077e2106650ca4ea126740f92d4ba3f824
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/container.h
@@ -0,0 +1,167 @@
+#pragma once
+
+#include <mutex>
+#include <unordered_map>
+
+#include <torch/csrc/distributed/autograd/context/context.h>
+
+namespace torch {
+namespace distributed {
+namespace autograd {
+
+// Singleton class per worker which is responsible for storing the distributed
+// autograd context for each autograd pass and also cleans up data for an
+// autograd pass once it's done.
+//
+// Each autograd pass is assigned a unique autograd_context_id and all data for
+// that pass (DistAutogradContext) is stored in this container indexed by the
+// autograd_context_id. The autograd_context_id itself is a 64 bit globally
+// unique id. The first 16 bits are the worker_id and the remaining 48 bits are
+// an auto-incrementing id for each worker.
+//
+// This container is also responsible for maintaining a globally unique message
+// id, which is used to associate send/recv autograd function pairs. The format
+// is similar to the autograd_context_id: a 64 bit integer whose first 16 bits
+// are the worker id and whose remaining 48 bits auto-increment.
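+//
+// An illustrative sketch of the id layout (not an API): a worker with
+// worker_id 3 produces ids of the form (int64_t(3) << 48) | counter, so its
+// first autograd_context_id is 3 << 48 and later ids increment the low 48
+// bits.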
+class TORCH_API DistAutogradContainer {
+ public:
+  explicit DistAutogradContainer(uint32_t num_shards);
+
+  // One time initialization of the container.
+  static DistAutogradContainer& init(int64_t worker_id);
+
+  // Retrieve the singleton instance of the container, ensures we have
+  // initialized the container.
+  static DistAutogradContainer& getInstance();
+
+  // Create a new context for a distributed autograd pass.
+  const ContextPtr newContext();
+
+  // Clean up resources for a given context_id once the autograd pass is done.
+  // Sends RPC to other workers this worker knows about, telling them to clean
+  // up their context as well. Throws an exception if the context_id does not
+  // exist.
+  void releaseContext(int64_t context_id);
+
+  // Releases an autograd context if it is present on this node. Also sends RPC
+  // to other workers this worker knows about, telling them to clean up their
+  // context. Does nothing if it is not present.
+  void releaseContextIfPresent(int64_t context_id);
+
+  // Checks if the passed in context_id is valid.
+  void isValidContext(int64_t context_id);
+
+  // Retrieve the autograd context for a given context_id.
+  ContextPtr retrieveContext(int64_t context_id);
+
+  // Retrieves the currently active autograd context for the current thread.
+  ContextPtr currentContext();
+
+  // Checks whether or not the current thread has a valid autograd context.
+  bool hasValidContext() const;
+
+  // Generate a new autograd_message_id for send/recv autograd functions.
+  int64_t newAutogradMessageId();
+
+  // Creates a new autograd context with the provided context_id. If a context
+  // already exists with the provided context_id, we just return it.
+  // This does not set the current context for the current thread.
+  ContextPtr getOrCreateContext(int64_t context_id);
+
+  // Retrieves the maximum possible autograd_context_id/autograd_message_id that
+  // can be generated by this worker.
+  int64_t getMaxId();
+
+  // Retrieves the worker ID for this node
+  rpc::worker_id_t getWorkerId() const;
+
+  // Can set current context id if there is no valid context yet
+  static void setCurrentContextId(int64_t contextId);
+
+  // Forcibly sets the thread local current context id. Should only be used in
+  // cases where you know what you're doing and need to override the thread
+  // local. Otherwise, use setCurrentContextId instead.
+  static void forceCurrentContextId(int64_t contextId);
+
+  // Clear current context id
+  void clearCurrentContext();
+
+  // Returns the number of autograd contexts in the container.
+  size_t numAutogradContexts() const;
+
+  // Returns the current thread local context id for this thread.
+  static int64_t currentContextId();
+
+  DistAutogradContainer(const DistAutogradContainer&) = delete;
+  DistAutogradContainer& operator=(const DistAutogradContainer&) = delete;
+  DistAutogradContainer(DistAutogradContainer&&) = delete;
+  DistAutogradContainer& operator=(DistAutogradContainer&&) = delete;
+
+ private:
+  // Number of shards for the map storing autograd contexts. We'd like this
+  // to be a power of 2, and we don't expect a value much higher than the
+  // number of cores to provide much benefit.
+  static constexpr uint32_t kNumDefaultShards = 128;
+
+  // Use cache line size for alignment.
+  static constexpr int kCacheLineSize = 64;
+
+  // Structure holding one shard of the sharded autograd context map with its
+  // associated lock. Align to cache line size to avoid contention between
+  // adjacent entries.
+  struct alignas(kCacheLineSize) ContextsShard {
+    // Lock for this shard.
+    mutable std::mutex lock;
+
+    // Map storing autograd contexts for this shard.
+    std::unordered_map<int64_t, ContextPtr> contexts;
+  };
+
+  DistAutogradContainer() = delete;
+  ~DistAutogradContainer() = default;
+
+  static DistAutogradContainer& getInstanceInternal();
+
+  // Retrieve the shard for given context_id.
+  ContextsShard& getShard(int64_t context_id);
+
+  // Sends an RPC to the workers that have a context corresponding to passed in
+  // context_id. This function should be called with the lock.
+  void sendReleaseContextRpc(
+      const std::unordered_set<rpc::worker_id_t>& workerIds,
+      int64_t context_id);
+
+  // Erase context_id from the autograd context map, and reset the thread local
+  // current context id if it corresponds to the passed in context id. This
+  // function should be called with the lock.
+  void eraseContextIdAndReset(ContextsShard& shard, int64_t context_id);
+
+  // Compute the number of shards for the autograd_contexts_ map.
+  static uint32_t computeNumShards();
+
+  // Auto incrementing context id used to identify unique autograd passes.
+  // Initialized with the first 16 bits being the worker_id.
+  std::atomic<int64_t> next_context_id_;
+
+  // Unique id to identify a worker in the distributed setting.
+  int16_t worker_id_;
+
+  // Whether or not the container has been initialized appropriately.
+  bool initialized_;
+
+  // Sharded autograd context map.
+  std::vector<ContextsShard> autograd_contexts_;
+
+  // Number of shards for the sharded autograd_contexts_ map.
+  uint32_t num_shards_;
+
+  // Autograd message id to identify unique send/recv autograd function pairs.
+  std::atomic<int64_t> next_autograd_message_id_;
+
+  // Maximum allowed value for autograd_context_id or autograd_message_id.
+  int64_t max_id_;
+};
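+
+// An illustrative usage sketch (the worker id is an example):
+//
+//   DistAutogradContainer::init(/*worker_id=*/0);
+//   auto& container = DistAutogradContainer::getInstance();
+//   const ContextPtr ctx = container.newContext();
+//   // ... forward + distributed backward pass ...
+//   container.releaseContext(ctx->contextId());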
+
+} // namespace autograd
+} // namespace distributed
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/context.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/context.h
new file mode 100644
index 0000000000000000000000000000000000000000..bf8bb7cdef8c06cd638532c4671cd2875b7d7796
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/context.h
@@ -0,0 +1,174 @@
+#pragma once
+
+#include <cstdint>
+#include <functional>
+
+#include <ATen/core/Dict.h>
+#include <c10/core/impl/VirtualGuardImpl.h>
+#include <torch/csrc/autograd/engine.h>
+#include <torch/csrc/distributed/autograd/functions/sendrpc_backward.h>
+#include <torch/csrc/distributed/rpc/rpc_agent.h>
+
+namespace torch {
+namespace distributed {
+namespace autograd {
+
+class RecvRpcBackward;
+
+// DistAutogradContext which stores information for a single distributed
+// autograd pass on a worker.
+class TORCH_API DistAutogradContext {
+ public:
+  using GradCallback = std::function<bool(torch::Tensor&)>;
+
+  explicit DistAutogradContext(int64_t contextId);
+
+  // Retrieves the autograd context id for this context.
+  int64_t contextId() const;
+
+  // Records a 'send' autograd function for this context with the provided
+  // message id.
+  void addSendFunction(
+      const std::shared_ptr<SendRpcBackward>& func,
+      int64_t autograd_message_id);
+
+  // Records a 'recv' autograd function for this context with the provided
+  // message id.
+  void addRecvFunction(
+      std::shared_ptr<RecvRpcBackward>& func,
+      int64_t autograd_message_id);
+
+  // Given an autograd_message_id, retrieve the appropriate send function.
+  std::shared_ptr<SendRpcBackward> retrieveSendFunction(
+      int64_t autograd_message_id);
+
+  // Return all send functions for this context.
+  std::unordered_map<int64_t, std::shared_ptr<SendRpcBackward>> sendFunctions()
+      const;
+
+  // Return all recv functions for this context.
+  std::unordered_map<int64_t, std::shared_ptr<RecvRpcBackward>> recvFunctions()
+      const;
+
+  // Adds a future message recording an outstanding RPC.
+  void addOutstandingRpc(const c10::intrusive_ptr<rpc::JitFuture>& jitFuture);
+
+  // Returns all gradients.
+  const c10::Dict<torch::Tensor, torch::Tensor> getGradients() const;
+
+  // This function gives a mutable grad reference to the callback.
+  // If the callback returns true, it means the grad in the context
+  // needs to be updated.
+  void runGradCallbackForVariable(
+      const torch::autograd::Variable& variable,
+      GradCallback&& cb);
+
+  DistAutogradContext(const DistAutogradContext&) = delete;
+  DistAutogradContext& operator=(const DistAutogradContext&) = delete;
+  DistAutogradContext(DistAutogradContext&&) = delete;
+  DistAutogradContext& operator=(DistAutogradContext&&) = delete;
+
+  // records the workerID of a node that we sent an RPC to.
+  // workerIDs are added here when we attach a send function to this autograd
+  // context
+  void addKnownWorkerId(const rpc::worker_id_t workerId);
+
+  // Retrieves a set containing the known workerIds for this context
+  // These are the different workers that this context has sent RPCs to.
+  std::unordered_set<rpc::worker_id_t> getKnownWorkerIds() const;
+
+ private:
+  friend class BackwardPassCleanupGuard;
+  friend class DistEngine;
+  friend class RecvRpcBackward;
+  friend class DistAccumulateGradCaptureHook;
+
+  // Record that we would like to accumulate the provided gradient on the given
+  // variable.
+  void accumulateGrad(
+      const torch::autograd::Variable& variable,
+      const torch::Tensor& grad,
+      size_t num_expected_refs);
+
+  // Retrieve the GraphTask.
+  std::shared_ptr<torch::autograd::GraphTask> retrieveGraphTask();
+
+  // Set the appropriate graph task for the backward pass. Can be called only
+  // once.
+  void setGraphTask(std::shared_ptr<torch::autograd::GraphTask> graphTask);
+
+  // Resets the graph task to ensure we can run another distributed backward
+  // pass for the same autograd context.
+  void resetGraphTask();
+
+  // Waits for all outstanding RPCs for this context to finish and clears all
+  // outstanding rpcs held in this context. This should be called only once.
+  c10::intrusive_ptr<c10::ivalue::Future> clearAndWaitForOutstandingRpcsAsync();
+
+  void clearOutstandingRpcs();
+
+  // Record an event to mark the completion of gradient computation. These
+  // events will later help to properly synchronize gradients consumptions
+  // in getGradients(). We need these events because backward and
+  // optimizer.step are separate RPC calls, and will occur on different CUDA
+  // streams. Without synchronization, it is possible that gradients are
+  // consumed before they are ready.
+  void recordGradEvent(c10::Device device);
+
+  const int64_t contextId_;
+
+  // Set containing known worker IDs, used in cleaning up autograd context.
+  // Whenever a sendRpcBackward is attached to the autograd graph for this
+  // context, the destination is added here.
+  std::unordered_set<rpc::worker_id_t> knownWorkerIds_;
+
+  // Map from autograd_message_id to appropriate 'send' autograd function.
+  std::unordered_map<int64_t, std::shared_ptr<SendRpcBackward>>
+      sendAutogradFunctions_;
+
+  // Map from autograd_message_id to appropriate 'recv' autograd function.
+  std::unordered_map<int64_t, std::shared_ptr<RecvRpcBackward>>
+      recvAutogradFunctions_;
+
+  // Gradients accumulated in this context so far. The key is the variable on
+  // which the gradient needs to be accumulated and the value is the gradient
+  // that needs to be accumulated on that variable.
+  c10::Dict<torch::Tensor, torch::Tensor> accumulatedGrads_;
+
+  // See comments for recordGradEvent(c10::Device device);
+  std::unordered_map<c10::Device, c10::Event> gradReadyEvents_;
+  const c10::impl::VirtualGuardImpl impl_;
+
+  // The autograd GraphTask for the backward pass on this node for this context.
+  std::shared_ptr<torch::autograd::GraphTask> graphTask_;
+
+  // List of futures for RPCs initiated by this node to propagate gradients to
+  // other nodes. The distributed autograd engine on this node can return
+  // successfully only if all these futures are done and are successful.
+  std::vector<c10::intrusive_ptr<rpc::JitFuture>> outStandingRpcs_;
+
+  // Lock to protect concurrent modification of the context.
+  mutable std::mutex lock_;
+};
+
+using ContextPtr = std::shared_ptr<DistAutogradContext>;
+
+// This class stores a shared_ptr to a DistAutogradContext instance in a
+// thread local variable. The instance is given by the call site. The class
+// doesn't know the current context. It's just a util class.
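+//
+// An illustrative RAII usage sketch (not part of the API):
+//
+//   {
+//     ThreadLocalDistAutogradContext guard(std::move(ctx));
+//     // code here sees ctx via ThreadLocalDistAutogradContext::getContextPtr()
+//   } // the previous thread-local context is restored on destruction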
+class TORCH_API ThreadLocalDistAutogradContext {
+ public:
+  // Store 'new_context' to the thread local variable maintained by this class.
+  explicit ThreadLocalDistAutogradContext(ContextPtr&& new_context);
+  ~ThreadLocalDistAutogradContext();
+
+  // Retrieve the stored DistAutogradContext instance.
+  static ContextPtr getContextPtr();
+
+ private:
+  ContextPtr prev_context_ptr_;
+};
+
+} // namespace autograd
+} // namespace distributed
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/recvrpc_backward.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/recvrpc_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..6e6678b1289859eb162a96ccd3063d94a9e5e0fe
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/recvrpc_backward.h
@@ -0,0 +1,49 @@
+#pragma once
+
+#include <torch/csrc/autograd/function.h>
+#include <torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h>
+#include <torch/csrc/distributed/rpc/rpc_agent.h>
+#include <torch/csrc/distributed/rpc/types.h>
+
+namespace torch {
+namespace distributed {
+namespace autograd {
+
+// Forward declarations.
+class DistAutogradContext;
+
+// As part of our distributed autograd implementation, whenever we receive an
+// RPC from a node, we add a 'RecvRpcBackward' autograd function to the
+// autograd graph. This is more or less a placeholder function that is used to
+// pass gradients to the remote host during the backward pass. The inputs to the
+// RPC function are the inputs to this autograd function.
+class TORCH_API RecvRpcBackward : public torch::autograd::Node {
+ public:
+  explicit RecvRpcBackward(
+      const AutogradMetadata& autogradMetadata,
+      std::shared_ptr<DistAutogradContext> autogradContext,
+      rpc::worker_id_t fromWorkerId,
+      rpc::DeviceMap deviceMap);
+
+  torch::autograd::variable_list apply(
+      torch::autograd::variable_list&& grads) override;
+
+ private:
+  const AutogradMetadata autogradMetadata_;
+
+  // Hold a weak reference to the autograd context to avoid circular
+  // dependencies with the context (since it holds a reference to
+  // RecvRpcBackward).
+  std::weak_ptr<DistAutogradContext> autogradContext_;
+
+  // The worker id from which the RPC was received. During the backward pass,
+  // we need to propagate the gradients to this workerId.
+  rpc::worker_id_t fromWorkerId_;
+
+  // Device mapping for tensors sent over RPC.
+  const rpc::DeviceMap deviceMap_;
+};
+
+} // namespace autograd
+} // namespace distributed
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/sendrpc_backward.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/sendrpc_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..ff576ace174fdfae29abafdec3678532e03cc29d
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/sendrpc_backward.h
@@ -0,0 +1,37 @@
+#pragma once
+
+#include <torch/csrc/autograd/function.h>
+
+namespace torch {
+namespace distributed {
+namespace autograd {
+
+// As part of our distributed autograd implementation, whenever we send an RPC
+// from one node to another, we add a 'SendRpcBackward' autograd function to the
+// autograd graph. This is more or less a placeholder function that is used to
+// kick off the autograd engine on the current worker on the backward pass. The
+// edges for this autograd function are the inputs to the RPC method.
+//
+// During the backward pass, this function is queued for execution in the
+// autograd engine which eventually runs the rest of the autograd graph.
+struct TORCH_API SendRpcBackward : public torch::autograd::Node {
+ public:
+  torch::autograd::variable_list apply(
+      torch::autograd::variable_list&& inputs) override;
+
+  // SendRpcBackward is actually the root of an autograd graph on the local
+  // node. As a result, it doesn't receive any 'inputs', but rather the RPC
+  // framework passes gradients over to this function to kick off local autograd
+  // computation.
+  void setGrads(const torch::autograd::variable_list& grads);
+
+  // Retrieve the grads for the function.
+  const torch::autograd::variable_list& getGrads() const;
+
+ private:
+  torch::autograd::variable_list grads_;
+};
+
+} // namespace autograd
+} // namespace distributed
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.h
new file mode 100644
index 0000000000000000000000000000000000000000..6dc4413cfa50980af4df98bd88c9fd57e86a2a75
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.h
@@ -0,0 +1,39 @@
+#pragma once
+
+#include <torch/csrc/distributed/rpc/message.h>
+#include <torch/csrc/distributed/rpc/rpc_command_base.h>
+#include <torch/csrc/distributed/rpc/types.h>
+
+namespace torch {
+namespace distributed {
+namespace autograd {
+
+// Internal system RPC to invoke distributed backward pass on remote nodes when
+// 'rref.backward()' is invoked.
+class TORCH_API RRefBackwardReq : public rpc::RpcCommandBase {
+ public:
+  RRefBackwardReq(
+      const rpc::RRefId& rrefId,
+      int64_t autogradContextId,
+      bool retainGraph = false);
+
+  const rpc::RRefId& getRRefId() const;
+
+  int64_t getAutogradContextId() const;
+
+  bool retainGraph() const;
+
+  // Serialization and deserialization methods.
+  c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
+  static std::unique_ptr<RRefBackwardReq> fromMessage(
+      const rpc::Message& message);
+
+ private:
+  const rpc::RRefId rrefId_;
+  const int64_t autogradContextId_;
+  const bool retainGraph_;
+};
+
+} // namespace autograd
+} // namespace distributed
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backend.hpp b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backend.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..b75e457b8cd01c4344b93c3eb3231e297784d19d
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backend.hpp
@@ -0,0 +1,416 @@
+#pragma once
+
+#include <chrono>
+#include <memory>
+#include <vector>
+
+#include <ATen/ATen.h>
+#include <c10/macros/Macros.h>
+
+#include <torch/csrc/distributed/c10d/Types.hpp>
+#include <torch/csrc/distributed/c10d/Utils.hpp>
+#include <torch/csrc/distributed/c10d/Work.hpp>
+#include <torch/csrc/distributed/c10d/debug.h>
+
+constexpr auto kBackendDefaultTimeout =
+    std::chrono::milliseconds(30 * 60 * 1000);
+
+namespace c10d {
+
+class TORCH_API Backend : public torch::CustomClassHolder {
+ public:
+  // Backend Options is a base struct that defines the basic options
+  // when constructing a Backend. Each Backend subclass should
+  // extend this struct and define its options if it wants to provide more
+  // config options (beyond basic ones defined here) to the end user.
+  struct TORCH_API Options : torch::CustomClassHolder {
+    explicit Options(
+        std::string backend,
+        std::chrono::milliseconds timeout = kBackendDefaultTimeout)
+        : timeout(timeout), backend(std::move(backend)) {}
+    ~Options() override = default;
+
+    std::chrono::milliseconds timeout;
+
+    // backend name
+    // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
+    const std::string backend;
+  };
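+
+  // An illustrative sketch of a subclass extending Options (the backend name
+  // and the extra field are hypothetical):
+  //
+  //   struct MyBackendOptions : Backend::Options {
+  //     MyBackendOptions() : Options("mybackend") {}
+  //     bool use_fast_path = false;
+  //   };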
+
+  explicit Backend(int rank, int size);
+  ~Backend() override = 0;
+
+  int getRank() const {
+    return rank_;
+  }
+
+  int getSize() const {
+    return size_;
+  }
+
+  // Returns a unique opaque ID of this backend that can be used to correlate
+  // with its collectives.
+  int64_t getID() const {
+    return reinterpret_cast<std::intptr_t>(this);
+  }
+
+  virtual bool supportsSplitting() const {
+    return false;
+  }
+
+  virtual void startCoalescing() {
+    TORCH_CHECK(
+        false,
+        c10::str(
+            "Backend ",
+            getBackendName(),
+            " does not implement startCoalescing"));
+  }
+
+  virtual c10::intrusive_ptr<Work> endCoalescing() {
+    TORCH_CHECK(
+        false,
+        c10::str(
+            "Backend ", getBackendName(), " does not implement endCoalescing"));
+  }
+
+  // Subclasses must override this method to return the backend name
+  virtual const std::string getBackendName() const {
+    TORCH_INTERNAL_ASSERT(false, "getBackendName is not implemented.");
+  };
+
+  virtual c10::intrusive_ptr<Work> broadcast(
+      std::vector<at::Tensor>& /* tensors */,
+      const BroadcastOptions& /* opts */ = BroadcastOptions()) {
+    TORCH_CHECK(
+        false,
+        c10::str("Backend ", getBackendName(), " does not support broadcast"));
+  }
+
+  virtual c10::intrusive_ptr<Work> allreduce(
+      std::vector<at::Tensor>& /* tensors */,
+      const AllreduceOptions& /* opts */ = AllreduceOptions()) {
+    TORCH_CHECK(
+        false,
+        c10::str("Backend ", getBackendName(), " does not support allreduce"));
+  }
+
+  virtual c10::intrusive_ptr<Work> allreduce_sparse(
+      std::vector<at::Tensor>& /* tensors */,
+      const AllreduceOptions& /* opts */ = AllreduceOptions()) {
+    TORCH_CHECK(
+        false,
+        c10::str(
+            "Backend ",
+            getBackendName(),
+            " does not support allreduce sparse"));
+  }
+
+  virtual c10::intrusive_ptr<Work> allreduce_coalesced(
+      std::vector<at::Tensor>& /* tensors */,
+      const AllreduceCoalescedOptions& /* opts */ =
+          AllreduceCoalescedOptions()) {
+    TORCH_CHECK(
+        false,
+        c10::str(
+            "Backend ",
+            getBackendName(),
+            " does not support allreduce_coalesced"));
+  }
+
+  virtual c10::intrusive_ptr<Work> reduce(
+      std::vector<at::Tensor>& /* tensors */,
+      const ReduceOptions& /* opts */ = ReduceOptions()) {
+    TORCH_CHECK(
+        false,
+        c10::str("Backend ", getBackendName(), " does not support reduce"));
+  }
+
+  virtual c10::intrusive_ptr<Work> allgather(
+      std::vector<std::vector<at::Tensor>>& /* outputTensors */,
+      std::vector<at::Tensor>& /* inputTensors */,
+      const AllgatherOptions& /* opts */ = AllgatherOptions()) {
+    TORCH_CHECK(
+        false,
+        c10::str("Backend ", getBackendName(), " does not support allgather"));
+  }
+
+  // Gathers a single tensor inputBuffer into a single buffer outputBuffer that
+  // is interpreted as a contiguous collection of size inputBuffer * WORLD_SIZE.
+  // For implementers of ProcessGroup API and advanced users only.
+  // Note: this function will be deprecated in near future.
+  virtual c10::intrusive_ptr<Work> _allgather_base(
+      at::Tensor& /* outputBuffer */,
+      at::Tensor& /* inputBuffer */,
+      const AllgatherOptions& /* opts */ = AllgatherOptions()) {
+    TORCH_CHECK(
+        false,
+        c10::str(
+            "Backend ", getBackendName(), " does not support _allgather_base"));
+  }
+
+  // This function is deprecated and will be moved out of Backend to comms:
+  // * do not add dependencies on this function,
+  // * do not implement it in your Backend, implement _allgather_base
+  //   instead.
+  virtual c10::intrusive_ptr<Work> allgather_coalesced(
+      std::vector<std::vector<at::Tensor>>& /* outputTensorLists */,
+      std::vector<at::Tensor>& /* inputTensors */,
+      const AllgatherOptions& /* opts */ = AllgatherOptions()) {
+    TORCH_CHECK(
+        false,
+        c10::str(
+            "Backend ",
+            getBackendName(),
+            " does not support allgather_coalesced"));
+  }
+
+  // This function is a coalesced version of `allgather_into_tensor` (currently
+  // still named as `_allgather_base`). Each tensor in the vector corresponds to
+  // an input/output of one `allgather_into_tensor` operation.
+  virtual c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
+      std::vector<at::Tensor>& /* outputs */,
+      std::vector<at::Tensor>& /* inputs */,
+      const AllgatherOptions& /* opts */ = AllgatherOptions()) {
+    TORCH_CHECK(
+        false,
+        c10::str(
+            "Backend ",
+            getBackendName(),
+            " does not support allgather_into_tensor_coalesced"));
+  }
+
+  virtual c10::intrusive_ptr<Work> gather(
+      std::vector<std::vector<at::Tensor>>& /* outputTensors */,
+      std::vector<at::Tensor>& /* inputTensors */,
+      const GatherOptions& /* opts */ = GatherOptions()) {
+    TORCH_CHECK(
+        false,
+        c10::str("Backend ", getBackendName(), " does not support gather"));
+  }
+
+  virtual c10::intrusive_ptr<Work> scatter(
+      std::vector<at::Tensor>& /* outputTensors */,
+      std::vector<std::vector<at::Tensor>>& /* inputTensors */,
+      const ScatterOptions& /* opts */ = ScatterOptions()) {
+    TORCH_CHECK(
+        false,
+        c10::str("Backend ", getBackendName(), " does not support scatter"));
+  }
+
+  virtual c10::intrusive_ptr<Work> reduce_scatter(
+      std::vector<at::Tensor>& /* outputTensors */,
+      std::vector<std::vector<at::Tensor>>& /* inputTensors */,
+      const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) {
+    TORCH_CHECK(
+        false,
+        c10::str(
+            "Backend ", getBackendName(), " does not support reduce_scatter"));
+  }
+
+  virtual c10::intrusive_ptr<Work> _reduce_scatter_base(
+      at::Tensor& /* outputBuffer */,
+      at::Tensor& /* inputBuffer */,
+      const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) {
+    TORCH_CHECK(
+        false,
+        c10::str(
+            "Backend ",
+            getBackendName(),
+            " does not support _reduce_scatter_base"));
+  }
+
+  // This function is a coalesced version of `reduce_scatter_tensor` (currently
+  // still named as `_reduce_scatter_base`). Each tensor in the vector
+  // corresponds to an input/output of one `reduce_scatter_tensor` operation.
+  virtual c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
+      std::vector<at::Tensor>& /* outputs */,
+      std::vector<at::Tensor>& /* inputs */,
+      const ReduceScatterOptions& /* opts */ = ReduceScatterOptions()) {
+    TORCH_CHECK(
+        false,
+        c10::str(
+            "Backend ",
+            getBackendName(),
+            " does not support reduce_scatter_tensor_coalesced"));
+  }
+
+  virtual c10::intrusive_ptr<Work> alltoall_base(
+      at::Tensor& /* outputBuffer */,
+      at::Tensor& /* inputBuffer */,
+      std::vector<int64_t>& /* outputSplitSizes */,
+      std::vector<int64_t>& /* inputSplitSizes */,
+      const AllToAllOptions& /* opts */ = AllToAllOptions()) {
+    TORCH_CHECK(
+        false,
+        c10::str(
+            "Backend ", getBackendName(), " does not support alltoall_base"));
+  }
+
+  virtual c10::intrusive_ptr<Work> alltoall(
+      std::vector<at::Tensor>& /* outputTensors */,
+      std::vector<at::Tensor>& /* inputTensors */,
+      const AllToAllOptions& opts = AllToAllOptions()) {
+    TORCH_CHECK(
+        false,
+        c10::str("Backend ", getBackendName(), " does not support alltoall"));
+  }
+
+  virtual void monitoredBarrier(
+      const BarrierOptions& /* unused */,
+      bool /* unused */ = false) {
+    auto backendName = getBackendName();
+    TORCH_CHECK(
+        false,
+        c10::str(
+            "Backend ",
+            backendName,
+            " does not support monitoredBarrier, only GLOO supports monitored barrier."));
+  }
+
+  // Agrees on an initial sequence number for the whole group by having rank 0
+  // create it and broadcast it to other ranks using the store. Only implemented
+  // for GLOO and NCCL backends currently.
+  virtual void setSequenceNumberForGroup() {
+    auto backendName = getBackendName();
+    TORCH_CHECK(
+        false,
+        c10::str(
+            "Backend ",
+            backendName,
+            " does not yet support sequence numbers."));
+  }
+
+  // Retrieves the current sequence number for the whole group, which should be
+  // in sync. If the returned number is not consistent across the group, it
+  // may indicate that there is some sort of collective desynchronization.
+  virtual uint64_t getSequenceNumberForGroup() {
+    auto backendName = getBackendName();
+    TORCH_CHECK(
+        false,
+        c10::str(
+            "Backend ",
+            backendName,
+            " does not yet support sequence numbers."));
+  }
+
+  virtual c10::intrusive_ptr<Work> send(
+      std::vector<at::Tensor>& /* tensors */,
+      int /* dstRank */,
+      int /* tag */) {
+    TORCH_CHECK(
+        false,
+        c10::str("Backend ", getBackendName(), " does not support send"));
+  }
+
+  virtual c10::intrusive_ptr<Work> recv(
+      std::vector<at::Tensor>& /* tensors */,
+      int /* srcRank */,
+      int /* tag */) {
+    TORCH_CHECK(
+        false,
+        c10::str("Backend ", getBackendName(), " does not support recv"));
+  }
+
+  virtual c10::intrusive_ptr<Work> recvAnysource(
+      std::vector<at::Tensor>& /* tensors */,
+      int /* tag */) {
+    TORCH_CHECK(
+        false,
+        c10::str(
+            "Backend ", getBackendName(), " does not support recvAnysource"));
+  }
+
+  virtual c10::intrusive_ptr<Work> barrier(
+      const BarrierOptions& /* opts */ = BarrierOptions()) {
+    TORCH_CHECK(
+        false,
+        c10::str("Backend ", getBackendName(), " does not support barrier"));
+  }
+
+  virtual void registerOnCompletionHook(
+      std::function<void(std::shared_ptr<WorkInfo>)>&& hook) {
+    TORCH_CHECK(
+        false,
+        "Only ProcessGroupNCCL supports onCompletion hook, but got ",
+        getBackendName(),
+        " backend.");
+  }
+
+  virtual void waitForPendingWorks() {
+    TORCH_CHECK(
+        false,
+        "Only ProcessGrouppNCCL supports waitForPendingWorks, but got ",
+        getBackendName(),
+        " backend.");
+  }
+
+  virtual void enableCollectivesTiming() {
+    TORCH_CHECK(
+        false,
+        "Backend ",
+        getBackendName(),
+        " is missing implementation of enableCollectivesTiming.");
+  }
+
+  bool hasHooks() const {
+    return onCompletionHook_ != nullptr;
+  }
+
+  // Do not call this directly, use ProcessGroup::setGroupName instead.
+  void setGroupUid(const std::string& pg_uid) {
+    pg_uid_ = pg_uid;
+  }
+
+  const std::string& getGroupUid() const {
+    return pg_uid_;
+  }
+
+  void setGroupDesc(const std::string& desc) {
+    pg_desc_ = desc;
+  }
+
+  const std::string& getGroupDesc() const {
+    return pg_desc_;
+  }
+
+  // See similar functions in ProcessGroup.hpp for context.
+  std::optional<at::Device> getBoundDeviceId() const {
+    return bound_device_id_;
+  }
+
+  // Perform an eager connect to the specified device if the backend supports
+  // it.
+  virtual void eagerConnectSingleDevice(at::Device device) {
+    // no-op in the default case; this is an optimization some
+    // backends may perform
+  }
+
+  void setBoundDeviceId(std::optional<at::Device> device) {
+    if (device) {
+      TORCH_CHECK(device->has_index(), "setBoundDeviceId must have an index");
+    }
+    bound_device_id_ = device;
+  }
+
+ protected:
+  // Implementations of this interface need to call this to setup
+  // appropriate logging etc.
+  void init();
+
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
+  const int rank_;
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
+  const int size_;
+  // Debug level setting. It is parsed once when ProcessGroup is constructed and
+  // remains the same across use of this process group.
+  DebugLevel dist_debug_level_;
+  std::string pg_uid_;
+  std::string pg_desc_;
+
+  std::function<void(std::shared_ptr<WorkInfo>)> onCompletionHook_;
+
+  std::optional<at::Device> bound_device_id_;
+};
+
+} // namespace c10d
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/CUDASymmetricMemory.hpp b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/CUDASymmetricMemory.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..2ffd194a76e288145ffb80f1cc5814ab577d955b
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/CUDASymmetricMemory.hpp
@@ -0,0 +1,115 @@
+#pragma once
+
+#include <ATen/ATen.h>
+#include <c10/cuda/driver_api.h>
+#include <torch/csrc/distributed/c10d/SymmetricMemory.hpp>
+
+namespace c10d {
+namespace symmetric_memory {
+
+#if !defined(USE_ROCM) && defined(PYTORCH_C10_DRIVER_API_SUPPORTED)
+using HandleType = CUmemGenericAllocationHandle;
+#else
+using HandleType = void*;
+#endif
+
+class CUDASymmetricMemory : public SymmetricMemory {
+ public:
+  CUDASymmetricMemory(
+      std::vector<HandleType> handles,
+      size_t block_size,
+      std::vector<void*> buffers,
+      std::vector<void*> signal_pads,
+      HandleType mc_handle,
+      void* mc_addr,
+      size_t buffer_size,
+      int local_device_idx,
+      int rank,
+      int world_size);
+
+  ~CUDASymmetricMemory() override;
+
+  std::vector<void*> get_buffer_ptrs() override;
+  std::vector<void*> get_signal_pad_ptrs() override;
+  void** get_buffer_ptrs_dev() override;
+  void** get_signal_pad_ptrs_dev() override;
+  size_t get_buffer_size() override;
+  size_t get_signal_pad_size() override;
+
+  bool has_multicast_support() override;
+  void* get_multicast_ptr() override;
+
+  at::Tensor get_buffer(
+      int rank,
+      c10::IntArrayRef sizes,
+      c10::ScalarType dtype,
+      int64_t storage_offset) override;
+
+  void barrier(int channel) override;
+  void put_signal(int dst_rank, int channel) override;
+  void wait_signal(int src_rank, int channel) override;
+
+  int get_rank() override;
+  int get_world_size() override;
+
+ private:
+  std::vector<HandleType> handles_;
+  size_t block_size_;
+  std::vector<void*> buffers_;
+  std::vector<void*> signal_pads_;
+  HandleType mc_handle_;
+  void* mc_addr_;
+  size_t buffer_size_;
+  int local_device_idx_;
+  int rank_;
+  int world_size_;
+  void** buffers_dev_;
+  void** signal_pads_dev_;
+  std::optional<std::function<void()>> finalizer_;
+};
+
+struct Block : public c10::intrusive_ptr_target {
+  HandleType handle;
+  int device_idx;
+  size_t block_size;
+  size_t buffer_size;
+  size_t signal_pad_offset;
+  std::string group_name;
+  c10::intrusive_ptr<CUDASymmetricMemory> symm_mem = nullptr;
+
+  Block(
+      HandleType handle,
+      int device_idx,
+      size_t block_size,
+      size_t buffer_size,
+      size_t signal_pad_offset,
+      const std::string& group_name)
+      : handle(handle),
+        device_idx(device_idx),
+        block_size(block_size),
+        buffer_size(buffer_size),
+        signal_pad_offset(signal_pad_offset),
+        group_name(group_name),
+        symm_mem(nullptr) {}
+};
+
+class CUDASymmetricMemoryAllocator : public SymmetricMemoryAllocator {
+ public:
+  void* alloc(size_t size, int device_idx, const std::string& group_name)
+      override;
+
+  void free(void* ptr) override;
+  size_t get_alloc_size(void* ptr) override;
+  c10::intrusive_ptr<SymmetricMemory> rendezvous(void* ptr) override;
+  bool is_rendezvous_completed(void* ptr) override;
+  bool has_multicast_support(int device_idx) override;
+
+ private:
+  c10::intrusive_ptr<Block> find_block(void* ptr);
+
+  std::shared_mutex mutex_;
+  std::unordered_map<void*, c10::intrusive_ptr<Block>> ptr_to_block_;
+};
+
+} // namespace symmetric_memory
+} // namespace c10d
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GroupRegistry.hpp b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GroupRegistry.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..b22fb1ae8faf3f65a1032c8d6fd2f81931d72cf2
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GroupRegistry.hpp
@@ -0,0 +1,22 @@
+#pragma once
+
+#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
+
+namespace c10d {
+
+C10_EXPORT void set_thread_isolation_mode(bool enable);
+
+bool get_thread_isolation_mode();
+
+C10_EXPORT void register_process_group(
+    const std::string& group_name,
+    c10::intrusive_ptr<c10d::ProcessGroup> group);
+
+C10_EXPORT c10::intrusive_ptr<c10d::ProcessGroup> resolve_process_group(
+    const std::string& group_name);
+
+C10_EXPORT void unregister_process_group(const std::string& group_name);
+
+C10_EXPORT void unregister_all_process_groups();
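+
+// An illustrative usage sketch (the group name and pg are examples):
+//
+//   c10::intrusive_ptr<c10d::ProcessGroup> pg = /* created elsewhere */;
+//   register_process_group("train_pg", pg);
+//   auto resolved = resolve_process_group("train_pg");
+//   unregister_process_group("train_pg");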
+
+} // namespace c10d
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NanCheck.hpp b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NanCheck.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..cc9a5867c3dd400597370adada91022d7dde2201
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NanCheck.hpp
@@ -0,0 +1,16 @@
+#pragma once
+
+#ifdef USE_C10D_NCCL
+
+#include <ATen/ATen.h>
+#include <c10/cuda/CUDAStream.h>
+
+namespace c10d {
+
+// Check for NaNs in a tensor on a given stream. If any are found, throw a
+// device-side error.
+void checkForNan(const at::Tensor& tensor, at::cuda::CUDAStream& stream);
+
+} // namespace c10d
+
+#endif // USE_C10D_NCCL
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/WinSockUtils.hpp b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/WinSockUtils.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..1a2c749129c787b23c819fd42bc719de61c1aea3
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/WinSockUtils.hpp
@@ -0,0 +1,25 @@
+#pragma once
+
+#include <torch/csrc/distributed/c10d/Utils.hpp>
+
+namespace c10d::tcputil {
+
+#define CONNECT_SOCKET_OFFSET 1
+
+inline int poll(struct pollfd* fdArray, unsigned long fds, int timeout) {
+  return WSAPoll(fdArray, fds, timeout);
+}
+
+inline void addPollfd(
+    std::vector<struct ::pollfd>& fds,
+    int socket,
+    short events) {
+  fds.push_back({(SOCKET)socket, events});
+}
+
+inline struct ::pollfd getPollfd(int socket, short events) {
+  struct ::pollfd res = {(SOCKET)socket, events};
+  return res;
+}
+
+} // namespace c10d::tcputil
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c10e5007b9f54495cec904c1d72d2247a70e60b1
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp
@@ -0,0 +1,165 @@
+#pragma once
+
+#include <ATen/ATen.h>
+#include <chrono>
+#include <mutex>
+#include <vector>
+
+constexpr auto kNoTimeout = std::chrono::milliseconds(0);
+
+namespace c10d {
+
+constexpr const char* const kSeqNumStoreKey = "SEQ_NUM_STORE_KEY";
+
+enum class OpType : std::uint8_t {
+  BROADCAST = 0,
+  ALLREDUCE = 1,
+  ALLREDUCE_COALESCED = 2,
+  REDUCE = 3,
+  ALLGATHER = 4,
+  _ALLGATHER_BASE = 5,
+  ALLGATHER_COALESCED = 6,
+  GATHER = 7,
+  SCATTER = 8,
+  REDUCE_SCATTER = 9,
+  ALLTOALL_BASE = 10,
+  ALLTOALL = 11,
+  SEND = 12,
+  RECV = 13,
+  RECVANYSOURCE = 14,
+  BARRIER = 15,
+  _REDUCE_SCATTER_BASE = 16,
+  COALESCED = 17,
+  _ALLREDUCE_SPARSE = 18,
+  UNKNOWN = 100,
+};
+
+// Converts OpType to human readable string.
+TORCH_API std::string opTypeToString(OpType opType);
+
+// Whether or not an OP is a p2p op (SEND, RECV, RECVANYSOURCE)
+TORCH_API bool isP2POp(OpType opType, bool batchP2P = false);
+
+// Please do not use the Work API; it is going away, to be replaced by
+// ivalue::Future.
+// Python binding for this class might change, please do not assume
+// this will be bound using pybind.
+class TORCH_API Work : public torch::CustomClassHolder {
+ public:
+  Work(
+      int rank = -1,
+      OpType opType = OpType::UNKNOWN,
+      const char* profilingTitle = nullptr,
+      const std::optional<std::vector<at::Tensor>>& inputTensors =
+          std::nullopt);
+
+  ~Work() override;
+
+  // Checks if request has completed. Non-blocking operation.
+  virtual bool isCompleted();
+
+  // Returns if the work completed successfully.
+  // If false, the exception function can be called to get details.
+  virtual bool isSuccess() const;
+
+  // Returns exception if isSuccess() returned false.
+  virtual std::exception_ptr exception() const;
+
+  // Returns source rank if this object represents a recv-from-any.
+  virtual int sourceRank() const;
+
+  // Returns result tensors, if applicable.
+  // If work is not supposed to have result, we return empty list.
+  virtual std::vector<at::Tensor> result();
+
+  // Ensures that operations on the output tensors that are invoked
+  // after this function returns are correctly sequenced after the
+  // asynchronous completion of this work.
+  //
+  // For CUDA tensors, it inserts stream synchronization such that
+  // the streams of the caller wait for completion of the
+  // asynchronous operations on the destination tensors.
+  //
+  // For CPU tensors, it is currently a nop.
+  //
+  // This function should only be used if the caller polls for
+  // completion through the `isCompleted` function, it has returned
+  // true, and the `isSuccess` function also has returned true.
+  //
+  virtual void synchronize();
+
+  // Waits until request completes. Blocking operation.
+  // Throws if the work completed with an exception.
+  // Returns false if the work is aborted.
+  // Otherwise, it always returns true, indicating the work is completed.
+  //
+  // Functionally equivalent to:
+  //
+  //   while (!isCompleted()) { /* nop */ }
+  //   auto success = isSuccess();
+  //   if (!success) { std::rethrow_exception(exception()); }
+  //   return success;
+  //
+  virtual bool wait(std::chrono::milliseconds timeout = kNoTimeout);
+
+  virtual void abort();
+
+  // Returns a Future object that will be associated with the completion of
+  // work. Only NCCL backend is currently supported.
+  virtual c10::intrusive_ptr<c10::ivalue::Future> getFuture();
+
+  virtual float getDuration() const;
+
+  virtual uint64_t getSequencenumber() const;
+
+  OpType retrieveOpType() const;
+
+  static c10::intrusive_ptr<Work> create_from_future(
+      const c10::intrusive_ptr<c10::ivalue::Future>&);
+
+ protected:
+  // Completes the work object and optionally sets the exception in a
+  // thread-safe manner. Notifies all waiting condition variables as well.
+  void finish(std::exception_ptr exception = nullptr);
+
+  // Similar to finish, but throws an exception if one is already set or
+  // provided by the user.
+  void finishAndThrow(std::exception_ptr exception);
+
+  mutable std::mutex mutex_;
+  std::condition_variable cv_;
+  bool completed_ = false;
+  std::exception_ptr exception_;
+
+  // Current rank of the node.
+  const int rank_;
+
+  // Operation type that this work object refers to.
+  OpType opType_;
+
+  // When profiling, the callback to record end of operation event. This
+  // callback needs to be called when collective operation is complete.
+  std::function<void()> recordFunctionEndCallback_;
+};
+
+struct TORCH_API WorkInfo {
+  WorkInfo(
+      const OpType& opType,
+      const uint64_t seq,
+      const std::chrono::time_point<std::chrono::system_clock>& timeStarted,
+      const std::chrono::time_point<std::chrono::system_clock>& timeFinished,
+      const std::chrono::duration<float>& activeDuration)
+      : opType(opType),
+        seq(seq),
+        timeStarted(timeStarted),
+        timeFinished(timeFinished),
+        activeDuration(activeDuration) {}
+
+  OpType opType;
+  uint64_t seq;
+  std::chrono::time_point<std::chrono::system_clock> timeStarted;
+  std::chrono::time_point<std::chrono::system_clock> timeFinished;
+  std::chrono::duration<float> activeDuration;
+};
+
+} // namespace c10d
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/c10d.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/c10d.h
new file mode 100644
index 0000000000000000000000000000000000000000..5151a33f7ee351184e53daa68155dcc6c7390358
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/c10d.h
@@ -0,0 +1,13 @@
+#pragma once
+
+#include <torch/csrc/python_headers.h>
+
+namespace torch {
+namespace distributed {
+namespace c10d {
+
+PyMethodDef* python_functions();
+
+} // namespace c10d
+} // namespace distributed
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/exception.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/exception.h
new file mode 100644
index 0000000000000000000000000000000000000000..a00b6f70653aaa8d4456033800c5dc69942e3b03
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/exception.h
@@ -0,0 +1,33 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+// All rights reserved.
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#pragma once
+
+#include <exception>
+
+#include <c10/macros/Macros.h>
+#include <c10/util/Exception.h>
+
+// Utility macro similar to C10_THROW_ERROR, the major difference is that this
+// macro handles exception types defined in the c10d namespace, whereas
+// C10_THROW_ERROR requires an exception to be defined in the c10 namespace.
+#define C10D_THROW_ERROR(err_type, msg) \
+  throw ::c10d::err_type(               \
+      {__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, msg)
+
+namespace c10d {
+
+using c10::DistNetworkError;
+
+class TORCH_API SocketError : public DistNetworkError {
+  using DistNetworkError::DistNetworkError;
+};
+
+class TORCH_API TimeoutError : public DistNetworkError {
+  using DistNetworkError::DistNetworkError;
+};
+
+} // namespace c10d
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/intra_node_comm.hpp b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/intra_node_comm.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..37fe285cb929ee46161d0f89d52ffa7fe25b4112
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/intra_node_comm.hpp
@@ -0,0 +1,152 @@
+#pragma once
+
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAEvent.h>
+#include <c10/cuda/CUDAStream.h>
+#include <torch/csrc/distributed/c10d/Store.hpp>
+#include <torch/csrc/distributed/c10d/SymmetricMemory.hpp>
+#include <torch/csrc/distributed/c10d/Work.hpp>
+
+namespace c10d::intra_node_comm {
+
+using namespace c10d::symmetric_memory;
+
+constexpr size_t kMaxDevices = 8;
+constexpr size_t kDefaultBufferSize = 10ull * 1024 * 1024;
+constexpr size_t kP2pStateSize = 2048;
+
+using NvlMesh = std::array<std::array<size_t, kMaxDevices>, kMaxDevices>;
+using HybridCubeMesh = std::array<std::array<int, 4>, kMaxDevices>;
+
+enum class Topology : uint8_t {
+  UNKNOWN = 0,
+  FULLY_CONNECTED = 1,
+  HYBRID_CUBE_MESH = 2
+};
+
+enum class AllReduceAlgo : uint8_t {
+  NONE = 0,
+  ONE_SHOT = 1,
+  TWO_SHOT = 2,
+  HCM = 3
+};
+
+// NOTE: this class will be removed soon in favor of SymmetricMemory
+class TORCH_API IntraNodeComm : public c10::intrusive_ptr_target {
+ public:
+  IntraNodeComm(
+      c10::intrusive_ptr<c10d::Store> store,
+      size_t rank,
+      size_t worldSize,
+      std::optional<size_t> bufferSize = std::nullopt);
+
+  ~IntraNodeComm() override;
+
+  static bool isEnabled();
+
+  /**
+   * Performs rendezvous.
+   * If rendezvous fails, the IntraNodeComm object will be in an invalid
+   * state and it is the caller's responsibility to dispose it.
+   */
+  bool rendezvous();
+
+  Topology getTopology() {
+    return topology_;
+  }
+
+  size_t getBufferSize() {
+    return bufferSize_;
+  }
+
+  /**
+   * Selects an AllReduceAlgo that we think will outperform nccl.
+   * Returns AllReduceAlgo::NONE if we don't think we can outperform nccl.
+   */
+  AllReduceAlgo selectAllReduceAlgo(const at::Tensor& input);
+
+  at::Tensor allReduce(const at::Tensor& input, AllReduceAlgo algo);
+
+  /**
+   * Perform a barrier among the specified ranks.
+   */
+  void barrier(std::optional<std::vector<int64_t>> ranks = std::nullopt);
+
+  at::Tensor getBuffer(
+      size_t rank,
+      const std::vector<int64_t>& sizes,
+      c10::ScalarType dtype,
+      int64_t storageOffset);
+
+ private:
+  at::Tensor oneShotAllReduce(
+      const at::Tensor& input,
+      at::cuda::CUDAStream& stream);
+
+  at::Tensor twoShotAllReduce(
+      const at::Tensor& input,
+      at::cuda::CUDAStream& stream);
+
+  at::Tensor hybridCubeMeshAllReduce(
+      const at::Tensor& input,
+      at::cuda::CUDAStream& stream);
+
+  c10::intrusive_ptr<c10d::Store> store_;
+  size_t rank_;
+  size_t worldSize_;
+  size_t bufferSize_;
+  at::cuda::CUDAEvent barrierReady_;
+
+  /**
+   * Members initialized after rendezvous
+   */
+  bool isInitialized_ = false;
+  int deviceIdx_;
+  Topology topology_ = Topology::UNKNOWN;
+  void* symmetricMemoryPtr_ = nullptr;
+  c10::intrusive_ptr<SymmetricMemory> symmetricMemory_ = nullptr;
+  void* p2pStatesDev_{};
+  void* buffersDev_{};
+  void* topoInfo_{};
+};
+
+/**
+ * NOTE [IntraNodeComm Stream Semantics]
+ *
+ * ProcessGroupNCCL launches kernels differently from the conventional PyTorch
+ * CUDA semantics: it always launches collective kernels onto a dedicated
+ * communication stream. Therefore, it needs to:
+ *
+ * - Synchronize the calling stream and the comm stream.
+ * - Ensure the memory safety of the operands (via record_stream or stashing).
+ * - Synchronize the waiting stream with the comm stream.
+ *
+ * Unconditionally performing these tasks makes sense when we expect most of the
+ * communication to benefit from compute/comm overlap. However, IntraNodeComm
+ * primarily aims to optimize small, latency-sensitive, blocking communication,
+ * in which the overhead incurred by the above steps can be quite pronounced.
+ *
+ * Thus, IntraNodeComm follows the conventional PyTorch CUDA semantics and
+ * launches kernels onto the stream specified by the user. Although the user
+ * can perform the necessary synchronization via wait_stream, to provide a UX
+ * consistent with that of ProcessGroupNCCL, the necessary stream
+ * synchronization can also be performed via IntraNodeCommWork::wait().
+ */
+class IntraNodeCommWork : public c10d::Work {
+ public:
+  IntraNodeCommWork() : c10d::Work() {
+    event_.record();
+  }
+
+  bool wait(std::chrono::milliseconds timeout = kNoTimeout) override {
+    event_.block(at::cuda::getCurrentCUDAStream());
+    return true;
+  }
+
+ private:
+  at::cuda::CUDAEvent event_;
+};
+
+TORCH_API int64_t getIntraNodeCommUsageCounter();
+
+} // namespace c10d::intra_node_comm
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/socket.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/socket.h
new file mode 100644
index 0000000000000000000000000000000000000000..1e42a53b0b530dc2b58867a248ed985a9748130b
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/socket.h
@@ -0,0 +1,109 @@
+// Copyright (c) Meta Platforms, Inc. and its affiliates.
+// All rights reserved.
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#pragma once
+
+#include <chrono>
+#include <cstdint>
+#include <memory>
+#include <string>
+
+#include <c10/macros/Macros.h>
+#include <c10/util/Exception.h>
+#include <torch/csrc/distributed/c10d/Backoff.hpp>
+#include <torch/csrc/distributed/c10d/exception.h>
+
+namespace c10d {
+namespace detail {
+
+class SocketOptions {
+ public:
+  SocketOptions& prefer_ipv6(bool value) noexcept {
+    prefer_ipv6_ = value;
+
+    return *this;
+  }
+
+  bool prefer_ipv6() const noexcept {
+    return prefer_ipv6_;
+  }
+
+  SocketOptions& connect_timeout(std::chrono::milliseconds value) noexcept {
+    connect_timeout_ = value;
+
+    return *this;
+  }
+
+  std::chrono::milliseconds connect_timeout() const noexcept {
+    return connect_timeout_;
+  }
+
+  // Sets the backoff policy to use for socket connect ops.
+  SocketOptions& connect_backoff(std::shared_ptr<Backoff> value) noexcept {
+    connect_backoff_ = std::move(value);
+
+    return *this;
+  }
+
+  const std::shared_ptr<Backoff>& connect_backoff() const noexcept {
+    return connect_backoff_;
+  }
+
+ private:
+  bool prefer_ipv6_ = true;
+  std::chrono::milliseconds connect_timeout_{std::chrono::seconds{30}};
+  std::shared_ptr<Backoff> connect_backoff_{
+      std::make_shared<FixedBackoff>(std::chrono::milliseconds(1000))};
+};
+
+class SocketImpl;
+
+class Socket {
+ public:
+  // This function initializes the underlying socket library and must be called
+  // before any other socket function.
+  static void initialize();
+
+  static Socket listen(std::uint16_t port, const SocketOptions& opts = {});
+
+  static Socket listenFromFd(int fd, std::uint16_t expected_port);
+
+  static Socket connect(
+      const std::string& host,
+      std::uint16_t port,
+      const SocketOptions& opts = {});
+
+  Socket() noexcept = default;
+
+  Socket(const Socket& other) = delete;
+
+  Socket& operator=(const Socket& other) = delete;
+
+  Socket(Socket&& other) noexcept;
+
+  Socket& operator=(Socket&& other) noexcept;
+
+  ~Socket();
+
+  Socket accept() const;
+
+  int handle() const noexcept;
+
+  std::uint16_t port() const;
+
+  bool waitForInput(std::chrono::milliseconds timeout);
+
+  std::string repr() const;
+
+ private:
+  explicit Socket(std::unique_ptr<SocketImpl>&& impl) noexcept;
+
+  std::unique_ptr<SocketImpl> impl_;
+};
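+
+// Illustrative usage sketch (not part of the upstream header): SocketOptions
+// is a fluent builder, so a client connection might be configured as:
+//
+//   Socket::initialize();
+//   auto opts = SocketOptions{}
+//                   .prefer_ipv6(false)
+//                   .connect_timeout(std::chrono::seconds{10});
+//   Socket client = Socket::connect("localhost", 29500, opts);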
+
+} // namespace detail
+
+} // namespace c10d
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_data.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_data.h
new file mode 100644
index 0000000000000000000000000000000000000000..496ecbdbbc6c530e50e0d3a8e3b815e9a5f005e0
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_data.h
@@ -0,0 +1,61 @@
+#pragma once
+
+#include <memory>
+#include <torch/csrc/lazy/backend/backend_device.h>
+#include <torch/csrc/lazy/core/shape.h>
+
+namespace torch {
+namespace lazy {
+
+class TORCH_API BackendData {
+ public:
+  struct Info {
+    /**
+     * Used by Lazy Graph Executor to tag info on BackendData objs
+     * */
+    virtual ~Info() = default;
+  };
+  /**
+   * Represents (Tensor) data stored on a backend device
+   * in its native format.
+   * */
+  using Handle = int64_t;
+
+  BackendData(BackendDevice device, Shape shape)
+      : device_(std::move(device)), shape_(std::move(shape)) {}
+
+  virtual ~BackendData() = default;
+
+  const BackendDevice& device() const {
+    return device_;
+  }
+
+  const Shape& shape() const {
+    return shape_;
+  }
+
+  Info* info() const {
+    return info_.get();
+  }
+
+  std::shared_ptr<Info> SetInfo(std::shared_ptr<Info> info) {
+    std::swap(info, info_);
+    return info;
+  }
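+
+  // Illustrative note (assumption, not upstream documentation): SetInfo()
+  // swaps the new info in and returns the previous one, so a caller can stash
+  // and later restore it; MyInfo stands for any hypothetical Info subclass:
+  //   auto prev = data->SetInfo(std::make_shared<MyInfo>());
+  //   data->SetInfo(prev);  // restore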
+
+  virtual Handle GetHandle() = 0;
+
+  virtual void Assign(const BackendData& data) = 0;
+
+  virtual bool HasValue() const = 0;
+
+ private:
+  BackendDevice device_;
+  Shape shape_;
+  std::shared_ptr<Info> info_;
+};
+
+using BackendDataPtr = std::shared_ptr<BackendData>;
+
+} // namespace lazy
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_device.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_device.h
new file mode 100644
index 0000000000000000000000000000000000000000..fdfc2ac15d9a89351c43517071fdec857c3d5f2f
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_device.h
@@ -0,0 +1,100 @@
+#pragma once
+
+#include <memory>
+#include <ostream>
+#include <string>
+
+#include <ATen/Tensor.h>
+#include <c10/macros/Export.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+namespace c10 {
+struct Device;
+}
+
+namespace torch {
+namespace lazy {
+
+// Backend should extend it and define their own supported hardware types.
+struct TORCH_API BackendDeviceType {
+  int8_t type{(int8_t)at::kCPU};
+  // Note: previous default value was '0', which actually maps to at::kCPU, at
+  // least now it is explicit, we may want to make default/undefined semantics
+  // more clear though
+  BackendDeviceType() : type((int8_t)at::kCPU) {}
+  BackendDeviceType(int8_t type) : type(type) {}
+
+  virtual ~BackendDeviceType() = default;
+  virtual std::string toString() const {
+    return "Unknown";
+  }
+};
+
+class TORCH_API BackendDevice {
+ public:
+  // The default constructor will set both the device type and ordinal
+  // to backend specific defaults.
+  BackendDevice();
+  BackendDevice(std::shared_ptr<BackendDeviceType>&& type, int64_t ordinal);
+
+  int8_t type() const;
+  int64_t ordinal() const {
+    return ordinal_;
+  }
+
+  bool operator==(const BackendDevice& other) const {
+    return compare(other) == 0;
+  }
+  bool operator!=(const BackendDevice& other) const {
+    return compare(other) != 0;
+  }
+  bool operator<(const BackendDevice& rhs) const {
+    return compare(rhs) < 0;
+  }
+
+  std::string toString() const;
+
+ private:
+  int compare(const BackendDevice& rhs) const;
+
+  // Use shared_ptr instead of unique_ptr so that BackendDevice can be copied.
+  std::shared_ptr<BackendDeviceType> type_;
+  int64_t ordinal_;
+};
+
+TORCH_API std::ostream& operator<<(
+    std::ostream& os,
+    const BackendDevice& device);
+
+// Helpers for converting a c10::Device to BackendDevice and vice versa.
+TORCH_API BackendDevice atenDeviceToBackendDevice(const c10::Device& device);
+TORCH_API c10::Device backendDeviceToAtenDevice(const BackendDevice& device);
+
+// Tries to extract the backend device out of the lazy tensor. Returns nullopt
+// if the input is not a lazy tensor.
+TORCH_API std::optional<BackendDevice> GetBackendDevice(
+    const at::ITensorListRef tensors);
+TORCH_API std::optional<BackendDevice> GetBackendDevice(
+    const at::TensorList tensors);
+TORCH_API std::optional<BackendDevice> GetBackendDevice(
+    const at::Tensor& tensor);
+TORCH_API std::optional<BackendDevice> GetBackendDevice(
+    const std::optional<c10::Device>& device);
+
+// For variadic template.
+TORCH_API std::optional<BackendDevice> GetBackendDevice();
+
+template <typename T, typename... Args>
+std::optional<BackendDevice> GetBackendDevice(
+    const T& tensor,
+    const Args&... forward_tensors) {
+  auto optional_device = GetBackendDevice(tensor);
+  if (optional_device) {
+    return optional_device;
+  }
+  return GetBackendDevice(forward_tensors...);
+}
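+
+// Illustrative note (assumption, not upstream documentation): the variadic
+// overload scans its arguments left to right and returns the first backend
+// device it can extract, falling back to the zero-argument overload:
+//   auto device = GetBackendDevice(tensor_a, tensor_b, maybe_c10_device);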
+
+} // namespace lazy
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_interface.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_interface.h
new file mode 100644
index 0000000000000000000000000000000000000000..366311921c3945e4c2b564cc36cf7423895f8f94
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_interface.h
@@ -0,0 +1,158 @@
+#pragma once
+
+#include <ATen/Tensor.h>
+#include <c10/macros/Export.h>
+#include <c10/util/ArrayRef.h>
+#include <torch/csrc/lazy/backend/backend_data.h>
+#include <torch/csrc/lazy/backend/backend_device.h>
+#include <torch/csrc/lazy/backend/lowering_context.h>
+#include <torch/csrc/lazy/core/ir_util.h>
+#include <torch/csrc/lazy/core/shape.h>
+
+namespace torch {
+namespace lazy {
+
+struct IrBuilder;
+
+/**
+ * Work in progress- don't treat this as a stable interface yet!
+ */
+class TORCH_API BackendImplInterface {
+ public:
+  virtual ~BackendImplInterface() = default;
+
+  /**
+   * Initialization/Teardown
+   * */
+  // No-op by default. Allows custom functionality to be exposed through
+  // extension bindings.
+  virtual void InitializeAtenBindings() const {}
+
+  virtual void PrepareToExit() const = 0;
+
+  /**
+   * Configuration
+   * */
+
+  virtual void SetRngSeed(size_t seed) const = 0;
+
+  /**
+   * IR Tracing
+   * */
+
+  virtual const IrBuilder* GetIrBuilder() const = 0;
+
+  /**
+   * Data Transfer
+   * */
+
+  virtual BackendDataPtr MakeComputationDataFromTensor(
+      const at::Tensor& tensor,
+      const Shape& shape,
+      const BackendDevice& device) const = 0;
+  virtual BackendDataPtr MakeComputationDataFromScalar(
+      const at::Scalar& scalar,
+      const torch::lazy::BackendDevice& device) const = 0;
+  virtual BackendDataPtr CreateDataPlaceholder(
+      const BackendDevice& device,
+      const Shape& shape) const = 0;
+
+  // Gets backend data if the node is a device data node. Otherwise returns
+  // nullptr
+  virtual BackendDataPtr GetComputationDataFromNode(const Node*) const = 0;
+
+  virtual at::Tensor MakeTensorFromComputationData(
+      const BackendDataPtr data,
+      std::optional<at::ScalarType> logical_scalar_type) const = 0;
+
+  /**
+   * Lowering, Compilation, Execution
+   * */
+
+  virtual std::unique_ptr<LoweringContext> CreateLoweringContext(
+      const std::string& name,
+      BackendDevice device,
+      c10::ArrayRef<const Node*> post_order,
+      Util::EmissionMap emit_status) const = 0;
+
+  virtual std::unique_ptr<LoweringContext> CreateLoweringContext(
+      const std::string& name,
+      BackendDevice device) const = 0;
+
+  // TODO(whc) need to keep this?
+  virtual std::vector<std::string> GetCompilationDevices(
+      const std::string& device,
+      c10::ArrayRef<std::string> devices) const = 0;
+
+  virtual std::vector<ComputationPtr> Compile(
+      std::vector<ComputationPtr> instances) const = 0;
+
+  virtual std::vector<BackendDataPtr> ExecuteComputation(
+      torch::lazy::ComputationPtr computation,
+      c10::ArrayRef<BackendDataPtr> arguments,
+      const BackendDevice& device) const = 0;
+
+  /**
+   * Device Configuration
+   * */
+
+  // Set or get the default device type.
+  // For backends used with virtual c10::Devices, this configures what real
+  // device type the backend should use, and matters if the backend supports
+  // more than one type of real device.
+  virtual std::shared_ptr<BackendDeviceType> GetDefaultDeviceType() const = 0;
+  virtual void SetDefaultDeviceType(int8_t type) = 0;
+
+  // Set or get the default device ordinal.
+  // For backends that support multiple devices, this configures which device
+  // the backend should use by default.
+  virtual int64_t GetDefaultDeviceOrdinal() const = 0;
+  virtual void SetDefaultDeviceOrdinal(int64_t) = 0;
+
+  // Specify which aten device should be used for eager fallback
+  // may change depending on current 'Default' DeviceType
+  virtual at::DeviceType EagerFallbackDeviceType() const = 0;
+
+  // Query all available backend devices
+  virtual std::vector<BackendDevice> GetBackendDevices() const = 0;
+
+  virtual std::string CreateMetricReport() const {
+    return "";
+  }
+
+  // Map a particular c10:: device to a concrete backend device
+  // Note: c10:: devices may be virtual or concrete. xla:: and lazy:: are
+  // virtual devices, meaning they may map to a gpu, tpu, etc. behind the
+  // scenes. In the future, non-virtual c10:: devices may also use lazy tensors
+  // through a mode, in which case these APIs should still work, but should be
+  // identity mappings.
+  virtual BackendDevice GetBackendDevice(c10::Device device) const = 0;
+
+  // TODO(whc)
+  // Additional APIs expected for supporting distributed training, to be
+  // designed
+
+  /**
+   * Debug/Metrics
+   * */
+
+  //   virtual std::map<std::string, Metric> GetMetrics() const = 0;
+
+  //   virtual MemoryInfo GetMemoryInfo(const std::string& device) = 0;
+
+  virtual std::string GetComputationBackendText(
+      const ComputationPtr computation) const = 0;
+};
+
+class TORCH_API BackendRegistrar {
+ public:
+  BackendRegistrar(const BackendImplInterface* backend_impl_interface);
+};
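+
+// Illustrative usage sketch (assumption, not upstream documentation): a
+// backend typically registers its implementation through a file-scope
+// registrar, where GetMyBackendImpl() is a hypothetical accessor returning a
+// long-lived BackendImplInterface*:
+//   static BackendRegistrar g_registrar(GetMyBackendImpl());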
+
+TORCH_API bool hasBackend();
+TORCH_API const BackendImplInterface* getBackend();
+
+TORCH_API const IrBuilder* getIrBuilder();
+
+} // namespace lazy
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/lowering_context.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/lowering_context.h
new file mode 100644
index 0000000000000000000000000000000000000000..49e7b8be58cbf234b546bc4988e870a520d797f8
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/lowering_context.h
@@ -0,0 +1,114 @@
+#pragma once
+
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include <torch/csrc/lazy/backend/backend_data.h>
+#include <torch/csrc/lazy/backend/backend_device.h>
+#include <torch/csrc/lazy/core/ir.h>
+#include <torch/csrc/lazy/core/ir_util.h>
+
+namespace torch {
+namespace lazy {
+
+class TORCH_API Computation {
+ public:
+  virtual int parameters_size() const = 0;
+
+  virtual const std::vector& parameter_shapes() const = 0;
+
+  virtual const std::vector& parameter_names() const = 0;
+
+  virtual const Shape& result_shape() const = 0;
+
+  virtual const std::string to_string() const = 0;
+
+  virtual ~Computation() = default;
+
+  // Indicates whether this computation is being executed inside a mark step
+  // Assume false unless set otherwise
+  bool in_mark_step = false;
+};
+
+using ComputationPtr = std::shared_ptr;
+
+// Keeps track of the code generation state.
+class TORCH_API LoweringContext {
+ public:
+  LoweringContext(const std::string& name, BackendDevice device);
+  LoweringContext(
+      const std::string& name,
+      BackendDevice device,
+      c10::ArrayRef post_order,
+      Util::EmissionMap emit_status);
+
+  virtual ~LoweringContext() = default;
+
+  static std::unique_ptr Create(
+      const std::string& name,
+      BackendDevice device,
+      c10::ArrayRef post_order,
+      Util::EmissionMap emit_status);
+
+  static std::unique_ptr Create(
+      const std::string& name,
+      BackendDevice device);
+
+  const BackendDevice& device() const {
+    return device_;
+  };
+
+  // Retrieves the vector holding all the tensors associated with the parameter
+  // instructions which have been created.
+  const std::vector<BackendDataPtr>& GetParametersData() const;
+
+  // Adds a new input/output alias.
+  virtual void SetUpAlias(
+      const std::vector<int64_t>& output_index,
+      int64_t param_number,
+      const std::vector<int64_t>& param_index,
+      bool must_alias = false) {
+    // Dummy default implementation to do nothing.
+  }
+
+  // Check if parameter shape matches result at index.
+  virtual bool CheckResultShape(
+      const BackendDataPtr& parameter_data,
+      size_t result_idx) {
+    // Dummy default implementation to do nothing.
+    return false;
+  }
+
+  // Adds the given output as a component of the result tuple and returns its
+  // assigned position within the tuple.
+  virtual size_t AddResult(const torch::lazy::Output& output) = 0;
+
+  // Associates the given output with the input parameter of the given index and
+  // shape. Only used for the operator-by-operator execution, mostly for
+  // debugging purposes.
+  virtual void AddParameter(
+      const torch::lazy::Output& output,
+      size_t index,
+      const Shape& shape,
+      const std::string& name) = 0;
+
+  // Build the computation capturing all the operations created with the
+  // embedded builder (returned by the builder() API).
+  virtual ComputationPtr Build() = 0;
+
+  size_t GetEmittedNodeCount() const {
+    return emit_status_.size();
+  }
+
+ protected:
+  BackendDevice device_;
+  std::vector<BackendDataPtr> parameters_;
+  std::vector<size_t> parameter_sequence_;
+  Util::EmissionMap emit_status_;
+};
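+
+// Illustrative usage sketch (not part of the upstream header): a lowering
+// pass typically creates a context over a post order, registers the root
+// outputs, and builds the computation:
+//
+//   auto ctx = LoweringContext::Create("SyncTensorsGraph", device,
+//                                      post_order, std::move(emit_status));
+//   for (const auto& output : root_outputs) {
+//     ctx->AddResult(output);
+//   }
+//   ComputationPtr computation = ctx->Build();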
+
+} // namespace lazy
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/debug_util.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/debug_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..ef4b81e1ca9c5dd7878603f4efcf8f381825dc4b
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/debug_util.h
@@ -0,0 +1,47 @@
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include <torch/csrc/lazy/core/tensor.h>
+
+namespace torch {
+namespace lazy {
+
+TORCH_API std::function<std::vector<SourceLocation>()>&
+GetPythonFramesFunction();
+
+TORCH_API std::string GetFirstUserFrameInPython();
+
+class TORCH_API DebugUtil {
+ public:
+  enum GraphFormat {
+    kText,
+    kDot,
+    kBackend,
+  };
+
+  static GraphFormat GetDefaultGraphFormat();
+
+  // Dumps the current Python frame and the IR Graph whose roots are the IR
+  // values held at the tensors. If indices is not nullptr, it selects the
+  // indices of the tensors whose graph will be emitted.
+  static std::string GetTensorsGraphInfo(
+      c10::ArrayRef<torch::lazy::LazyTensorPtr> tensors,
+      const std::vector<size_t>* indices,
+      GraphFormat format = GetDefaultGraphFormat());
+
+  // If the environment variable LTC_SAVE_TENSORS_FILE is set to the proper
+  // output path, an instance of the report returned by GetTensorsGraphInfo() is
+  // saved.
+  static void SaveTensorsGraphInfo(
+      const char* name,
+      c10::ArrayRef<torch::lazy::LazyTensorPtr> tensors,
+      const std::vector<size_t>* indices,
+      GraphFormat format = GetDefaultGraphFormat());
+
+  static bool ExperimentEnabled(const std::string& name);
+};
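+
+// Illustrative usage sketch (not part of the upstream header): dump the IR
+// graph rooted at a set of lazy tensors in text format:
+//
+//   std::string info = DebugUtil::GetTensorsGraphInfo(
+//       tensors, /*indices=*/nullptr, DebugUtil::GraphFormat::kText);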
+
+} // namespace lazy
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_dump_util.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_dump_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..4b4e1e0749b24f619e2d97f41fba31b24d8fd31f
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_dump_util.h
@@ -0,0 +1,32 @@
+#pragma once
+
+#include <torch/csrc/lazy/core/ir.h>
+
+#include <string>
+
+namespace torch {
+namespace lazy {
+
+class BackendDevice;
+
+class TORCH_API DumpUtil {
+ public:
+  static std::string ToDot(c10::ArrayRef<const Node*> nodes);
+
+  static std::string PostOrderToDot(
+      c10::ArrayRef<const Node*> post_order,
+      c10::ArrayRef<const Node*> roots);
+
+  static std::string ToText(c10::ArrayRef<const Node*> nodes);
+
+  static std::string PostOrderToText(
+      c10::ArrayRef<const Node*> post_order,
+      c10::ArrayRef<const Node*> roots);
+
+  static std::string ToBackend(
+      c10::ArrayRef<Value> values,
+      const BackendDevice& device);
+};
+
+} // namespace lazy
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_util.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..df3d0fd7ac406dce34e0e9ce4cab85ab6e3f130e
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_util.h
@@ -0,0 +1,47 @@
+#pragma once
+
+#include <unordered_map>
+#include <vector>
+
+#include <torch/csrc/lazy/core/ir.h>
+
+namespace torch {
+namespace lazy {
+
+class TORCH_API Util {
+ public:
+  // Tracks the emission status of the nodes during the post-order generation.
+  // It helps track loops within the computation graphs.
+  enum EmitStatus {
+    kNotEmitted,
+    kEmitting,
+    kEmitted,
+  };
+
+  using EmissionMap = std::unordered_map<const Node*, EmitStatus>;
+
+  // Computes the post order from the given node, without using recursion. The
+  // emission map can be used as saved state, for multiple separate calls to
+  // this API. The returned post-order can be empty if the node has already been
+  // emitted inside the emission map. An error is generated if a loop is
+  // detected.
+  static std::vector<const Node*> ComputePostOrder(
+      const Node* node,
+      EmissionMap* emap);
+
+  static std::vector<const Node*> ComputePostOrder(
+      c10::ArrayRef<const Node*> nodes,
+      EmissionMap* emap);
+
+  // Same as above, but computes the post order on the set of nodes specified as
+  // argument.
+  static std::vector<const Node*> ComputePostOrder(
+      c10::ArrayRef<const Node*> nodes);
+
+  // Retrieves the number of nodes within the graph whose sinks are passed in
+  // the nodes argument.
+  static size_t GetGraphSize(c10::ArrayRef<const Node*> nodes);
+};
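+
+// Illustrative note (assumption, not upstream documentation): threading one
+// EmissionMap through several calls lets later traversals skip nodes that
+// were already emitted:
+//   Util::EmissionMap emap;
+//   auto order_a = Util::ComputePostOrder(root_a, &emap);
+//   auto order_b = Util::ComputePostOrder(root_b, &emap);  // shared nodes skipped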
+
+} // namespace lazy
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/lazy_graph_executor.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/lazy_graph_executor.h
new file mode 100644
index 0000000000000000000000000000000000000000..d2edbb75ffba338bcc79e33e529fdd3005b3f1dc
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/lazy_graph_executor.h
@@ -0,0 +1,426 @@
+#pragma once
+
+#include <c10/util/ArrayRef.h>
+#include <torch/csrc/lazy/backend/lowering_context.h>
+#include <torch/csrc/lazy/core/cache.h>
+#include <torch/csrc/lazy/core/ir_util.h>
+#include <torch/csrc/lazy/core/multi_wait.h>
+#include <torch/csrc/lazy/core/tensor.h>
+#include <torch/csrc/lazy/core/util.h>
+
+namespace torch {
+namespace lazy {
+
+class TORCH_API LazyGraphExecutor {
+ public:
+  struct DeviceDataInfo : public BackendData::Info {
+    DeviceDataInfo(int64_t tensor_id, bool read_only)
+        : tensor_id(tensor_id), read_only(read_only) {}
+
+    int64_t tensor_id = 0;
+    bool read_only = false;
+  };
+
+  // Register a lazy graph executor instance that can be retrieved using Get()
+  static void Register(LazyGraphExecutor*);
+  static LazyGraphExecutor* Get();
+
+  virtual ~LazyGraphExecutor() = default;
+
+  // Override these methods to perform custom tensor registration and
+  // unregistration. Note: it is vital that the parent implementations are also
+  // called in order for the tensors to show up in the live tensor list.
+  virtual void RegisterTensor(std::shared_ptr<LazyTensor::Data> data);
+  virtual void UnregisterTensor(LazyTensor::Data* data);
+
+  // Seed for random generator.
+  // Override to supply your own DeviceContextArena.
+  virtual Value GetRngSeed(const BackendDevice& device);
+  virtual uint64_t GetRunningSeed(const BackendDevice& device);
+  virtual void SetRngSeed(const BackendDevice& device, uint64_t seed);
+
+  void DeviceBarrier(const BackendDevice& device);
+
+  BackendDataPtr GetDeviceData(
+      const at::Tensor& tensor,
+      const BackendDevice& device);
+
+  BackendDataPtr GetDeviceData(
+      const at::Scalar& value,
+      at::ScalarType scalar_type,
+      const BackendDevice& device);
+
+  // Retrieves the set of lazy tensors which are currently live in the system,
+  // for the given device. If device is nullptr, the live tensors for all
+  // devices will be returned. Returned tensors are sorted by device as primary
+  // key, and by unique ID as secondary key.
+  std::vector<LazyTensorPtr> GetLiveTensors(const BackendDevice* device);
+
+  // Makes sure that any outstanding IR operations accumulated over live
+  // tensors get turned into device data. If wait is true, the sync operation
+  // will be run synchronously. The devices argument, if not empty, tells which
+  // devices should be participating in the replicated computation.
+  virtual void SyncLiveTensorsGraph(
+      const BackendDevice* device,
+      c10::ArrayRef<std::string> devices,
+      bool wait);
+
+  // Applies all the pending IR operations queued over the input tensors. All
+  // the tensors must be on the same device. If wait is true, the sync operation
+  // will be run synchronously. The devices argument, if not empty, tells which
+  // devices should be participating in the replicated computation.
+  void SyncTensorsGraph(
+      std::vector<LazyTensorPtr>* tensors,
+      c10::ArrayRef<std::string> devices,
+      bool wait,
+      bool sync_ltc_data);
+
+  // Marks an execution step, which allows the tensor framework to understand
+  // the computation boundaries.
+  // Override to supply your own DeviceContextArena.
+  virtual void MarkStep(const BackendDevice& device);
+
+  // Waits for all the outstanding operations on all the supplied devices.
+  // If devices is empty, the wait will happen for all local devices.
+  void WaitDeviceOps(c10::ArrayRef<BackendDevice> devices);
+
+  // Retrieves the PyTorch CPU tensors behind the lazy tensors' IR operations.
+  // All the tensors must be on the same device.
+  std::vector<at::Tensor> GetTensors(std::vector<LazyTensorPtr>* tensors);
+
+  size_t IncTrimCounter() const;
+
+  // Dumps the backend-specific text of the computation accumulated in the
+  // graph which is attached to the tensors.
+  std::string DumpBackendComputation(const std::vector<LazyTensorPtr>& tensors);
+
+  Value GetDeviceDataIrValue(
+      const at::Scalar& value,
+      c10::ScalarType type,
+      const BackendDevice& device);
+  Value GetIrValueForScalar(
+      const at::Scalar& value,
+      c10::ScalarType type,
+      const BackendDevice& device);
+  Value GetIrValueForScalar(
+      const at::Scalar& value,
+      const BackendDevice& device);
+
+  // TODO: even though this API is currently used **only** in codegen to
+  // generate real scalar IR values vs scalar tensors, we would like to
+  // use it in other cases where `GetIrValueForXXXScalar` is used, as well.
+  // In order to do that, we need to untangle the cases where we don't need
+  // `expand` and where we don't expect a scalar tensor
+  Value GetIrValueForScalarFromCodegen(
+      const at::Scalar& value,
+      const BackendDevice& device);
+  Value GetIrValueForExpandedScalar(
+      const at::Scalar& value,
+      const Shape& shape,
+      const BackendDevice& device);
+
+  struct CachedComputation {
+    explicit CachedComputation(ComputationPtr computation)
+        : computation(std::move(computation)) {}
+
+    ComputationPtr computation;
+  };
+
+  using ComputationCache = Cache<hash_t, CachedComputation, HashReducer>;
+
+  ComputationCache* GetComputationCache();
+
+  hash_t GetGraphHash(const std::vector<LazyTensorPtr>& tensors);
+
+ protected:
+  // TODO(alanwaketan): Revisit if all of them need to be accessible to
+  // derived classes.
+
+  struct SyncTensorsConfig {
+    // Whether we want to force data on the target tensors (hence trimming
+    // the IR graph above them).
+    bool force_ltc_data = true;
+    // Whether when setting the data, the other properties of the tensor
+    // state should be reset.
+    bool sync_ltc_data = true;
+  };
+
+  struct SyncTensorCollection {
+    SyncTensorCollection() : hash(0) {}
+
+    SyncTensorsConfig config;
+    std::vector<size_t> indices;
+    hash_t hash;
+    std::vector<ExceptionCleanup> unlocker;
+    BackendDevice device;
+  };
+
+  struct PostOrderData {
+    std::vector<const Node*> post_order;
+    Util::EmissionMap emission_map;
+    std::vector<BackendDataPtr> parameters_data;
+    std::vector<size_t> parameter_sequence;
+  };
+
+  // Locking:
+  // We perform two kinds of operations on tensors, synchronous and
+  // asynchronous. ApplyPendingGraph() is synchronous, as we need the
+  // device data result immediately. Before the synchronous operations can
+  // start, they need to wait until the pending asynchronous operations have
+  // completed. Synchronous operations do not hold device locks, since they are
+  // strictly sequential, dictated by the PyTorch execution order. The
+  // SyncTensorsGraph() is asynchronous, and returns immediately after having
+  // scheduled the asynchronous operation. While executing, the asynchronous
+  // operations will hold locks on all the participating devices (in most common
+  // cases there will be only one device).
+  // Since asynchronous operations capture device locks, only one asynchronous
+  // operation can execute at the same time, on a given device. Tensor
+  // operations which send data to device do not need to hold any device locks
+  // while doing so. Only operations which _use_ device data (computations, and
+  // transfer from server) need to wait for asynchronous operations to complete
+  // (barrier).
+
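+  // Illustrative sketch (assumption, not upstream documentation) of the
+  // barrier pattern described above: a synchronous operation first drains the
+  // in-flight asynchronous work on its device, then touches device data:
+  //   DeviceLockerArena::Get()->DeviceBarrier(device);
+  //   // ... device data is now safe to read ...
+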
+  class DeviceLocker {
+   public:
+    explicit DeviceLocker(BackendDevice device) : device_(std::move(device)) {}
+
+    const BackendDevice& device() const {
+      return device_;
+    }
+
+    void Lock();
+    void Unlock(std::exception_ptr exptr);
+    void Barrier();
+
+   private:
+    void CheckResetException();
+
+    BackendDevice device_;
+    std::mutex mutex_;
+    std::condition_variable cv_;
+    bool locked_ = false;
+    std::exception_ptr exptr_;
+  };
+
+  class DeviceLockerArena {
+   public:
+    static DeviceLockerArena* Get();
+
+    std::shared_ptr<DeviceLocker> GetLocker(const BackendDevice& device);
+
+    void DeviceBarrier(const BackendDevice& device);
+
+    // Use a set to impose an order on the device locking sequence (ABBA
+    // prevention).
+    std::vector<ExceptionCleanup> LockDevices(
+        const std::set<BackendDevice>& devices);
+
+   private:
+    ExceptionCleanup LockDevice(const BackendDevice& device);
+
+    std::mutex mutex_;
+    std::map<BackendDevice, std::shared_ptr<DeviceLocker>> lockers_;
+  };
+
+  class DataCacheArena {
+   public:
+    static DataCacheArena* Get();
+
+    BackendDataPtr GetDeviceData(
+        const at::Tensor& tensor,
+        const BackendDevice& device);
+
+    BackendDataPtr GetDeviceData(
+        const at::Scalar& value,
+        at::ScalarType scalar_type,
+        const BackendDevice& device);
+
+   private:
+    struct TensorHasher {
+      size_t operator()(const at::Tensor& tensor) const;
+    };
+    struct TensorComparer {
+      bool operator()(const at::Tensor& tensor1, const at::Tensor& tensor2)
+          const;
+    };
+
+    explicit DataCacheArena(size_t max_cache_size);
+
+    using DataCache =
+        Cache<at::Tensor, BackendData, TensorHasher, TensorComparer>;
+
+    DataCache* GetDataCache(const BackendDevice& device);
+
+    size_t max_cache_size_ = 0;
+    std::mutex mutex_;
+    std::map<BackendDevice, std::unique_ptr<DataCache>> device_caches_;
+  };
+
+  // The DeviceContextArena holds per device live information and statistics,
+  // among which the lazy tensors which are currently alive in the system. This
+  // is used to create computation "barriers" in order to flush pending
+  // operations and ensure the same computations are created during the training
+  // loops.
+  // TODO(alanwaketan): Add a registry such that we don't need to make all
+  // related methods virtual.
+  class DeviceContextArena {
+   protected:
+    struct DeviceContext {
+      std::mutex lock;
+      std::map<int64_t, std::weak_ptr<LazyTensor::Data>> tensors_data;
+      uint64_t seed = 101;
+      uint64_t running_seed = 101;
+      Value seed_ir_value;
+    };
+
+   public:
+    static DeviceContextArena* Get();
+    virtual ~DeviceContextArena() = default;
+
+    void RegisterTensor(std::shared_ptr<LazyTensor::Data> data);
+    void UnregisterTensor(LazyTensor::Data* data);
+
+    std::vector<LazyTensorPtr> GetLiveTensors(const BackendDevice* device);
+
+    // Overriding it allows derived classes to use their own IRs for Value.
+    virtual Value GetRngSeed(const BackendDevice& device);
+    uint64_t GetRunningSeed(const BackendDevice& device);
+    void SetRngSeed(const BackendDevice& device, uint64_t seed);
+
+    void MarkStep(const BackendDevice& device);
+
+    std::vector<BackendDevice> GetActiveDevices();
+
+   protected:
+    DeviceContext* GetDeviceContext(const BackendDevice& device);
+
+    void ForAllDeviceContexts(
+        const std::function<void(DeviceContext*)>& fn,
+        const BackendDevice* device);
+
+    // Overriding it allows derived classes to use their own conversions.
+    virtual Value IrValueFromScalar(
+        const at::Scalar& value,
+        at::ScalarType scalar_type,
+        const BackendDevice& device);
+
+   private:
+    std::vector<DeviceContext*> GetAllDeviceContexts();
+
+    std::mutex lock_;
+    std::map<BackendDevice, DeviceContext*> device_contexts_;
+  };
+
+  struct Async {
+    Async(
+        SyncTensorCollection* coll,
+        std::vector<BackendDataPtr> parameters_data,
+        std::vector<BackendDataPtr> tensors_data,
+        ComputationCache::TypePtr cached_computation);
+    virtual ~Async() = default;
+
+    void Wait();
+
+    MultiWait mwait;
+    std::vector<size_t> indices;
+    std::vector<ExceptionCleanup> unlocker;
+    std::vector<BackendDataPtr> parameters_data;
+    BackendDevice device;
+    ComputationCache::TypePtr cached_computation;
+    std::vector<BackendDataPtr> tensors_data;
+  };
+
+  void ResetTrimCounter() const;
+
+  // Waits for this SyncTensorCollection's device barrier and acquires the lock.
+  virtual void TensorCollectionBarrier(SyncTensorCollection* coll);
+
+  // One can override to insert your own profiler.
+  virtual PostOrderData RunPostOrder(
+      const std::vector<Value>& ir_values,
+      SyncTensorCollection* coll);
+
+ private:
+  struct CompilationResult {
+    BackendDevice device;
+    size_t emitted_nodes = 0;
+    ComputationPtr computation;
+    std::vector<BackendDataPtr> parameters_data;
+  };
+
+  virtual bool ShouldSyncTensor(const LazyTensorPtr& tensor) const;
+
+  SyncTensorCollection CollectSyncTensors(
+      const std::vector<LazyTensorPtr>& tensors,
+      const SyncTensorsConfig& config);
+
+  std::vector<Value> CollectRoots(
+      const std::vector<LazyTensorPtr>& tensors,
+      c10::ArrayRef<size_t> indices);
+
+  std::vector<BackendDataPtr> SetTensorData(
+      std::vector<LazyTensorPtr>* tensors,
+      const SyncTensorsConfig& config,
+      c10::ArrayRef<size_t> indices,
+      const std::vector<at::Tensor>& tensor_data_vec);
+
+  void ExtractIRAndPrepareTensorData(
+      std::vector<LazyTensorPtr>* tensors,
+      const SyncTensorsConfig& config,
+      c10::ArrayRef<size_t> indices,
+      std::vector<Value>& ir_values,
+      std::vector<at::Tensor>& tensor_data_vec);
+
+  std::shared_ptr<Async> TryRunCachedSync(
+      std::vector<LazyTensorPtr>* tensors,
+      SyncTensorCollection* coll,
+      PostOrderData* po_data,
+      const std::vector<at::Tensor>& tensor_data_vec);
+
+  CompilationResult Compile(
+      const std::vector<LazyTensorPtr>& tensors,
+      c10::ArrayRef<std::string> devices,
+      const SyncTensorCollection& coll,
+      PostOrderData* po_data,
+      const std::vector<Value>& ir_values);
+
+  ComputationCache::TypePtr LookupCachedCompile(const hash_t& hash);
+
+  std::shared_ptr<Async> SyncTensorsGraphInternal(
+      std::vector<LazyTensorPtr>* tensors,
+      c10::ArrayRef<std::string> devices,
+      const SyncTensorsConfig& config);
+
+  // Schedules the execution of a sync tensors operation in the background. The
+  // asynchronous operation will hold the device locks by capturing the ones
+  // present within the coll structure.
+  std::shared_ptr<Async> ScheduleSyncTensorsGraph(
+      SyncTensorCollection* coll,
+      std::vector<BackendDataPtr> parameters_data,
+      std::vector<BackendDataPtr> tensors_data,
+      ComputationCache::TypePtr cached_computation);
+
+  std::shared_ptr<Async> ScheduleSyncTensorsGraph(
+      std::vector<LazyTensorPtr>* tensors,
+      SyncTensorCollection* coll,
+      std::vector<BackendDataPtr> parameters_data,
+      ComputationCache::TypePtr cached_computation,
+      const std::vector<at::Tensor>& tensor_data_vec);
+
+  std::vector<at::Tensor> GetTensorsFused(std::vector<LazyTensorPtr>* tensors);
+
+  std::vector<at::Tensor> FetchTensors(
+      std::vector<LazyTensorPtr>* tensors,
+      c10::ArrayRef<BackendDataPtr> tensors_data,
+      const std::vector<size_t>* indices);
+
+  // Gathers the device data for all the input tensors, after an
+  // asynchronous operation.
+  std::vector<BackendDataPtr> GatherTensorsData(
+      const std::vector<LazyTensorPtr>& tensors,
+      c10::ArrayRef<size_t> indices,
+      c10::ArrayRef<BackendDataPtr> tensors_data);
+};
+
+} // namespace lazy
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/metrics.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/metrics.h
new file mode 100644
index 0000000000000000000000000000000000000000..b651ecea24ec334bf5de4591552c9f733f4dc718
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/metrics.h
@@ -0,0 +1,286 @@
+/**
+ * This file is adapted from PyTorch/XLA
+ * https://github.com/pytorch/xla/blob/master/third_party/xla_client/metrics.h
+ */
+
+#pragma once
+
+#include <atomic>
+#include <functional>
+#include <map>
+#include <memory>
+#include <mutex>
+#include <string>
+#include <vector>
+
+#include <c10/macros/Export.h>
+
+namespace torch {
+namespace lazy {
+
+struct TORCH_API Sample {
+  Sample() = default;
+  Sample(int64_t timestamp_ns, double value)
+      : timestamp_ns(timestamp_ns), value(value) {}
+
+  int64_t timestamp_ns = 0;
+  double value = 0;
+};
+
+using MetricReprFn = std::function<std::string(double)>;
+
+// Class used to collect time-stamped numeric samples. The samples are stored in
+// a circular buffer whose size can be configured at constructor time.
+class TORCH_API MetricData {
+ public:
+  // Creates a new MetricData object with the internal circular buffer storing
+  // max_samples samples. The repr_fn argument allows specifying a function
+  // which pretty-prints a sample value.
+  MetricData(MetricReprFn repr_fn, size_t max_samples);
+
+  // Returns the total values of all the samples being posted to this metric.
+  double Accumulator() const;
+
+  size_t TotalSamples() const;
+
+  void AddSample(int64_t timestamp_ns, double value);
+
+  // Returns a vector with all the current samples, from the oldest to the
+  // newest. If accumulator is not nullptr, it will receive the current value of
+  // the metrics' accumulator (the sum of all posted values). If total_samples
+  // is not nullptr, it will receive the count of the posted values.
+  std::vector<Sample> Samples(double* accumulator, size_t* total_samples) const;
+
+  std::string Repr(double value) const {
+    return repr_fn_(value);
+  }
+
+  void Reset();
+
+  bool IsValid() const {
+    return TotalSamples() > 0;
+  }
+
+ private:
+  mutable std::mutex lock_;
+  MetricReprFn repr_fn_;
+  size_t count_ = 0;
+  std::vector<Sample> samples_;
+  double accumulator_ = 0.0;
+};
+
+// Counters are a very lightweight form of metrics which do not need to track
+// sample time.
+class TORCH_API CounterData {
+ public:
+  CounterData() : value_(0) {}
+
+  void AddValue(int64_t value) {
+    value_ += value;
+  }
+
+  int64_t Value() const {
+    return value_;
+  }
+
+  void Reset() {
+    value_ = 0;
+  }
+
+  bool IsValid() const {
+    return value_ > 0;
+  }
+
+ private:
+  std::atomic<int64_t> value_;
+};
+
+class TORCH_API MetricsArena {
+ public:
+  static MetricsArena* Get();
+
+  void ResetCounters();
+  void ResetMetrics();
+
+  // Registers a new metric in the global arena.
+  void RegisterMetric(
+      const std::string& name,
+      MetricReprFn repr_fn,
+      size_t max_samples,
+      std::shared_ptr<MetricData>* data);
+
+  void RegisterCounter(
+      const std::string& name,
+      std::shared_ptr<CounterData>* data);
+
+  void ForEachMetric(
+      const std::function<void(const std::string&, MetricData*)>& metric_func);
+
+  void ForEachCounter(
+      const std::function<void(const std::string&, CounterData*)>&
+          counter_func);
+
+  std::vector<std::string> GetMetricNames();
+
+  MetricData* GetMetric(const std::string& name);
+
+  std::vector<std::string> GetCounterNames();
+
+  CounterData* GetCounter(const std::string& name);
+
+ private:
+  std::mutex lock_;
+  std::map<std::string, std::shared_ptr<MetricData>> metrics_;
+  std::map<std::string, std::shared_ptr<CounterData>> counters_;
+};
+
+// Emits the value in a to_string() conversion.
+TORCH_API std::string MetricFnValue(double value);
+// Emits the value in a humanized bytes representation.
+TORCH_API std::string MetricFnBytes(double value);
+// Emits the value in a humanized time representation. The value is expressed in
+// nanoseconds EPOCH time.
+TORCH_API std::string MetricFnTime(double value);
+
+// The typical use of a Metric is one in which it gets created either in a
+// global scope context:
+//   static Metric* metric = new Metric("RpcCount");
+// Or within a function scope:
+//   void MyFunction(...) {
+//     static Metric* metric = new Metric("RpcCount");
+//     ...
+//     metric->AddSample(ts_nanos, some_value);
+//   }
+class TORCH_API Metric {
+ public:
+  explicit Metric(
+      std::string name,
+      MetricReprFn repr_fn = MetricFnValue,
+      size_t max_samples = 0);
+
+  const std::string& Name() const {
+    return name_;
+  }
+
+  double Accumulator() const;
+
+  void AddSample(int64_t timestamp_ns, double value);
+
+  void AddSample(double value);
+
+  std::vector<Sample> Samples(double* accumulator, size_t* total_samples) const;
+
+  std::string Repr(double value) const;
+
+ private:
+  MetricData* GetData() const;
+
+  std::string name_;
+  MetricReprFn repr_fn_;
+  size_t max_samples_;
+  mutable std::shared_ptr<MetricData> data_ptr_;
+  mutable std::atomic<MetricData*> data_;
+};
+
+// A Counter is a lightweight form of metric which tracks an integer value which
+// can increase or decrease.
+// A typical use is as:
+//   static Counter* counter = new Counter("MyCounter");
+//   ...
+//   counter->AddValue(+1);
+class TORCH_API Counter {
+ public:
+  explicit Counter(std::string name);
+
+  void AddValue(int64_t value) {
+    GetData()->AddValue(value);
+  }
+
+  int64_t Value() const {
+    return GetData()->Value();
+  }
+
+ private:
+  CounterData* GetData() const;
+
+  std::string name_;
+  mutable std::shared_ptr<CounterData> data_ptr_;
+  mutable std::atomic<CounterData*> data_;
+};
+
+#define TORCH_LAZY_COUNTER(name, value)        \
+  do {                                         \
+    static ::torch::lazy::Counter* __counter = \
+        new ::torch::lazy::Counter(name);      \
+    __counter->AddValue(value);                \
+  } while (0)
+
+#define TORCH_LAZY_FN_COUNTER(ns) TORCH_LAZY_COUNTER(c10::str(ns, __func__), 1)
+
+#define TORCH_LAZY_VALUE_METRIC(name, value)                         \
+  do {                                                               \
+    static ::torch::lazy::Metric* __metric =                         \
+        new ::torch::lazy::Metric(name, torch::lazy::MetricFnValue); \
+    __metric->AddSample(value);                                      \
+  } while (0)
+
+// Creates a report with the current metrics statistics.
+TORCH_API std::string CreateMetricReport();
+
+// Creates a report with the selected metrics statistics.
+TORCH_API std::string CreateMetricReport(
+    const std::vector& counter_names,
+    const std::vector& metric_names);
+
+// Returns the currently registered metric names. Note that the list can grow
+// since metrics are usually function initialized (they are static function
+// variables).
+TORCH_API std::vector GetMetricNames();
+
+// Retrieves the metric data of a given metric, or nullptr if such metric does
+// not exist.
+TORCH_API MetricData* GetMetric(const std::string& name);
+
+// Returns the currently registered counter names. Note that the list can grow
+// since counters are usually function initialized (they are static function
+// variables).
+TORCH_API std::vector GetCounterNames();
+
+// Retrieves the counter data of a given counter, or nullptr if such counter
+// does not exist.
+TORCH_API CounterData* GetCounter(const std::string& name);
+
+// Retrieves the current EPOCH time in nanoseconds.
+TORCH_API int64_t NowNs();
+
+// Scope-based utility class to measure the time the code takes within a given
+// C++ scope.
+class TORCH_API TimedSection {
+ public:
+  explicit TimedSection(Metric* metric) : metric_(metric), start_(NowNs()) {}
+
+  ~TimedSection() {
+    int64_t now = NowNs();
+    metric_->AddSample(now, now - start_);
+  }
+
+  double Elapsed() const {
+    return 1e-9 * static_cast(NowNs() - start_);
+  }
+
+ private:
+  Metric* metric_;
+  int64_t start_;
+};
+
+#define TORCH_LAZY_TIMED(name)                                  \
+  static torch::lazy::Metric* timed_metric =                    \
+      new torch::lazy::Metric(name, torch::lazy::MetricFnTime); \
+  torch::lazy::TimedSection timed_section(timed_metric)
+
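+// Illustrative usage sketch (not part of the upstream header):
+// TORCH_LAZY_TIMED times the remainder of the enclosing scope through a local
+// TimedSection:
+//   void CompileGraph() {
+//     TORCH_LAZY_TIMED("CompileTime");
+//     // ... work measured until scope exit ...
+//   }
+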
+#define TORCH_LAZY_FN_COUNTER_TIMED_TRACING(ns) \
+  TORCH_LAZY_FN_COUNTER(ns);                    \
+  TORCH_LAZY_TIMED("LazyTracing")
+
+} // namespace lazy
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/multi_wait.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/multi_wait.h
new file mode 100644
index 0000000000000000000000000000000000000000..d970b008e1b6b81ca7ad7535e98bedb702fc93a7
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/multi_wait.h
@@ -0,0 +1,62 @@
+/**
+ * This file is adapted from PyTorch/XLA
+ * https://github.com/pytorch/xla/blob/master/third_party/xla_client/multi_wait.h
+ */
+
+#pragma once
+
+#include <condition_variable>
+#include <exception>
+#include <functional>
+#include <memory>
+#include <mutex>
+
+#include <c10/macros/Export.h>
+
+namespace torch {
+namespace lazy {
+
+// Support waiting for a number of tasks to complete.
+class TORCH_API MultiWait {
+ public:
+  explicit MultiWait(size_t count) : count_(count) {}
+
+  // Signal the completion of a single task.
+  void Done();
+
+  // Waits until at least count (passed as constructor value) completions
+  // happened.
+  void Wait();
+
+  // Same as above, but waits up to wait_seconds.
+  void Wait(double wait_seconds);
+
+  // Resets the threshold counter for the MultiWait object. The completed count
+  // is also reset to zero.
+  void Reset(size_t count);
+
+  // Creates a completer functor which signals the multi wait object once func
+  // has completed. Handles exceptions by signaling the multi wait with the
+  // proper status value. This API returns a function which captures a MultiWait
+  // reference, so care must be taken such that the reference remains valid for
+  // the whole lifetime of the returned function.
+  std::function<void()> Completer(std::function<void()> func);
+
+  // Similar to the above API, but with explicit capture of the MultiWait shared
+  // pointer.
+  static std::function<void()> Completer(
+      std::shared_ptr<MultiWait> mwait,
+      std::function<void()> func);
+
+ private:
+  void Complete(const std::function<void()>& func);
+
+  std::mutex mutex_;
+  std::condition_variable cv_;
+  size_t count_ = 0;
+  size_t completed_count_ = 0;
+  std::exception_ptr exptr_;
+};
+
+} // namespace lazy
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/shape_inference.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/shape_inference.h
new file mode 100644
index 0000000000000000000000000000000000000000..76ddea597a784ac7f64081f4b88b793e0e008b4e
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/shape_inference.h
@@ -0,0 +1,124 @@
+#pragma once
+
+#include <ATen/Tensor.h>
+#include <c10/core/Device.h>
+#include <c10/core/Layout.h>
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/ScalarType.h>
+#include <c10/core/SymInt.h>
+#include <c10/core/SymIntArrayRef.h>
+#include <c10/macros/Export.h>
+#include <c10/util/OptionalArrayRef.h>
+#include <torch/csrc/lazy/core/ir.h>
+#include <torch/csrc/lazy/core/shape.h>
+#include <vector>
+
+namespace torch {
+namespace lazy {
+// Turn clang-format off, as we rely on the whole signature being on one line
+// for codegen.
+// clang-format off
+TORCH_API std::vector<Shape> compute_shape__adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API std::vector<Shape> compute_shape__adaptive_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape__adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API std::vector<Shape> compute_shape__adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape_abs(const at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape_arange_out(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out);
+TORCH_API std::vector<Shape> compute_shape_bernoulli(const at::Tensor & self, ::std::optional<at::Generator> generator);
+TORCH_API std::vector<Shape> compute_shape_bernoulli(const at::Tensor & self, double p, ::std::optional<at::Generator> generator);
+TORCH_API std::vector<Shape> compute_shape_binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction);
+TORCH_API std::vector<Shape> compute_shape_binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction);
+TORCH_API std::vector<Shape> compute_shape_cat(at::TensorList tensors, int64_t dim);
+TORCH_API std::vector<Shape> compute_shape_cholesky(const at::Tensor & self, bool upper);
+TORCH_API std::vector<Shape> compute_shape_clamp_min(const at::Tensor & self, const at::Scalar & min);
+TORCH_API std::vector<Shape> compute_shape_clone(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format);
+TORCH_API std::vector<Shape> compute_shape_constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value);
+TORCH_API std::vector<Shape> compute_shape_convolution(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups);
+TORCH_API std::vector<Shape> compute_shape_convolution_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask);
+TORCH_API std::vector<Shape> compute_shape_embedding(const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse);
+TORCH_API std::vector<Shape> compute_shape_embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq);
+TORCH_API std::vector<Shape> compute_shape_expand(const at::Tensor & self, at::IntArrayRef size, bool implicit);
+TORCH_API std::vector<Shape> compute_shape_expand(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit);
+TORCH_API std::vector<Shape> compute_shape_flip(const at::Tensor & self, at::IntArrayRef dims);
+TORCH_API std::vector<Shape> compute_shape_glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim);
+TORCH_API std::vector<Shape> compute_shape_glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim);
+TORCH_API std::vector<Shape> compute_shape_grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+TORCH_API std::vector<Shape> compute_shape_grid_sampler_2d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask);
+TORCH_API std::vector<Shape> compute_shape_index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index);
+TORCH_API std::vector<Shape> compute_shape_inverse(const at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape_isnan(const at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape_log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer);
+TORCH_API std::vector<Shape> compute_shape_log_sigmoid_forward(const at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape_logdet(const at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape_logical_and(const at::Tensor & self, const at::Tensor & other);
+TORCH_API std::vector<Shape> compute_shape_logical_not(const at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape_logical_or(const at::Tensor & self, const at::Tensor & other);
+TORCH_API std::vector<Shape> compute_shape_logical_xor(const at::Tensor & self, const at::Tensor & other);
+TORCH_API std::vector<Shape> compute_shape_masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value);
+TORCH_API std::vector<Shape> compute_shape_masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value);
+TORCH_API std::vector<Shape> compute_shape_max(const at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape_mean(const at::Tensor & self, ::std::optional<at::ScalarType> dtype);
+TORCH_API std::vector<Shape> compute_shape_min(const at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape_mv(const at::Tensor & self, const at::Tensor & vec);
+TORCH_API std::vector<Shape> compute_shape_native_batch_norm(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps);
+TORCH_API std::vector<Shape> compute_shape_native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask);
+TORCH_API std::vector<Shape> compute_shape_native_dropout(const at::Tensor & input, double p, ::std::optional<bool> train);
+TORCH_API std::vector<Shape> compute_shape_native_dropout_backward(const at::Tensor & grad_output, const at::Tensor & mask, double scale);
+TORCH_API std::vector<Shape> compute_shape_native_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps);
+TORCH_API std::vector<Shape> compute_shape_native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask);
+TORCH_API std::vector<Shape> compute_shape_new_empty_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+TORCH_API std::vector<Shape> compute_shape_nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight);
+TORCH_API std::vector<Shape> compute_shape_nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index);
+TORCH_API std::vector<Shape> compute_shape_nonzero(const at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape_normal_functional(const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator);
+TORCH_API std::vector<Shape> compute_shape_random(const at::Tensor & self, ::std::optional<at::Generator> generator);
+TORCH_API std::vector<Shape> compute_shape_random(const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator);
+TORCH_API std::vector<Shape> compute_shape_random(const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator);
+TORCH_API std::vector<Shape> compute_shape_relu(const at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape_repeat(const at::Tensor & self, at::IntArrayRef repeats);
+TORCH_API std::vector<Shape> compute_shape_slogdet(const at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape_smooth_l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta);
+TORCH_API std::vector<Shape> compute_shape_sort(const at::Tensor & self, int64_t dim, bool descending);
+TORCH_API std::vector<Shape> compute_shape_stack(at::TensorList tensors, int64_t dim);
+TORCH_API std::vector<Shape> compute_shape_std(const at::Tensor & self, bool unbiased);
+TORCH_API std::vector<Shape> compute_shape_std(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim);
+TORCH_API std::vector<Shape> compute_shape_std(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim);
+TORCH_API std::vector<Shape> compute_shape_sum(const at::Tensor & self, ::std::optional<at::ScalarType> dtype);
+TORCH_API std::vector<Shape> compute_shape__to_copy(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, bool non_blocking, ::std::optional<at::MemoryFormat> memory_format);
+TORCH_API std::vector<Shape> compute_shape_take(const at::Tensor & self, const at::Tensor & index);
+TORCH_API std::vector<Shape> compute_shape_trace(const at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape_zero(const at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape_narrow_copy_symint(const at::Tensor & self, int64_t dim, int64_t start, c10::SymInt length);
+TORCH_API std::vector<Shape> compute_shape_hardswish(const at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape_hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape_selu(const at::Tensor & self);
+TORCH_API std::vector<Shape> compute_shape_uniform(const at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator);
+
+// Non-Native ops
+TORCH_API std::vector<Shape> compute_shape_scalar(const at::Scalar& value, const at::ScalarType& type);
+TORCH_API std::vector<Shape> compute_shape_expand(const Output& input0, const std::vector<int64_t>& size, const bool& is_scalar_expand);
+TORCH_API std::vector<Shape> compute_shape_view(const Output& input0, const std::vector<int64_t>& output_sizes);
+TORCH_API std::vector<Shape> compute_shape_cast(const Output& input0, const at::ScalarType& dtype, const ::std::optional<at::ScalarType>& stype);
+
+// View Ops
+// (Now that functionalization pass is used, we should kill these in a later PR)
+TORCH_API std::vector<Shape> compute_shape_as_strided_view_update(const Output& target, const Output& input, const std::vector<int64_t>& size, const std::vector<int64_t>& stride, const int64_t& storage_offset);
+TORCH_API std::vector<Shape> compute_shape_as_strided(const Output& input, const std::vector<int64_t>& size, const std::vector<int64_t>& stride, const int64_t& storage_offset);
+TORCH_API std::vector<Shape> compute_shape_diagonal_view_update(const Output& target, const Output& input, const int64_t& offset, const int64_t& dim1, const int64_t& dim2);
+TORCH_API std::vector<Shape> compute_shape_diagonal(const Output& input, const int64_t& offset, const int64_t& dim1, const int64_t& dim2);
+TORCH_API std::vector<Shape> compute_shape_narrow_view_update(const Output& input, const Output& source, const std::vector<int64_t>& base_indices);
+TORCH_API std::vector<Shape> compute_shape_narrow(const Output& input, const std::vector<int64_t>& base_indices, const std::vector<int64_t>& sizes);
+TORCH_API std::vector<Shape> compute_shape_permute(const Output& input, const std::vector<int64_t>& dims);
+TORCH_API std::vector<Shape> compute_shape_resize(const Output& input, const std::vector<int64_t>& size);
+TORCH_API std::vector<Shape> compute_shape_select_view_update(const Output& target, const Output& source, const int64_t& dim, const int64_t& start, const int64_t& end, const int64_t& stride);
+TORCH_API std::vector<Shape> compute_shape_select(const Output& input, const int64_t& dim, const int64_t& start, const int64_t& end, const int64_t& stride);
+TORCH_API std::vector<Shape> compute_shape_squeeze(const Output& input, const int& dim);
+TORCH_API std::vector<Shape> compute_shape_unsqueeze(const Output& input, const int& dim);
+
+TORCH_API std::vector<Shape> compute_shape_select_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index);
+TORCH_API std::vector<Shape> compute_shape_diagonal_scatter(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2);
+TORCH_API std::vector<Shape> compute_shape_slice_scatter_symint(const at::Tensor & self, const at::Tensor & src, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step);
+TORCH_API std::vector<Shape> compute_shape_as_strided_scatter_symint(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset);
+// clang-format on
+} // namespace lazy
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor.h
new file mode 100644
index 0000000000000000000000000000000000000000..afc52376c55452b9c295e0e2716dd0abc2a4768f
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor.h
@@ -0,0 +1,259 @@
+#pragma once
+
+#include <c10/core/SymNodeImpl.h>
+#include <c10/util/intrusive_ptr.h>
+#include <torch/csrc/lazy/backend/backend_data.h>
+#include <torch/csrc/lazy/backend/backend_device.h>
+#include <torch/csrc/lazy/core/ir.h>
+#include <torch/csrc/lazy/core/util.h>
+
+namespace torch {
+namespace lazy {
+
+class TORCH_API SymNodeImpl : public c10::SymNodeImpl {
+ public:
+  SymNodeImpl(NodePtr ptr) : node_(std::move(ptr)) {}
+  NodePtr node_;
+};
+
+class LazyTensor;
+using LazyTensorPtr = c10::intrusive_ptr<LazyTensor>;
+
+class TORCH_API LazyTensor : public c10::intrusive_ptr_target {
+ public:
+  // This is the core lazy tensor data structure where all the tensor data is
+  // held. The lazy tensor is nothing more than a shared pointer to a Data
+  // object.
+  struct Data {
+    Data(BackendDataPtr handle, BackendDevice device)
+        : handle(std::move(handle)),
+          device(std::move(device)),
+          unique_id(GetNextTensorId()) {}
+    Data(Value ir_value, BackendDevice device)
+        : ir_value(std::move(ir_value)),
+          device(std::move(device)),
+          unique_id(GetNextTensorId()) {}
+    Data(at::Tensor tensor_data, BackendDevice device)
+        : tensor_data(std::move(tensor_data)),
+          device(std::move(device)),
+          unique_id(GetNextTensorId()) {}
+    // TODO(alanwaketan): Remove this ctor. This is a
+    // temporary ctor to ease XLA LTC migration. It depends on
+    // XLA's Functionalization integration.
+    Data(BackendDevice device)
+        : device(std::move(device)), unique_id(GetNextTensorId()) {}
+
+    virtual ~Data();
+
+    BackendDataPtr handle;
+    Value ir_value;
+    std::optional<at::Tensor> tensor_data;
+    const BackendDevice device;
+    const int64_t unique_id = 0;
+    size_t generation = 1;
+  };
+
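+  // A hedged illustration of the three mutually-exclusive ways a Data is
+  // typically populated, per the constructors above (`dev` is a hypothetical
+  // BackendDevice, not part of this header):
+  //
+  //   Data(cpu_tensor, dev);   // holds tensor_data; nothing computed yet
+  //   Data(ir_value, dev);     // holds an IR Value describing a computation
+  //   Data(backend_ptr, dev);  // holds an already-computed backend handle
+  //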
+  static LazyTensorPtr Create(
+      const at::Tensor& tensor,
+      const BackendDevice& device);
+  static LazyTensorPtr Create(Value ir_value, const BackendDevice& device);
+  static LazyTensorPtr Create(BackendDataPtr handle);
+  static LazyTensorPtr Create(std::shared_ptr<Data> data);
+
+  // The default ctor previously created a null LazyTensor (one with no 'data'
+  // obj). Creating a null LazyTensor is no longer possible, since the same can
+  // be achieved by creating a null LazyTensorPtr and it is way too confusing to
+  // have to check both lazy_tensor_ptr && *lazy_tensor_ptr, so everywhere that
+  // used to rely on a LazyTensor obj with a null Data can now rely on a null
+  // LazyTensorPtr instead.
+  LazyTensor() = delete;
+  LazyTensor(const LazyTensor&) = default;
+  LazyTensor(LazyTensor&&) noexcept = default;
+
+  ~LazyTensor() override = default;
+
+  size_t generation() const {
+    return data()->generation;
+  }
+
+  // Override it to use your own Shape.
+  virtual int64_t size(int64_t dim) const;
+
+  // Override it to use your own graph executor.
+  virtual at::Tensor ToTensor(bool detached);
+
+  void ShallowCopyTo(LazyTensorPtr dest) const;
+
+  // Assigns the tensor value to the lazy tensor.
+  void SetTensor(at::Tensor tensor);
+
+  void UpdateFromTensor(at::Tensor tensor, bool sync);
+  void UpdateFromTensorOut(at::Tensor tensor);
+  void UpdateFromTensorOut(const LazyTensorPtr& tensor);
+
+  const std::shared_ptr<Data>& data() const;
+
+  // Override it to use your own type conversion.
+  virtual at::ScalarType dtype() const;
+
+  MaybeRef<Shape> shape() const;
+
+  const BackendDevice& GetDevice() const;
+  int64_t GetUniqueId() const;
+
+  // Fetches the data behind the tensor. If the tensor has a graph defining
+  // its current value, executes the graph and fetches the data result.
+  BackendDataPtr GetDataHandle();
+
+  // Fetches the current value of the data, which can be missing (nullptr)
+  // in case the tensor has a graph defining its current value.
+  BackendDataPtr CurrentDataHandle() const;
+
+  void SetDataHandle(BackendDataPtr handle);
+  void SetDataHandle(BackendDataPtr handle, bool sync);
+
+  // Retrieves the current IR Node, or nullptr in case no active IR Node is
+  // available.
+  Value CurrentIrValue() const;
+
+  // Retrieves the IR Node representing this LazyTensor. One will be created
+  // if missing. Note that although this is a const API, it actually changes
+  // the internal state of the object.
+  Value GetIrValue() const;
+
+  void SetIrValue(Value ir_value);
+  void SetInPlaceIrValue(Value ir_value);
+
+  std::optional<at::Tensor> CurrentTensorData() const;
+
+  std::vector<LazyTensorPtr> MakeOutputTensors(NodePtr node) const;
+
+  LazyTensorPtr CopyTensorToDevice(const BackendDevice& device);
+
+  // Applies the queue of operations in preparation for using the data.
+  // Override it to use your own graph executor.
+  virtual void ApplyPendingGraph();
+
+  // Override it to set extra information.
+  virtual void AssignIrValue(Value ir_value) const;
+
+ protected:
+  explicit LazyTensor(std::shared_ptr<Data> data);
+
+  void SetTensorData(at::Tensor tensor_data);
+
+  // We build a graph accumulating operations, but at a given point we
+  // need to force a rendering, otherwise the graph can grow without control.
+  // Think:
+  //   for i in range(0, 100000):
+  //     a = a + b
+  void TryLimitGraphSize();
+
+  // Override it to instantiate your own data.
+  virtual Value GetIrValueForTensor(
+      const at::Tensor& tensor,
+      const BackendDevice& device) const;
+
+  Value CreateTensorNode(BackendDataPtr data, bool read_only) const;
+
+ private:
+  LazyTensor(const at::Tensor& tensor, const BackendDevice& device);
+  LazyTensor(Value ir_value, const BackendDevice& device);
+  explicit LazyTensor(BackendDataPtr handle);
+
+  static int64_t GetNextTensorId();
+
+  std::shared_ptr<Data> data_;
+};
+
+// Utils to convert at::Tensor to LazyTensor, and vice versa.
+
+// Section 0: c10::TensorList ==> lazy::TensorList
+// Note: GetTensorList is not totally parallel to GetLtcTensor; a TensorList
+// skips the LazyTensor wrappers, assuming that the list of underlying IR
+// nodes is actually more useful for downstream computations. TBD.
+TORCH_API torch::lazy::Value GetTensorList(at::ITensorListRef tensors);
+
+// Section 1: at::Tensor => LazyTensor.
+// Extracts the LazyTensor out of an at::Tensor. Returns a null LazyTensor
+// if the tensor is not a lazy tensor.
+TORCH_API LazyTensorPtr TryGetLtcTensor(const at::Tensor& tensor);
+
+// Extracts the LazyTensor out of an at::Tensor. Throws an exception
+// if the tensor is not a lazy tensor.
+TORCH_API LazyTensorPtr GetLtcTensor(const at::Tensor& tensor);
+
+// Same as above, applied to a list of tensors.
+TORCH_API std::vector<LazyTensorPtr> GetLtcTensors(
+    c10::ArrayRef<at::Tensor> tensors);
+
+// If tensor is a lazy tensor type, returns the LazyTensor embedded within it,
+// otherwise creates a new lazy tensor type with tensor as data.
+TORCH_API LazyTensorPtr GetOrCreateLtcTensor(
+    const std::optional<at::Tensor>& tensor,
+    const BackendDevice& device);
+
+TORCH_API LazyTensorPtr GetLtcTensorOrCreateForWrappedNumber(
+    const at::Tensor& tensor,
+    const BackendDevice& device);
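+
+// A minimal usage sketch of the Section 1 helpers above (assuming `t` is an
+// at::Tensor that may or may not live on a lazy device):
+//
+//   LazyTensorPtr maybe_lazy = TryGetLtcTensor(t); // null if t is not lazy
+//   LazyTensorPtr lazy = GetLtcTensor(t);          // throws if t is not lazy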
+
+// Section 2: LazyTensor => at::Tensor.
+// Creates an ATen tensor from a LazyTensor.
+TORCH_API at::Tensor CreateAtenFromLtcTensor(const LazyTensorPtr& ltc_tensor);
+TORCH_API at::Tensor CreateAtenFromLtcTensor(LazyTensor&& ltc_tensor);
+
+// Note [Lazy Tensor Functionalization]
+// The functionalization pass is implemented by wrapping all TensorImpl
+// objects in C++ with an extra FunctionalTensorWrapper object that knows
+// how to perform functionalization.
+//
+// Certain functions in the aten API serve as entry/exit points for
+// functionalization, where we need to perform the wrapping/unwrapping:
+// - aten::to.device
+// - aten::empty
+
+// Given a non-lazy tensor, this function creates a lazy tensor on the
+// specified (lazy) device. The functionalize_output flag determines whether
+// or not we should wrap the output in a "functional wrapper".
+//
+// How do you know whether to pass true/false for functionalize_output?
+//
+// Case 1: nonlazy -> lazy
+//   If you're implementing a function that takes in nonlazy tensors and
+//   returns lazy tensors, then you should think of that function as an
+//   "entrypoint" to functionalization, and use functionalize_output=true.
+//   Examples include:
+//   - factory functions (the LTC kernel for at::empty)
+//   - CPU -> Lazy device conversions (the LTC kernel for at::to_device)
+//
+// Case 2: lazy -> lazy
+//   If you're implementing a function that takes in lazy tensors and returns
+//   lazy tensors, **but** requires creating lazy tensors internally, then you
+//   can assume that the current function is running inside of some outer
+//   context where functionalization is already running, which will take care
+//   of doing the wrapping for you, and use functionalize_output=false.
+//   Examples include:
+//   - CPU fallback (takes in lazy tensors, converts to cpu, calls kernel,
+//     converts returns back to lazy tensors).
+TORCH_API at::Tensor to_lazy_tensor(
+    const at::Tensor& self,
+    const c10::TensorOptions& options,
+    at::Device device,
+    bool non_blocking,
+    bool functionalize_output);
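+
+// A hedged sketch of Case 1 above; `my_factory_kernel` is illustrative and
+// not a real LTC kernel:
+//
+//   at::Tensor my_factory_kernel(const at::Tensor& cpu_t, at::Device dev) {
+//     return to_lazy_tensor(cpu_t, cpu_t.options(), dev,
+//                           /*non_blocking=*/false,
+//                           /*functionalize_output=*/true); // entrypoint
+//   }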
+
+template <size_t... Indices>
+auto TupleAtenFromLtcTensorsImpl(
+    const std::vector<LazyTensorPtr>& tensors,
+    std::index_sequence<Indices...>) {
+  return std::make_tuple(CreateAtenFromLtcTensor(tensors[Indices])...);
+}
+
+template <size_t N>
+auto TupleAtenFromLtcTensors(const std::vector<LazyTensorPtr>& tensors) {
+  return TupleAtenFromLtcTensorsImpl(tensors, std::make_index_sequence<N>{});
+}
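+
+// Usage sketch, assuming `lts` is a std::vector<LazyTensorPtr> holding
+// exactly two elements:
+//
+//   auto [fst, snd] = TupleAtenFromLtcTensors<2>(lts);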
+
+} // namespace lazy
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/thread_pool.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/thread_pool.h
new file mode 100644
index 0000000000000000000000000000000000000000..571a55b468fdd5a7114aa3c30221604ed0113795
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/thread_pool.h
@@ -0,0 +1,37 @@
+/**
+ * This file is adapted from PyTorch/XLA
+ * https://github.com/pytorch/xla/blob/master/third_party/xla_client/thread_pool.h
+ */
+
+#pragma once
+
+#include <functional>
+#include <memory>
+#include <thread>
+
+#include <c10/macros/Export.h>
+
+namespace torch {
+namespace lazy {
+
+class TORCH_API Completion {
+ public:
+  class Data;
+
+  explicit Completion(std::shared_ptr<Data> data);
+
+  ~Completion();
+
+  void Wait();
+
+ private:
+  std::shared_ptr<Data> data_;
+};
+
+// Schedules a closure which might wait for IO or other events/conditions.
+TORCH_API void ScheduleIoClosure(std::function<void()> closure);
+TORCH_API Completion
+ScheduleIoClosureWithCompletion(std::function<void()> closure);
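+
+// A minimal usage sketch, assuming the closure performs some IO-bound work:
+//
+//   Completion done = ScheduleIoClosureWithCompletion([] { /* IO work */ });
+//   done.Wait(); // blocks until the closure has finished running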
+
+} // namespace lazy
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/unique.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/unique.h
new file mode 100644
index 0000000000000000000000000000000000000000..3088da160860b709ed208407aa75fff2a8c88819
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/unique.h
@@ -0,0 +1,56 @@
+/**
+ * Unique in this file is adapted from PyTorch/XLA
+ * https://github.com/pytorch/xla/blob/master/third_party/xla_client/unique.h
+ */
+
+#pragma once
+
+#include <optional>
+
+#include <functional>
+#include <set>
+
+namespace torch {
+namespace lazy {
+
+// Helper class for tracking zero or more values that are all required to be
+// the same single value.
+template <typename T, typename C = std::equal_to<T>>
+class Unique {
+ public:
+  std::pair<bool, const T&> set(const T& value) {
+    if (value_) {
+      TORCH_CHECK(C()(*value_, value), "'", *value_, "' vs '", value, "'");
+      return std::pair<bool, const T&>(false, *value_);
+    }
+    value_ = value;
+    return std::pair<bool, const T&>(true, *value_);
+  }
+
+  operator bool() const {
+    return value_.has_value();
+  }
+  operator const T&() const {
+    return *value_;
+  }
+  const T& operator*() const {
+    return *value_;
+  }
+  const T* operator->() const {
+    return value_.operator->();
+  }
+
+  std::set<T> AsSet() const {
+    std::set<T> vset;
+    if (value_.has_value()) {
+      vset.insert(*value_);
+    }
+    return vset;
+  }
+
+ private:
+  std::optional<T> value_;
+};
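+
+// Usage sketch: enforcing that all operands share one device (assumes a
+// std::vector<LazyTensorPtr> named `tensors`):
+//
+//   Unique<BackendDevice> device;
+//   for (const auto& t : tensors) {
+//     device.set(t->GetDevice()); // TORCH_CHECKs that devices all match
+//   }
+//   if (device) { /* *device is the single common BackendDevice */ }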
+
+} // namespace lazy
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/config.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/config.h
new file mode 100644
index 0000000000000000000000000000000000000000..ac0320b9d0ac3cca01ddfc12f01e4a232fdda0df
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/config.h
@@ -0,0 +1,7 @@
+#pragma once
+#include <c10/util/Flags.h>
+
+// TODO(whc) unclear if this is useful, has only been tested as true
+C10_DECLARE_bool(torch_lazy_ts_tensor_update_sync);
+
+C10_DECLARE_bool(torch_lazy_ts_cuda);
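+
+// C10_DECLARE_bool(name) declares `extern bool FLAGS_name`, so backend code
+// can branch on the flag directly (a sketch, not a real call site):
+//
+//   if (FLAGS_torch_lazy_ts_cuda) { /* compile/execute graphs on CUDA */ }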
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ir_builder.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ir_builder.h
new file mode 100644
index 0000000000000000000000000000000000000000..9fff33135a5c87223d1b91a5797c42158a026e10
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ir_builder.h
@@ -0,0 +1,71 @@
+#pragma once
+
+#include <torch/csrc/lazy/core/internal_ops/ltc_ops.h>
+#include <torch/csrc/lazy/core/ir.h>
+#include <torch/csrc/lazy/core/ir_builder.h>
+#include <torch/csrc/lazy/core/shape_inference.h>
+#include <torch/csrc/lazy/generated/LazyNonNativeIr.h>
+#include <torch/csrc/lazy/ts_backend/dynamic_ir.h>
+#include <torch/csrc/lazy/ts_backend/ops/device_data.h>
+#include <torch/csrc/lazy/ts_backend/ops/generic.h>
+#include <torch/csrc/lazy/ts_backend/ts_node.h>
+
+namespace torch {
+namespace lazy {
+
+struct TorchScriptIrBuilder : IrBuilder {
+  NodePtr MakeDeviceData(
+      const std::shared_ptr<BackendData>& data) const override {
+    return DeviceData::Create(data);
+  }
+  // TODO: Scalar node is not currently used by ts_backend. Enable reusing
+  // Scalar node later if needed.
+  NodePtr MakeScalar(const at::Scalar& value, const at::ScalarType& type)
+      const override {
+    return MakeNode<Scalar>(value, type);
+  }
+  NodePtr MakeExpand(
+      const Value& input0,
+      const std::vector<int64_t>& size,
+      const bool& is_scalar_expand) const override {
+    return ReuseOrMakeNode<Expand>(input0, size, is_scalar_expand);
+  }
+  NodePtr MakeCast(
+      const Value& input0,
+      const at::ScalarType& dtype,
+      const std::optional<at::ScalarType>& stype =
+          std::nullopt) const override {
+    return ReuseOrMakeNode<Cast>(input0, dtype, stype);
+  }
+  NodePtr MakeTensorList(const OpList& inputs) const override {
+    return ReuseOrMakeNode<TensorList>(inputs);
+  }
+  // Generic needs cleanup
+  NodePtr MakeGeneric(
+      const OpKind& op,
+      const OpList& operands,
+      const Shape& shape,
+      const size_t& num_outputs = 1,
+      const hash_t& hash_seed =
+          static_cast<uint32_t>(0x5a2d296e9)) const override {
+    return MakeNode<Generic>(op, operands, shape, num_outputs, hash_seed);
+  }
+
+  // dynamic ir nodes
+  // TODO: verify if IR node reusing works for Dynamic shape ops
+  NodePtr MakeSizeNode(const Value& input, size_t dim) const override {
+    return MakeNode<SizeNode>(input, dim);
+  }
+  NodePtr MakeSizeAdd(const Value& a, const Value& b) const override {
+    return MakeNode<SizeAdd>(a, b);
+  }
+  NodePtr MakeSizeMul(const Value& a, const Value& b) const override {
+    return MakeNode<SizeMul>(a, b);
+  }
+  NodePtr MakeSizeDiv(const Value& a, const Value& b) const override {
+    return MakeNode<SizeDiv>(a, b);
+  }
+};
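+
+// A hedged sketch of composing the dynamic-shape size nodes above; assumes
+// `input` is a Value and that the active builder is reachable via
+// getIrBuilder():
+//
+//   NodePtr d0 = getIrBuilder()->MakeSizeNode(input, /*dim=*/0);
+//   NodePtr d1 = getIrBuilder()->MakeSizeNode(input, /*dim=*/1);
+//   NodePtr numel = getIrBuilder()->MakeSizeMul(Value(d0), Value(d1));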
+
+} // namespace lazy
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/tensor_aten_ops.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/tensor_aten_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..bf663f4ca6b1b78634a15bd65617328e92584239
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/tensor_aten_ops.h
@@ -0,0 +1,17 @@
+#pragma once
+
+#include <torch/csrc/lazy/core/tensor.h>
+
+namespace torch {
+namespace lazy {
+
+//////////////////////////////////////////////////////////////////////////////
+// ATEN operators follow here, listed in alphabetical order.
+//////////////////////////////////////////////////////////////////////////////
+
+void copy_(torch::lazy::LazyTensorPtr& input, torch::lazy::LazyTensorPtr& src);
+// Fills the input with the given value.
+void fill_(torch::lazy::LazyTensorPtr& input, const at::Scalar& value);
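+
+// Usage sketch (assuming `t` is a valid LazyTensorPtr): the fill is recorded
+// as an IR node and only materialized when the graph is executed:
+//
+//   fill_(t, at::Scalar(0));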
+
+} // namespace lazy
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_autograd_functions.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_autograd_functions.h
new file mode 100644
index 0000000000000000000000000000000000000000..7e01724470384497a5fcdc9102d4b06403bdc640
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_autograd_functions.h
@@ -0,0 +1,24 @@
+#pragma once
+
+#include <torch/csrc/autograd/custom_function.h>
+
+namespace torch {
+namespace lazy {
+
+struct MaxPool3dAutogradFunctionTS
+    : public torch::autograd::Function<MaxPool3dAutogradFunctionTS> {
+  static at::Tensor forward(
+      torch::autograd::AutogradContext* ctx,
+      at::Tensor self,
+      at::IntArrayRef kernel_size,
+      at::IntArrayRef stride,
+      at::IntArrayRef padding,
+      at::IntArrayRef dilation,
+      bool ceil_mode);
+  static torch::autograd::variable_list backward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::variable_list grad_output);
+};
+
+} // namespace lazy
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_eager_fallback.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_eager_fallback.h
new file mode 100644
index 0000000000000000000000000000000000000000..9f993d6f30290eb5914603d013f49884fcb3ea69
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_eager_fallback.h
@@ -0,0 +1,27 @@
+#pragma once
+
+#include <ATen/core/dispatch/Dispatcher.h>
+#include <ATen/core/interned_strings.h>
+#include <ATen/core/stack.h>
+#include <c10/core/DeviceType.h>
+
+namespace torch {
+namespace lazy {
+
+bool force_eager_fallback(c10::Symbol op);
+void ltc_eager_fallback(
+    const c10::OperatorHandle& op,
+    torch::jit::Stack* stack);
+
+void ts_eager_fallback(
+    const c10::OperatorHandle& op,
+    torch::jit::Stack* stack,
+    c10::DeviceType device_type);
+
+// The TorchScript backend does not register itself with the PyTorch
+// dispatcher until it is explicitly initialized. This function should only
+// be called by the main TorchScript backend init function.
+void register_ts_ltc_eager_fallback();
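+
+// Intended call order, sketched; the real init function lives in the
+// TorchScript backend implementation and `InitTorchScriptBackend` is
+// illustrative:
+//
+//   void InitTorchScriptBackend() {
+//     // ... register the backend with the dispatcher ...
+//     register_ts_ltc_eager_fallback();
+//   }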
+
+} // namespace lazy
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_node.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_node.h
new file mode 100644
index 0000000000000000000000000000000000000000..62cc9016f6ffa2e50fb4d19501823eedcc0befde
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_node.h
@@ -0,0 +1,106 @@
+#pragma once
+
+#include <ATen/core/interned_strings.h>
+#include <torch/csrc/jit/api/function_impl.h>
+#include <torch/csrc/jit/ir/ir.h>
+#include <torch/csrc/lazy/backend/lowering_context.h>
+#include <torch/csrc/lazy/core/ir.h>
+#include <torch/csrc/lazy/core/shape.h>
+#include <torch/csrc/lazy/ts_backend/ts_lowering_context.h>
+
+namespace torch {
+namespace lazy {
+
+using TSOpVector = std::vector<torch::jit::Value*>;
+
+class TORCH_API TsNode : public lazy::Node {
+ public:
+  TsNode(
+      OpKind op,
+      OpList operands,
+      std::vector<Shape>&& shapes,
+      size_t num_outputs,
+      hash_t hash_seed = kHashSeed);
+
+  TsNode(
+      OpKind op,
+      OpList operands,
+      const std::function<Shape()>& shape_fn,
+      size_t num_outputs,
+      hash_t hash_seed = kHashSeed);
+
+  TsNode(
+      OpKind op,
+      OpList operands,
+      size_t num_outputs,
+      hash_t hash_seed = kHashSeed);
+
+  TsNode(
+      OpKind op,
+      Shape shape,
+      size_t num_outputs,
+      hash_t hash_seed = kHashSeed);
+
+  ~TsNode() override = default;
+
+  hash_t hash() const override;
+
+  hash_t shapeHash() const override;
+
+  const std::string getPythonStacktrace() const;
+
+  // Lower is a backend-specific method since it returns a backend-specific
+  // type. Hence, it is convenient to define it differently per backend
+  // rather than at the Node API level.
+  virtual TSOpVector Lower(
+      std::shared_ptr<torch::jit::GraphFunction> function,
+      TSLoweringContext* loctx) const;
+
+ private:
+  // The hash of the DAG WITH size info, used for shape caching.
+  hash_t shape_hash_;
+  // The hash of the DAG used to look up the compiled graph by hash. We use
+  // the DAG hash WITHOUT size info if dynamic shape is enabled, and the DAG
+  // hash WITH size info otherwise.
+  hash_t dag_hash_;
+};
+
+// Note: this OpKind is separate from ltc_ops.h since it would otherwise be a
+// circular import. I like leaving TensorList in this file, and I think most
+// of the ltc_ops special cases will be deleted anyway.
+const OpKind tensor_list_opkind = OpKind::Get("lazy_tensors::tensor_list");
+
+// TensorList represents an at::TensorList, which is a vector[Tensor] but is
+// also a first-class IValue and can be fed as a single input to a TS program.
+// It is much easier to handle TensorLists in Lazy Tensor code if they are
+// represented as a single Node, so there can be more than one TensorList and
+// more than one Tensor side-by-side as operands to an op.
+//
+// Note: shape is undefined for TensorList. We assert in some places that
+// #shapes matches #outputs, and this stems from the fact that currently all
+// IR nodes represent tensors (there is no type system for this IR). Because
+// of this, TensorList is a bit of a hack.
+//
+// TODO(whc) once Shape() API is moved to Node base, also make it virtual, and
+// then implement it as NotImplemented for TensorList, also fixing the
+// assertion that would fail.
+struct TORCH_API TensorList : public TsNode {
+  static OpKind ClassOpKind() {
+    return tensor_list_opkind;
+  }
+
+  TensorList() = delete;
+  TensorList(OpList values);
+
+  bool CanBeReused(OpList values) const {
+    return operands() == std::vector<Output>(values.begin(), values.end());
+  }
+
+  TSOpVector Lower(
+      std::shared_ptr<torch::jit::GraphFunction> function,
+      TSLoweringContext* loctx) const override;
+};
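+
+// Usage sketch (assuming `a` and `b` are Values produced by other nodes):
+//
+//   NodePtr list = ReuseOrMakeNode<TensorList>(OpList{a, b});
+//
+// The resulting node lowers to a single list input of the TS graph, so a
+// whole at::TensorList occupies one operand slot.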
+
+} // namespace lazy
+} // namespace torch