diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/indexes/test_links_priority/external.html b/videollama2/lib/python3.10/site-packages/setuptools/tests/indexes/test_links_priority/external.html
new file mode 100644
index 0000000000000000000000000000000000000000..92e4702f634dfb37a404bec3103b76f6afcaa917
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/indexes/test_links_priority/external.html
@@ -0,0 +1,3 @@
+<html><body>
+<a href="/foobar-0.1.tar.gz#md5=1__bad_md5___">bad old link</a>
+</body></html>
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/integration/__pycache__/helpers.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/setuptools/tests/integration/__pycache__/helpers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..260a3e0ac50bbf4ad3935ac25bd3e3bd1d77d943
Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/setuptools/tests/integration/__pycache__/helpers.cpython-310.pyc differ
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/integration/__pycache__/test_pip_install_sdist.cpython-310.pyc b/videollama2/lib/python3.10/site-packages/setuptools/tests/integration/__pycache__/test_pip_install_sdist.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5ca0ea87b22e830e999d4ad9c91e6c6d11415546
Binary files /dev/null and b/videollama2/lib/python3.10/site-packages/setuptools/tests/integration/__pycache__/test_pip_install_sdist.cpython-310.pyc differ
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/mod_with_constant.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/mod_with_constant.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef755dd1c7a8d1f116fe51f1b43315057f03379d
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/mod_with_constant.py
@@ -0,0 +1 @@
+value = 'three, sir!'
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/namespaces.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/namespaces.py
new file mode 100644
index 0000000000000000000000000000000000000000..248db98f97951aeeee0222131417e73074cc72d2
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/namespaces.py
@@ -0,0 +1,90 @@
+import ast
+import json
+import textwrap
+from pathlib import Path
+
+
+def iter_namespace_pkgs(namespace):
+ parts = namespace.split(".")
+ for i in range(len(parts)):
+ yield ".".join(parts[: i + 1])
+
+
+def build_namespace_package(tmpdir, name, version="1.0", impl="pkg_resources"):
+ src_dir = tmpdir / name
+ src_dir.mkdir()
+ setup_py = src_dir / 'setup.py'
+ namespace, _, rest = name.rpartition('.')
+ namespaces = list(iter_namespace_pkgs(namespace))
+ setup_args = {
+ "name": name,
+ "version": version,
+ "packages": namespaces,
+ }
+
+ if impl == "pkg_resources":
+ tmpl = '__import__("pkg_resources").declare_namespace(__name__)'
+ setup_args["namespace_packages"] = namespaces
+ elif impl == "pkgutil":
+ tmpl = '__path__ = __import__("pkgutil").extend_path(__path__, __name__)'
+ else:
+ raise ValueError(f"Cannot recognise {impl=} when creating namespaces")
+
+ args = json.dumps(setup_args, indent=4)
+ assert ast.literal_eval(args) # ensure it is valid Python
+
+ script = textwrap.dedent(
+ """\
+ import setuptools
+ args = {args}
+ setuptools.setup(**args)
+ """
+ ).format(args=args)
+ setup_py.write_text(script, encoding='utf-8')
+
+ ns_pkg_dir = Path(src_dir, namespace.replace(".", "/"))
+ ns_pkg_dir.mkdir(parents=True)
+
+ for ns in namespaces:
+ pkg_init = src_dir / ns.replace(".", "/") / '__init__.py'
+ pkg_init.write_text(tmpl, encoding='utf-8')
+
+ pkg_mod = ns_pkg_dir / (rest + '.py')
+ some_functionality = 'name = {rest!r}'.format(**locals())
+ pkg_mod.write_text(some_functionality, encoding='utf-8')
+ return src_dir
+
+
+def build_pep420_namespace_package(tmpdir, name):
+ src_dir = tmpdir / name
+ src_dir.mkdir()
+ pyproject = src_dir / "pyproject.toml"
+ namespace, _, rest = name.rpartition(".")
+ script = f"""\
+ [build-system]
+ requires = ["setuptools"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "{name}"
+ version = "3.14159"
+ """
+ pyproject.write_text(textwrap.dedent(script), encoding='utf-8')
+ ns_pkg_dir = Path(src_dir, namespace.replace(".", "/"))
+ ns_pkg_dir.mkdir(parents=True)
+ pkg_mod = ns_pkg_dir / (rest + ".py")
+ some_functionality = f"name = {rest!r}"
+ pkg_mod.write_text(some_functionality, encoding='utf-8')
+ return src_dir
+
+
+def make_site_dir(target):
+ """
+ Add a sitecustomize.py module in target to cause
+ target to be added to site dirs such that .pth files
+ are processed there.
+ """
+ sc = target / 'sitecustomize.py'
+ target_str = str(target)
+ tmpl = '__import__("site").addsitedir({target_str!r})'
+ sc.write_text(tmpl.format(**locals()), encoding='utf-8')
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/test_archive_util.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_archive_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3efc62889994fa68bc9170e8a0e403f48a204e1
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_archive_util.py
@@ -0,0 +1,36 @@
+import io
+import tarfile
+
+import pytest
+
+from setuptools import archive_util
+
+
+@pytest.fixture
+def tarfile_with_unicode(tmpdir):
+ """
+ Create a tarfile containing only a file whose name is
+ a zero byte file called testimäge.png.
+ """
+ tarobj = io.BytesIO()
+
+ with tarfile.open(fileobj=tarobj, mode="w:gz") as tgz:
+ data = b""
+
+ filename = "testimäge.png"
+
+ t = tarfile.TarInfo(filename)
+ t.size = len(data)
+
+ tgz.addfile(t, io.BytesIO(data))
+
+ target = tmpdir / 'unicode-pkg-1.0.tar.gz'
+ with open(str(target), mode='wb') as tf:
+ tf.write(tarobj.getvalue())
+ return str(target)
+
+
+@pytest.mark.xfail(reason="#710 and #712")
+def test_unicode_files(tarfile_with_unicode, tmpdir):
+ target = tmpdir / 'out'
+ archive_util.unpack_archive(tarfile_with_unicode, str(target))
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/test_packageindex.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_packageindex.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a6e5917a8733c71a5d9b29d4bde6fc12cc1e183
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_packageindex.py
@@ -0,0 +1,267 @@
+import http.client
+import re
+import urllib.error
+import urllib.request
+from inspect import cleandoc
+
+import pytest
+
+import setuptools.package_index
+
+import distutils.errors
+
+
+class TestPackageIndex:
+ def test_regex(self):
+ hash_url = 'http://other_url?:action=show_md5&'
+ hash_url += 'digest=0123456789abcdef0123456789abcdef'
+ doc = """
+ Name
+ (md5)
+ """.lstrip().format(**locals())
+ assert setuptools.package_index.PYPI_MD5.match(doc)
+
+ def test_bad_url_bad_port(self):
+ index = setuptools.package_index.PackageIndex()
+ url = 'http://127.0.0.1:0/nonesuch/test_package_index'
+ with pytest.raises(Exception, match=re.escape(url)):
+ v = index.open_url(url)
+ assert isinstance(v, urllib.error.HTTPError)
+
+ def test_bad_url_typo(self):
+ # issue 16
+ # easy_install inquant.contentmirror.plone breaks because of a typo
+ # in its home URL
+ index = setuptools.package_index.PackageIndex(hosts=('www.example.com',))
+
+ url = 'url:%20https://svn.plone.org/svn/collective/inquant.contentmirror.plone/trunk'
+
+ with pytest.raises(Exception, match=re.escape(url)):
+ v = index.open_url(url)
+ assert isinstance(v, urllib.error.HTTPError)
+
+ def test_bad_url_bad_status_line(self):
+ index = setuptools.package_index.PackageIndex(hosts=('www.example.com',))
+
+ def _urlopen(*args):
+ raise http.client.BadStatusLine('line')
+
+ index.opener = _urlopen
+ url = 'http://example.com'
+ with pytest.raises(Exception, match=r'line'):
+ index.open_url(url)
+
+ def test_bad_url_double_scheme(self):
+ """
+ A bad URL with a double scheme should raise a DistutilsError.
+ """
+ index = setuptools.package_index.PackageIndex(hosts=('www.example.com',))
+
+ # issue 20
+ url = 'http://http://svn.pythonpaste.org/Paste/wphp/trunk'
+ try:
+ index.open_url(url)
+ except distutils.errors.DistutilsError as error:
+ msg = str(error)
+ assert (
+ 'nonnumeric port' in msg
+ or 'getaddrinfo failed' in msg
+ or 'Name or service not known' in msg
+ )
+ return
+ raise RuntimeError("Did not raise")
+
+ def test_url_ok(self):
+ index = setuptools.package_index.PackageIndex(hosts=('www.example.com',))
+ url = 'file:///tmp/test_package_index'
+ assert index.url_ok(url, True)
+
+ def test_parse_bdist_wininst(self):
+ parse = setuptools.package_index.parse_bdist_wininst
+
+ actual = parse('reportlab-2.5.win32-py2.4.exe')
+ expected = 'reportlab-2.5', '2.4', 'win32'
+ assert actual == expected
+
+ actual = parse('reportlab-2.5.win32.exe')
+ expected = 'reportlab-2.5', None, 'win32'
+ assert actual == expected
+
+ actual = parse('reportlab-2.5.win-amd64-py2.7.exe')
+ expected = 'reportlab-2.5', '2.7', 'win-amd64'
+ assert actual == expected
+
+ actual = parse('reportlab-2.5.win-amd64.exe')
+ expected = 'reportlab-2.5', None, 'win-amd64'
+ assert actual == expected
+
+ def test__vcs_split_rev_from_url(self):
+ """
+ Test the basic usage of _vcs_split_rev_from_url
+ """
+ vsrfu = setuptools.package_index.PackageIndex._vcs_split_rev_from_url
+ url, rev = vsrfu('https://example.com/bar@2995')
+ assert url == 'https://example.com/bar'
+ assert rev == '2995'
+
+ def test_local_index(self, tmpdir):
+ """
+ local_open should be able to read an index from the file system.
+ """
+ index_file = tmpdir / 'index.html'
+ with index_file.open('w') as f:
+ f.write('<div>content</div>')
+ url = 'file:' + urllib.request.pathname2url(str(tmpdir)) + '/'
+ res = setuptools.package_index.local_open(url)
+ assert 'content' in res.read()
+
+ def test_egg_fragment(self):
+ """
+ EGG fragments must comply to PEP 440
+ """
+ epoch = [
+ '',
+ '1!',
+ ]
+ releases = [
+ '0',
+ '0.0',
+ '0.0.0',
+ ]
+ pre = [
+ 'a0',
+ 'b0',
+ 'rc0',
+ ]
+ post = ['.post0']
+ dev = [
+ '.dev0',
+ ]
+ local = [
+ ('', ''),
+ ('+ubuntu.0', '+ubuntu.0'),
+ ('+ubuntu-0', '+ubuntu.0'),
+ ('+ubuntu_0', '+ubuntu.0'),
+ ]
+ versions = [
+ [''.join([e, r, p, loc]) for loc in locs]
+ for e in epoch
+ for r in releases
+ for p in sum([pre, post, dev], [''])
+ for locs in local
+ ]
+ for v, vc in versions:
+ dists = list(
+ setuptools.package_index.distros_for_url(
+ 'http://example.com/example-foo.zip#egg=example-foo-' + v
+ )
+ )
+ assert dists[0].version == ''
+ assert dists[1].version == vc
+
+ def test_download_git_with_rev(self, tmp_path, fp):
+ url = 'git+https://github.example/group/project@master#egg=foo'
+ index = setuptools.package_index.PackageIndex()
+
+ expected_dir = tmp_path / 'project@master'
+ fp.register([
+ 'git',
+ 'clone',
+ '--quiet',
+ 'https://github.example/group/project',
+ expected_dir,
+ ])
+ fp.register(['git', '-C', expected_dir, 'checkout', '--quiet', 'master'])
+
+ result = index.download(url, tmp_path)
+
+ assert result == str(expected_dir)
+ assert len(fp.calls) == 2
+
+ def test_download_git_no_rev(self, tmp_path, fp):
+ url = 'git+https://github.example/group/project#egg=foo'
+ index = setuptools.package_index.PackageIndex()
+
+ expected_dir = tmp_path / 'project'
+ fp.register([
+ 'git',
+ 'clone',
+ '--quiet',
+ 'https://github.example/group/project',
+ expected_dir,
+ ])
+ index.download(url, tmp_path)
+
+ def test_download_svn(self, tmp_path):
+ url = 'svn+https://svn.example/project#egg=foo'
+ index = setuptools.package_index.PackageIndex()
+
+ msg = r".*SVN download is not supported.*"
+ with pytest.raises(distutils.errors.DistutilsError, match=msg):
+ index.download(url, tmp_path)
+
+
+class TestContentCheckers:
+ def test_md5(self):
+ checker = setuptools.package_index.HashChecker.from_url(
+ 'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478'
+ )
+ checker.feed('You should probably not be using MD5'.encode('ascii'))
+ assert checker.hash.hexdigest() == 'f12895fdffbd45007040d2e44df98478'
+ assert checker.is_valid()
+
+ def test_other_fragment(self):
+ "Content checks should succeed silently if no hash is present"
+ checker = setuptools.package_index.HashChecker.from_url(
+ 'http://foo/bar#something%20completely%20different'
+ )
+ checker.feed('anything'.encode('ascii'))
+ assert checker.is_valid()
+
+ def test_blank_md5(self):
+ "Content checks should succeed if a hash is empty"
+ checker = setuptools.package_index.HashChecker.from_url('http://foo/bar#md5=')
+ checker.feed('anything'.encode('ascii'))
+ assert checker.is_valid()
+
+ def test_get_hash_name_md5(self):
+ checker = setuptools.package_index.HashChecker.from_url(
+ 'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478'
+ )
+ assert checker.hash_name == 'md5'
+
+ def test_report(self):
+ checker = setuptools.package_index.HashChecker.from_url(
+ 'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478'
+ )
+ rep = checker.report(lambda x: x, 'My message about %s')
+ assert rep == 'My message about md5'
+
+
+class TestPyPIConfig:
+ def test_percent_in_password(self, tmp_home_dir):
+ pypirc = tmp_home_dir / '.pypirc'
+ pypirc.write_text(
+ cleandoc(
+ """
+ [pypi]
+ repository=https://pypi.org
+ username=jaraco
+ password=pity%
+ """
+ ),
+ encoding="utf-8",
+ )
+ cfg = setuptools.package_index.PyPIConfig()
+ cred = cfg.creds_by_repository['https://pypi.org']
+ assert cred.username == 'jaraco'
+ assert cred.password == 'pity%'
+
+
+@pytest.mark.timeout(1)
+def test_REL_DoS():
+ """
+ REL should not hang on a contrived attack string.
+ """
+ setuptools.package_index.REL.search('< rel=' + ' ' * 2**12)
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/test_sdist.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_sdist.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee0511b1c485a1e4a9f028a29b7a17d8b4c8130
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_sdist.py
@@ -0,0 +1,972 @@
+"""sdist tests"""
+
+import contextlib
+import io
+import logging
+import os
+import pathlib
+import sys
+import tarfile
+import tempfile
+import unicodedata
+from inspect import cleandoc
+from pathlib import Path
+from unittest import mock
+
+import jaraco.path
+import pytest
+
+from setuptools import Command, SetuptoolsDeprecationWarning
+from setuptools._importlib import metadata
+from setuptools.command.egg_info import manifest_maker
+from setuptools.command.sdist import sdist
+from setuptools.dist import Distribution
+from setuptools.extension import Extension
+from setuptools.tests import fail_on_ascii
+
+from .text import Filenames
+
+import distutils
+from distutils.core import run_setup
+
+SETUP_ATTRS = {
+ 'name': 'sdist_test',
+ 'version': '0.0',
+ 'packages': ['sdist_test'],
+ 'package_data': {'sdist_test': ['*.txt']},
+ 'data_files': [("data", [os.path.join("d", "e.dat")])],
+}
+
+SETUP_PY = f"""\
+from setuptools import setup
+
+setup(**{SETUP_ATTRS!r})
+"""
+
+EXTENSION = Extension(
+ name="sdist_test.f",
+ sources=[os.path.join("sdist_test", "f.c")],
+ depends=[os.path.join("sdist_test", "f.h")],
+)
+EXTENSION_SOURCES = EXTENSION.sources + EXTENSION.depends
+
+
+@contextlib.contextmanager
+def quiet():
+ old_stdout, old_stderr = sys.stdout, sys.stderr
+ sys.stdout, sys.stderr = io.StringIO(), io.StringIO()
+ try:
+ yield
+ finally:
+ sys.stdout, sys.stderr = old_stdout, old_stderr
+
+
+# Convert to POSIX path
+def posix(path):
+ if not isinstance(path, str):
+ return path.replace(os.sep.encode('ascii'), b'/')
+ else:
+ return path.replace(os.sep, '/')
+
+
+# HFS Plus uses decomposed UTF-8
+def decompose(path):
+ if isinstance(path, str):
+ return unicodedata.normalize('NFD', path)
+ try:
+ path = path.decode('utf-8')
+ path = unicodedata.normalize('NFD', path)
+ path = path.encode('utf-8')
+ except UnicodeError:
+ pass # Not UTF-8
+ return path
+
+
+def read_all_bytes(filename):
+ with open(filename, 'rb') as fp:
+ return fp.read()
+
+
+def latin1_fail():
+ try:
+ desc, filename = tempfile.mkstemp(suffix=Filenames.latin_1)
+ os.close(desc)
+ os.remove(filename)
+ except Exception:
+ return True
+
+
+fail_on_latin1_encoded_filenames = pytest.mark.xfail(
+ latin1_fail(),
+ reason="System does not support latin-1 filenames",
+)
+
+
+skip_under_xdist = pytest.mark.skipif(
+ "os.environ.get('PYTEST_XDIST_WORKER')",
+ reason="pytest-dev/pytest-xdist#843",
+)
+skip_under_stdlib_distutils = pytest.mark.skipif(
+ not distutils.__package__.startswith('setuptools'),
+ reason="the test is not supported with stdlib distutils",
+)
+
+
+def touch(path):
+ open(path, 'wb').close()
+ return path
+
+
+def symlink_or_skip_test(src, dst):
+ try:
+ os.symlink(src, dst)
+ except (OSError, NotImplementedError):
+ pytest.skip("symlink not supported in OS")
+ return None
+ return dst
+
+
+class TestSdistTest:
+ @pytest.fixture(autouse=True)
+ def source_dir(self, tmpdir):
+ tmpdir = tmpdir / "project_root"
+ tmpdir.mkdir()
+
+ (tmpdir / 'setup.py').write_text(SETUP_PY, encoding='utf-8')
+
+ # Set up the rest of the test package
+ test_pkg = tmpdir / 'sdist_test'
+ test_pkg.mkdir()
+ data_folder = tmpdir / 'd'
+ data_folder.mkdir()
+ # *.rst was not included in package_data, so c.rst should not be
+ # automatically added to the manifest when not under version control
+ for fname in ['__init__.py', 'a.txt', 'b.txt', 'c.rst']:
+ touch(test_pkg / fname)
+ touch(data_folder / 'e.dat')
+ # C sources are not included by default, but they will be,
+ # if an extension module uses them as sources or depends
+ for fname in EXTENSION_SOURCES:
+ touch(tmpdir / fname)
+
+ with tmpdir.as_cwd():
+ yield tmpdir
+
+ def assert_package_data_in_manifest(self, cmd):
+ manifest = cmd.filelist.files
+ assert os.path.join('sdist_test', 'a.txt') in manifest
+ assert os.path.join('sdist_test', 'b.txt') in manifest
+ assert os.path.join('sdist_test', 'c.rst') not in manifest
+ assert os.path.join('d', 'e.dat') in manifest
+
+ def setup_with_extension(self):
+ setup_attrs = {**SETUP_ATTRS, 'ext_modules': [EXTENSION]}
+
+ dist = Distribution(setup_attrs)
+ dist.script_name = 'setup.py'
+ cmd = sdist(dist)
+ cmd.ensure_finalized()
+
+ with quiet():
+ cmd.run()
+
+ return cmd
+
+ def test_package_data_in_sdist(self):
+ """Regression test for pull request #4: ensures that files listed in
+ package_data are included in the manifest even if they're not added to
+ version control.
+ """
+
+ dist = Distribution(SETUP_ATTRS)
+ dist.script_name = 'setup.py'
+ cmd = sdist(dist)
+ cmd.ensure_finalized()
+
+ with quiet():
+ cmd.run()
+
+ self.assert_package_data_in_manifest(cmd)
+
+ def test_package_data_and_include_package_data_in_sdist(self):
+ """
+ Ensure package_data and include_package_data work
+ together.
+ """
+ setup_attrs = {**SETUP_ATTRS, 'include_package_data': True}
+ assert setup_attrs['package_data']
+
+ dist = Distribution(setup_attrs)
+ dist.script_name = 'setup.py'
+ cmd = sdist(dist)
+ cmd.ensure_finalized()
+
+ with quiet():
+ cmd.run()
+
+ self.assert_package_data_in_manifest(cmd)
+
+ def test_extension_sources_in_sdist(self):
+ """
+ Ensure that the files listed in Extension.sources and Extension.depends
+ are automatically included in the manifest.
+ """
+ cmd = self.setup_with_extension()
+ self.assert_package_data_in_manifest(cmd)
+ manifest = cmd.filelist.files
+ for path in EXTENSION_SOURCES:
+ assert path in manifest
+
+ def test_missing_extension_sources(self):
+ """
+ Similar to test_extension_sources_in_sdist but the referenced files don't exist.
+ Missing files should not be included in distribution (with no error raised).
+ """
+ for path in EXTENSION_SOURCES:
+ os.remove(path)
+
+ cmd = self.setup_with_extension()
+ self.assert_package_data_in_manifest(cmd)
+ manifest = cmd.filelist.files
+ for path in EXTENSION_SOURCES:
+ assert path not in manifest
+
+ def test_symlinked_extension_sources(self):
+ """
+ Similar to test_extension_sources_in_sdist but the referenced files are
+ instead symbolic links to project-local files. Referenced file paths
+ should be included. Symlink targets themselves should NOT be included.
+ """
+ symlinked = []
+ for path in EXTENSION_SOURCES:
+ base, ext = os.path.splitext(path)
+ target = base + "_target." + ext
+
+ os.rename(path, target)
+ symlink_or_skip_test(os.path.basename(target), path)
+ symlinked.append(target)
+
+ cmd = self.setup_with_extension()
+ self.assert_package_data_in_manifest(cmd)
+ manifest = cmd.filelist.files
+ for path in EXTENSION_SOURCES:
+ assert path in manifest
+ for path in symlinked:
+ assert path not in manifest
+
+ _INVALID_PATHS = {
+ "must be relative": lambda: (
+ os.path.abspath(os.path.join("sdist_test", "f.h"))
+ ),
+ "can't have `..` segments": lambda: (
+ os.path.join("sdist_test", "..", "sdist_test", "f.h")
+ ),
+ "doesn't exist": lambda: (
+ os.path.join("sdist_test", "this_file_does_not_exist.h")
+ ),
+ "must be inside the project root": lambda: (
+ symlink_or_skip_test(
+ touch(os.path.join("..", "outside_of_project_root.h")),
+ "symlink.h",
+ )
+ ),
+ }
+
+ @skip_under_stdlib_distutils
+ @pytest.mark.parametrize("reason", _INVALID_PATHS.keys())
+ def test_invalid_extension_depends(self, reason, caplog):
+ """
+ Due to backwards compatibility reasons, `Extension.depends` should accept
+ invalid/weird paths, but then ignore them when building a sdist.
+
+ This test verifies that the source distribution is still built
+ successfully with such paths, but that instead of adding these paths to
+ the manifest, we emit an informational message, notifying the user that
+ the invalid path won't be automatically included.
+ """
+ invalid_path = self._INVALID_PATHS[reason]()
+ extension = Extension(
+ name="sdist_test.f",
+ sources=[],
+ depends=[invalid_path],
+ )
+ setup_attrs = {**SETUP_ATTRS, 'ext_modules': [extension]}
+
+ dist = Distribution(setup_attrs)
+ dist.script_name = 'setup.py'
+ cmd = sdist(dist)
+ cmd.ensure_finalized()
+
+ with quiet(), caplog.at_level(logging.INFO):
+ cmd.run()
+
+ self.assert_package_data_in_manifest(cmd)
+ manifest = cmd.filelist.files
+ assert invalid_path not in manifest
+
+ expected_message = [
+ message
+ for (logger, level, message) in caplog.record_tuples
+ if (
+ logger == "root" #
+ and level == logging.INFO #
+ and invalid_path in message #
+ )
+ ]
+ assert len(expected_message) == 1
+ (expected_message,) = expected_message
+ assert reason in expected_message
+
+ def test_custom_build_py(self):
+ """
+ Ensure projects defining custom build_py don't break
+ when creating sdists (issue #2849)
+ """
+ from distutils.command.build_py import build_py as OrigBuildPy
+
+ using_custom_command_guard = mock.Mock()
+
+ class CustomBuildPy(OrigBuildPy):
+ """
+ Some projects have custom commands inheriting from `distutils`
+ """
+
+ def get_data_files(self):
+ using_custom_command_guard()
+ return super().get_data_files()
+
+ setup_attrs = {**SETUP_ATTRS, 'include_package_data': True}
+ assert setup_attrs['package_data']
+
+ dist = Distribution(setup_attrs)
+ dist.script_name = 'setup.py'
+ cmd = sdist(dist)
+ cmd.ensure_finalized()
+
+ # Make sure we use the custom command
+ cmd.cmdclass = {'build_py': CustomBuildPy}
+ cmd.distribution.cmdclass = {'build_py': CustomBuildPy}
+ assert cmd.distribution.get_command_class('build_py') == CustomBuildPy
+
+ msg = "setuptools instead of distutils"
+ with quiet(), pytest.warns(SetuptoolsDeprecationWarning, match=msg):
+ cmd.run()
+
+ using_custom_command_guard.assert_called()
+ self.assert_package_data_in_manifest(cmd)
+
+ def test_setup_py_exists(self):
+ dist = Distribution(SETUP_ATTRS)
+ dist.script_name = 'foo.py'
+ cmd = sdist(dist)
+ cmd.ensure_finalized()
+
+ with quiet():
+ cmd.run()
+
+ manifest = cmd.filelist.files
+ assert 'setup.py' in manifest
+
+ def test_setup_py_missing(self):
+ dist = Distribution(SETUP_ATTRS)
+ dist.script_name = 'foo.py'
+ cmd = sdist(dist)
+ cmd.ensure_finalized()
+
+ if os.path.exists("setup.py"):
+ os.remove("setup.py")
+ with quiet():
+ cmd.run()
+
+ manifest = cmd.filelist.files
+ assert 'setup.py' not in manifest
+
+ def test_setup_py_excluded(self):
+ with open("MANIFEST.in", "w", encoding="utf-8") as manifest_file:
+ manifest_file.write("exclude setup.py")
+
+ dist = Distribution(SETUP_ATTRS)
+ dist.script_name = 'foo.py'
+ cmd = sdist(dist)
+ cmd.ensure_finalized()
+
+ with quiet():
+ cmd.run()
+
+ manifest = cmd.filelist.files
+ assert 'setup.py' not in manifest
+
+ def test_defaults_case_sensitivity(self, source_dir):
+ """
+ Make sure default files (README.*, etc.) are added in a case-sensitive
+ way to avoid problems with packages built on Windows.
+ """
+
+ touch(source_dir / 'readme.rst')
+ touch(source_dir / 'SETUP.cfg')
+
+ dist = Distribution(SETUP_ATTRS)
+ # the extension deliberately capitalized for this test
+ # to make sure the actual filename (not capitalized) gets added
+ # to the manifest
+ dist.script_name = 'setup.PY'
+ cmd = sdist(dist)
+ cmd.ensure_finalized()
+
+ with quiet():
+ cmd.run()
+
+ # lowercase all names so we can test in a
+ # case-insensitive way to make sure the files
+ # are not included.
+ manifest = map(lambda x: x.lower(), cmd.filelist.files)
+ assert 'readme.rst' not in manifest, manifest
+ assert 'setup.py' not in manifest, manifest
+ assert 'setup.cfg' not in manifest, manifest
+
+ def test_exclude_dev_only_cache_folders(self, source_dir):
+ included = {
+ # Emulate problem in https://github.com/pypa/setuptools/issues/4601
+ "MANIFEST.in": (
+ "global-include LICEN[CS]E* COPYING* NOTICE* AUTHORS*\n"
+ "global-include *.txt\n"
+ ),
+ # For the sake of being conservative and limiting unforeseen side-effects
+ # we just exclude dev-only cache folders at the root of the repository:
+ "test/.venv/lib/python3.9/site-packages/bar-2.dist-info/AUTHORS.rst": "",
+ "src/.nox/py/lib/python3.12/site-packages/bar-2.dist-info/COPYING.txt": "",
+ "doc/.tox/default/lib/python3.11/site-packages/foo-4.dist-info/LICENSE": "",
+ # Let's test against false positives with similarly named files:
+ ".venv-requirements.txt": "",
+ ".tox-coveragerc.txt": "",
+ ".noxy/coveragerc.txt": "",
+ }
+
+ excluded = {
+ # .tox/.nox/.venv are well-know folders present at the root of Python repos
+ # and therefore should be excluded
+ ".tox/release/lib/python3.11/site-packages/foo-4.dist-info/LICENSE": "",
+ ".nox/py/lib/python3.12/site-packages/bar-2.dist-info/COPYING.txt": "",
+ ".venv/lib/python3.9/site-packages/bar-2.dist-info/AUTHORS.rst": "",
+ }
+
+ for file, content in {**excluded, **included}.items():
+ Path(source_dir, file).parent.mkdir(parents=True, exist_ok=True)
+ Path(source_dir, file).write_text(content, encoding="utf-8")
+
+ cmd = self.setup_with_extension()
+ self.assert_package_data_in_manifest(cmd)
+ manifest = {f.replace(os.sep, '/') for f in cmd.filelist.files}
+ for path in excluded:
+ assert os.path.exists(path)
+ assert path not in manifest, (path, manifest)
+ for path in included:
+ assert os.path.exists(path)
+ assert path in manifest, (path, manifest)
+
+ @fail_on_ascii
+ def test_manifest_is_written_with_utf8_encoding(self):
+ # Test for #303.
+ dist = Distribution(SETUP_ATTRS)
+ dist.script_name = 'setup.py'
+ mm = manifest_maker(dist)
+ mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
+ os.mkdir('sdist_test.egg-info')
+
+ # UTF-8 filename
+ filename = os.path.join('sdist_test', 'smörbröd.py')
+
+ # Must create the file or it will get stripped.
+ touch(filename)
+
+ # Add UTF-8 filename and write manifest
+ with quiet():
+ mm.run()
+ mm.filelist.append(filename)
+ mm.write_manifest()
+
+ contents = read_all_bytes(mm.manifest)
+
+ # The manifest should be UTF-8 encoded
+ u_contents = contents.decode('UTF-8')
+
+ # The manifest should contain the UTF-8 filename
+ assert posix(filename) in u_contents
+
+ @fail_on_ascii
+ def test_write_manifest_allows_utf8_filenames(self):
+ # Test for #303.
+ dist = Distribution(SETUP_ATTRS)
+ dist.script_name = 'setup.py'
+ mm = manifest_maker(dist)
+ mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
+ os.mkdir('sdist_test.egg-info')
+
+ filename = os.path.join(b'sdist_test', Filenames.utf_8)
+
+ # Must touch the file or risk removal
+ touch(filename)
+
+ # Add filename and write manifest
+ with quiet():
+ mm.run()
+ u_filename = filename.decode('utf-8')
+ mm.filelist.files.append(u_filename)
+ # Re-write manifest
+ mm.write_manifest()
+
+ contents = read_all_bytes(mm.manifest)
+
+ # The manifest should be UTF-8 encoded
+ contents.decode('UTF-8')
+
+ # The manifest should contain the UTF-8 filename
+ assert posix(filename) in contents
+
+ # The filelist should have been updated as well
+ assert u_filename in mm.filelist.files
+
+ @skip_under_xdist
+ def test_write_manifest_skips_non_utf8_filenames(self):
+ """
+ Files that cannot be encoded to UTF-8 (specifically, those that
+ weren't originally successfully decoded and have surrogate
+ escapes) should be omitted from the manifest.
+ See https://bitbucket.org/tarek/distribute/issue/303 for history.
+ """
+ dist = Distribution(SETUP_ATTRS)
+ dist.script_name = 'setup.py'
+ mm = manifest_maker(dist)
+ mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
+ os.mkdir('sdist_test.egg-info')
+
+ # Latin-1 filename
+ filename = os.path.join(b'sdist_test', Filenames.latin_1)
+
+ # Add filename with surrogates and write manifest
+ with quiet():
+ mm.run()
+ u_filename = filename.decode('utf-8', 'surrogateescape')
+ mm.filelist.append(u_filename)
+ # Re-write manifest
+ mm.write_manifest()
+
+ contents = read_all_bytes(mm.manifest)
+
+ # The manifest should be UTF-8 encoded
+ contents.decode('UTF-8')
+
+ # The Latin-1 filename should have been skipped
+ assert posix(filename) not in contents
+
+ # The filelist should have been updated as well
+ assert u_filename not in mm.filelist.files
+
+ @fail_on_ascii
+ def test_manifest_is_read_with_utf8_encoding(self):
+ # Test for #303.
+ dist = Distribution(SETUP_ATTRS)
+ dist.script_name = 'setup.py'
+ cmd = sdist(dist)
+ cmd.ensure_finalized()
+
+ # Create manifest
+ with quiet():
+ cmd.run()
+
+ # Add UTF-8 filename to manifest
+ filename = os.path.join(b'sdist_test', Filenames.utf_8)
+ cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
+ manifest = open(cmd.manifest, 'ab')
+ manifest.write(b'\n' + filename)
+ manifest.close()
+
+ # The file must exist to be included in the filelist
+ touch(filename)
+
+ # Re-read manifest
+ cmd.filelist.files = []
+ with quiet():
+ cmd.read_manifest()
+
+ # The filelist should contain the UTF-8 filename
+ filename = filename.decode('utf-8')
+ assert filename in cmd.filelist.files
+
+ @fail_on_latin1_encoded_filenames
+ def test_read_manifest_skips_non_utf8_filenames(self):
+ # Test for #303.
+ dist = Distribution(SETUP_ATTRS)
+ dist.script_name = 'setup.py'
+ cmd = sdist(dist)
+ cmd.ensure_finalized()
+
+ # Create manifest
+ with quiet():
+ cmd.run()
+
+ # Add Latin-1 filename to manifest
+ filename = os.path.join(b'sdist_test', Filenames.latin_1)
+ cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
+ manifest = open(cmd.manifest, 'ab')
+ manifest.write(b'\n' + filename)
+ manifest.close()
+
+ # The file must exist to be included in the filelist
+ touch(filename)
+
+ # Re-read manifest
+ cmd.filelist.files = []
+ with quiet():
+ cmd.read_manifest()
+
+ # The Latin-1 filename should have been skipped
+ filename = filename.decode('latin-1')
+ assert filename not in cmd.filelist.files
+
+ @fail_on_ascii
+ @fail_on_latin1_encoded_filenames
+ def test_sdist_with_utf8_encoded_filename(self):
+ # Test for #303.
+ dist = Distribution(self.make_strings(SETUP_ATTRS))
+ dist.script_name = 'setup.py'
+ cmd = sdist(dist)
+ cmd.ensure_finalized()
+
+ filename = os.path.join(b'sdist_test', Filenames.utf_8)
+ touch(filename)
+
+ with quiet():
+ cmd.run()
+
+ if sys.platform == 'darwin':
+ filename = decompose(filename)
+
+ fs_enc = sys.getfilesystemencoding()
+
+ if sys.platform == 'win32':
+ if fs_enc == 'cp1252':
+ # Python mangles the UTF-8 filename
+ filename = filename.decode('cp1252')
+ assert filename in cmd.filelist.files
+ else:
+ filename = filename.decode('mbcs')
+ assert filename in cmd.filelist.files
+ else:
+ filename = filename.decode('utf-8')
+ assert filename in cmd.filelist.files
+
+ @classmethod
+ def make_strings(cls, item):
+ if isinstance(item, dict):
+ return {key: cls.make_strings(value) for key, value in item.items()}
+ if isinstance(item, list):
+ return list(map(cls.make_strings, item))
+ return str(item)
+
+ @fail_on_latin1_encoded_filenames
+ @skip_under_xdist
+ def test_sdist_with_latin1_encoded_filename(self):
+ # Test for #303.
+ dist = Distribution(self.make_strings(SETUP_ATTRS))
+ dist.script_name = 'setup.py'
+ cmd = sdist(dist)
+ cmd.ensure_finalized()
+
+ # Latin-1 filename
+ filename = os.path.join(b'sdist_test', Filenames.latin_1)
+ touch(filename)
+ assert os.path.isfile(filename)
+
+ with quiet():
+ cmd.run()
+
+ # not all windows systems have a default FS encoding of cp1252
+ if sys.platform == 'win32':
+ # Latin-1 is similar to Windows-1252 however
+ # on mbcs filesys it is not in latin-1 encoding
+ fs_enc = sys.getfilesystemencoding()
+ if fs_enc != 'mbcs':
+ fs_enc = 'latin-1'
+ filename = filename.decode(fs_enc)
+
+ assert filename in cmd.filelist.files
+ else:
+ # The Latin-1 filename should have been skipped
+ filename = filename.decode('latin-1')
+ assert filename not in cmd.filelist.files
+
+ _EXAMPLE_DIRECTIVES = {
+ "setup.cfg - long_description and version": """
+ [metadata]
+ name = testing
+ version = file: src/VERSION.txt
+ license_files = DOWHATYOUWANT
+ long_description = file: README.rst, USAGE.rst
+ """,
+ "pyproject.toml - static readme/license files and dynamic version": """
+ [project]
+ name = "testing"
+ readme = "USAGE.rst"
+ license = {file = "DOWHATYOUWANT"}
+ dynamic = ["version"]
+ [tool.setuptools.dynamic]
+ version = {file = ["src/VERSION.txt"]}
+ """,
+ "pyproject.toml - directive with str instead of list": """
+ [project]
+ name = "testing"
+ readme = "USAGE.rst"
+ license = {file = "DOWHATYOUWANT"}
+ dynamic = ["version"]
+ [tool.setuptools.dynamic]
+ version = {file = "src/VERSION.txt"}
+ """,
+ }
+
+ @pytest.mark.parametrize("config", _EXAMPLE_DIRECTIVES.keys())
+ def test_add_files_referenced_by_config_directives(self, source_dir, config):
+ config_file, _, _ = config.partition(" - ")
+ config_text = self._EXAMPLE_DIRECTIVES[config]
+ (source_dir / 'src').mkdir()
+ (source_dir / 'src/VERSION.txt').write_text("0.42", encoding="utf-8")
+ (source_dir / 'README.rst').write_text("hello world!", encoding="utf-8")
+ (source_dir / 'USAGE.rst').write_text("hello world!", encoding="utf-8")
+ (source_dir / 'DOWHATYOUWANT').write_text("hello world!", encoding="utf-8")
+ (source_dir / config_file).write_text(config_text, encoding="utf-8")
+
+ dist = Distribution({"packages": []})
+ dist.script_name = 'setup.py'
+ dist.parse_config_files()
+
+ cmd = sdist(dist)
+ cmd.ensure_finalized()
+ with quiet():
+ cmd.run()
+
+ assert (
+ 'src/VERSION.txt' in cmd.filelist.files
+ or 'src\\VERSION.txt' in cmd.filelist.files
+ )
+ assert 'USAGE.rst' in cmd.filelist.files
+ assert 'DOWHATYOUWANT' in cmd.filelist.files
+ assert '/' not in cmd.filelist.files
+ assert '\\' not in cmd.filelist.files
+
+ def test_pyproject_toml_in_sdist(self, source_dir):
+ """
+ Check if pyproject.toml is included in source distribution if present
+ """
+ touch(source_dir / 'pyproject.toml')
+ dist = Distribution(SETUP_ATTRS)
+ dist.script_name = 'setup.py'
+ cmd = sdist(dist)
+ cmd.ensure_finalized()
+ with quiet():
+ cmd.run()
+ manifest = cmd.filelist.files
+ assert 'pyproject.toml' in manifest
+
+ def test_pyproject_toml_excluded(self, source_dir):
+ """
+ Check that pyproject.toml can excluded even if present
+ """
+ touch(source_dir / 'pyproject.toml')
+ with open('MANIFEST.in', 'w', encoding="utf-8") as mts:
+ print('exclude pyproject.toml', file=mts)
+ dist = Distribution(SETUP_ATTRS)
+ dist.script_name = 'setup.py'
+ cmd = sdist(dist)
+ cmd.ensure_finalized()
+ with quiet():
+ cmd.run()
+ manifest = cmd.filelist.files
+ assert 'pyproject.toml' not in manifest
+
+ def test_build_subcommand_source_files(self, source_dir):
+ touch(source_dir / '.myfile~')
+
+ # Sanity check: without custom commands file list should not be affected
+ dist = Distribution({**SETUP_ATTRS, "script_name": "setup.py"})
+ cmd = sdist(dist)
+ cmd.ensure_finalized()
+ with quiet():
+ cmd.run()
+ manifest = cmd.filelist.files
+ assert '.myfile~' not in manifest
+
+ # Test: custom command should be able to augment file list
+ dist = Distribution({**SETUP_ATTRS, "script_name": "setup.py"})
+ build = dist.get_command_obj("build")
+ build.sub_commands = [*build.sub_commands, ("build_custom", None)]
+
+ class build_custom(Command):
+ def initialize_options(self): ...
+
+ def finalize_options(self): ...
+
+ def run(self): ...
+
+ def get_source_files(self):
+ return ['.myfile~']
+
+ dist.cmdclass.update(build_custom=build_custom)
+
+ cmd = sdist(dist)
+ cmd.use_defaults = True
+ cmd.ensure_finalized()
+ with quiet():
+ cmd.run()
+ manifest = cmd.filelist.files
+ assert '.myfile~' in manifest
+
+ @pytest.mark.skipif("os.environ.get('SETUPTOOLS_USE_DISTUTILS') == 'stdlib'")
+ def test_build_base_pathlib(self, source_dir):
+ """
+ Ensure if build_base is a pathlib.Path, the build still succeeds.
+ """
+ dist = Distribution({
+ **SETUP_ATTRS,
+ "script_name": "setup.py",
+ "options": {"build": {"build_base": pathlib.Path('build')}},
+ })
+ cmd = sdist(dist)
+ cmd.ensure_finalized()
+ with quiet():
+ cmd.run()
+
+
+def test_default_revctrl():
+ """
+ When _default_revctrl was removed from the `setuptools.command.sdist`
+ module in 10.0, it broke some systems which keep an old install of
+ setuptools (Distribute) around. Those old versions require that the
+ setuptools package continue to implement that interface, so this
+ function provides that interface, stubbed. See #320 for details.
+
+ This interface must be maintained until Ubuntu 12.04 is no longer
+ supported (by Setuptools).
+ """
+ (ep,) = metadata.EntryPoints._from_text(
+ """
+ [setuptools.file_finders]
+ svn_cvs = setuptools.command.sdist:_default_revctrl
+ """
+ )
+ res = ep.load()
+ assert hasattr(res, '__iter__')
+
+
+class TestRegressions:
+ """
+ Can be removed/changed if the project decides to change how it handles symlinks
+ or external files.
+ """
+
+ @staticmethod
+ def files_for_symlink_in_extension_depends(tmp_path, dep_path):
+ return {
+ "external": {
+ "dir": {"file.h": ""},
+ },
+ "project": {
+ "setup.py": cleandoc(
+ f"""
+ from setuptools import Extension, setup
+ setup(
+ name="myproj",
+ version="42",
+ ext_modules=[
+ Extension(
+ "hello", sources=["hello.pyx"],
+ depends=[{dep_path!r}]
+ )
+ ],
+ )
+ """
+ ),
+ "hello.pyx": "",
+ "MANIFEST.in": "global-include *.h",
+ },
+ }
+
+ @pytest.mark.parametrize(
+ "dep_path", ("myheaders/dir/file.h", "myheaders/dir/../dir/file.h")
+ )
+ def test_symlink_in_extension_depends(self, monkeypatch, tmp_path, dep_path):
+ # Given a project with a symlinked dir and a "depends" targeting that dir
+ files = self.files_for_symlink_in_extension_depends(tmp_path, dep_path)
+ jaraco.path.build(files, prefix=str(tmp_path))
+ symlink_or_skip_test(tmp_path / "external", tmp_path / "project/myheaders")
+
+ # When `sdist` runs, there should be no error
+ members = run_sdist(monkeypatch, tmp_path / "project")
+ # and the sdist should contain the symlinked files
+ for expected in (
+ "myproj-42/hello.pyx",
+ "myproj-42/myheaders/dir/file.h",
+ ):
+ assert expected in members
+
+ @staticmethod
+ def files_for_external_path_in_extension_depends(tmp_path, dep_path):
+ head, _, tail = dep_path.partition("$tmp_path$/")
+ dep_path = tmp_path / tail if tail else head
+
+ return {
+ "external": {
+ "dir": {"file.h": ""},
+ },
+ "project": {
+ "setup.py": cleandoc(
+ f"""
+ from setuptools import Extension, setup
+ setup(
+ name="myproj",
+ version="42",
+ ext_modules=[
+ Extension(
+ "hello", sources=["hello.pyx"],
+ depends=[{str(dep_path)!r}]
+ )
+ ],
+ )
+ """
+ ),
+ "hello.pyx": "",
+ "MANIFEST.in": "global-include *.h",
+ },
+ }
+
+ @pytest.mark.parametrize(
+ "dep_path", ("$tmp_path$/external/dir/file.h", "../external/dir/file.h")
+ )
+ def test_external_path_in_extension_depends(self, monkeypatch, tmp_path, dep_path):
+ # Given a project with a "depends" targeting an external dir
+ files = self.files_for_external_path_in_extension_depends(tmp_path, dep_path)
+ jaraco.path.build(files, prefix=str(tmp_path))
+ # When `sdist` runs, there should be no error
+ members = run_sdist(monkeypatch, tmp_path / "project")
+ # and the sdist should not contain the external file
+ for name in members:
+ assert "file.h" not in name
+
+
+def run_sdist(monkeypatch, project):
+ """Given a project directory, run the sdist and return its contents"""
+ monkeypatch.chdir(project)
+ with quiet():
+ run_setup("setup.py", ["sdist"])
+
+ archive = next((project / "dist").glob("*.tar.gz"))
+ with tarfile.open(str(archive)) as tar:
+ return set(tar.getnames())
+
+
+def test_sanity_check_setuptools_own_sdist(setuptools_sdist):
+ with tarfile.open(setuptools_sdist) as tar:
+ files = tar.getnames()
+
+ # setuptools sdist should not include the .tox folder
+ tox_files = [name for name in files if ".tox" in name]
+ assert len(tox_files) == 0, f"not empty {tox_files}"
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/test_wheel.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_wheel.py
new file mode 100644
index 0000000000000000000000000000000000000000..70165c608b60bc70a908a8b56e1f1105feeb4712
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/test_wheel.py
@@ -0,0 +1,714 @@
+"""wheel tests"""
+
+from __future__ import annotations
+
+import contextlib
+import glob
+import inspect
+import os
+import pathlib
+import shutil
+import stat
+import subprocess
+import sys
+import zipfile
+from typing import Any
+
+import pytest
+from jaraco import path
+from packaging.tags import parse_tag
+from packaging.utils import canonicalize_name
+
+from pkg_resources import PY_MAJOR, Distribution, PathMetadata
+from setuptools.wheel import Wheel
+
+from .contexts import tempdir
+from .textwrap import DALS
+
+from distutils.sysconfig import get_config_var
+from distutils.util import get_platform
+
+WHEEL_INFO_TESTS = (
+ ('invalid.whl', ValueError),
+ (
+ 'simplewheel-2.0-1-py2.py3-none-any.whl',
+ {
+ 'project_name': 'simplewheel',
+ 'version': '2.0',
+ 'build': '1',
+ 'py_version': 'py2.py3',
+ 'abi': 'none',
+ 'platform': 'any',
+ },
+ ),
+ (
+ 'simple.dist-0.1-py2.py3-none-any.whl',
+ {
+ 'project_name': 'simple.dist',
+ 'version': '0.1',
+ 'build': None,
+ 'py_version': 'py2.py3',
+ 'abi': 'none',
+ 'platform': 'any',
+ },
+ ),
+ (
+ 'example_pkg_a-1-py3-none-any.whl',
+ {
+ 'project_name': 'example_pkg_a',
+ 'version': '1',
+ 'build': None,
+ 'py_version': 'py3',
+ 'abi': 'none',
+ 'platform': 'any',
+ },
+ ),
+ (
+ 'PyQt5-5.9-5.9.1-cp35.cp36.cp37-abi3-manylinux1_x86_64.whl',
+ {
+ 'project_name': 'PyQt5',
+ 'version': '5.9',
+ 'build': '5.9.1',
+ 'py_version': 'cp35.cp36.cp37',
+ 'abi': 'abi3',
+ 'platform': 'manylinux1_x86_64',
+ },
+ ),
+)
+
+
+@pytest.mark.parametrize(
+ ('filename', 'info'), WHEEL_INFO_TESTS, ids=[t[0] for t in WHEEL_INFO_TESTS]
+)
+def test_wheel_info(filename, info):
+ if inspect.isclass(info):
+ with pytest.raises(info):
+ Wheel(filename)
+ return
+ w = Wheel(filename)
+ assert {k: getattr(w, k) for k in info.keys()} == info
+
+
+@contextlib.contextmanager
+def build_wheel(extra_file_defs=None, **kwargs):
+ file_defs = {
+ 'setup.py': (
+ DALS(
+ """
+ # -*- coding: utf-8 -*-
+ from setuptools import setup
+ import setuptools
+ setup(**%r)
+ """
+ )
+ % kwargs
+ ).encode('utf-8'),
+ }
+ if extra_file_defs:
+ file_defs.update(extra_file_defs)
+ with tempdir() as source_dir:
+ path.build(file_defs, source_dir)
+ subprocess.check_call(
+ (sys.executable, 'setup.py', '-q', 'bdist_wheel'), cwd=source_dir
+ )
+ yield glob.glob(os.path.join(source_dir, 'dist', '*.whl'))[0]
+
+
+def tree_set(root):
+ contents = set()
+ for dirpath, dirnames, filenames in os.walk(root):
+ for filename in filenames:
+ contents.add(os.path.join(os.path.relpath(dirpath, root), filename))
+ return contents
+
+
+def flatten_tree(tree):
+ """Flatten nested dicts and lists into a full list of paths"""
+ output = set()
+ for node, contents in tree.items():
+ if isinstance(contents, dict):
+ contents = flatten_tree(contents)
+
+ for elem in contents:
+ if isinstance(elem, dict):
+ output |= {os.path.join(node, val) for val in flatten_tree(elem)}
+ else:
+ output.add(os.path.join(node, elem))
+ return output
+
+
+def format_install_tree(tree):
+ return {
+ x.format(
+ py_version=PY_MAJOR,
+ platform=get_platform(),
+ shlib_ext=get_config_var('EXT_SUFFIX') or get_config_var('SO'),
+ )
+ for x in tree
+ }
+
+
+def _check_wheel_install(
+ filename, install_dir, install_tree_includes, project_name, version, requires_txt
+):
+ w = Wheel(filename)
+ egg_path = os.path.join(install_dir, w.egg_name())
+ w.install_as_egg(egg_path)
+ if install_tree_includes is not None:
+ install_tree = format_install_tree(install_tree_includes)
+ exp = tree_set(install_dir)
+ assert install_tree.issubset(exp), install_tree - exp
+
+ metadata = PathMetadata(egg_path, os.path.join(egg_path, 'EGG-INFO'))
+ dist = Distribution.from_filename(egg_path, metadata=metadata)
+ assert dist.project_name == project_name
+ assert dist.version == version
+ if requires_txt is None:
+ assert not dist.has_metadata('requires.txt')
+ else:
+ # Order must match to ensure reproducibility.
+ assert requires_txt == dist.get_metadata('requires.txt').lstrip()
+
+
+class Record:
+ def __init__(self, id, **kwargs):
+ self._id = id
+ self._fields = kwargs
+
+ def __repr__(self) -> str:
+ return f'{self._id}(**{self._fields!r})'
+
+
+# Using Any to avoid possible type union issues later in test
+# making a TypedDict is not worth in a test and anonymous/inline TypedDict are experimental
+# https://github.com/python/mypy/issues/9884
+WHEEL_INSTALL_TESTS: tuple[dict[str, Any], ...] = (
+ dict(
+ id='basic',
+ file_defs={'foo': {'__init__.py': ''}},
+ setup_kwargs=dict(
+ packages=['foo'],
+ ),
+ install_tree=flatten_tree({
+ 'foo-1.0-py{py_version}.egg': {
+ 'EGG-INFO': ['PKG-INFO', 'RECORD', 'WHEEL', 'top_level.txt'],
+ 'foo': ['__init__.py'],
+ }
+ }),
+ ),
+ dict(
+ id='utf-8',
+ setup_kwargs=dict(
+ description='Description accentuée',
+ ),
+ ),
+ dict(
+ id='data',
+ file_defs={
+ 'data.txt': DALS(
+ """
+ Some data...
+ """
+ ),
+ },
+ setup_kwargs=dict(
+ data_files=[('data_dir', ['data.txt'])],
+ ),
+ install_tree=flatten_tree({
+ 'foo-1.0-py{py_version}.egg': {
+ 'EGG-INFO': ['PKG-INFO', 'RECORD', 'WHEEL', 'top_level.txt'],
+ 'data_dir': ['data.txt'],
+ }
+ }),
+ ),
+ dict(
+ id='extension',
+ file_defs={
+ 'extension.c': DALS(
+ """
+ #include "Python.h"
+
+ #if PY_MAJOR_VERSION >= 3
+
+ static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "extension",
+ NULL,
+ 0,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ };
+
+ #define INITERROR return NULL
+
+ PyMODINIT_FUNC PyInit_extension(void)
+
+ #else
+
+ #define INITERROR return
+
+ void initextension(void)
+
+ #endif
+ {
+ #if PY_MAJOR_VERSION >= 3
+ PyObject *module = PyModule_Create(&moduledef);
+ #else
+ PyObject *module = Py_InitModule("extension", NULL);
+ #endif
+ if (module == NULL)
+ INITERROR;
+ #if PY_MAJOR_VERSION >= 3
+ return module;
+ #endif
+ }
+ """
+ ),
+ },
+ setup_kwargs=dict(
+ ext_modules=[
+ Record(
+ 'setuptools.Extension', name='extension', sources=['extension.c']
+ )
+ ],
+ ),
+ install_tree=flatten_tree({
+ 'foo-1.0-py{py_version}-{platform}.egg': [
+ 'extension{shlib_ext}',
+ {
+ 'EGG-INFO': [
+ 'PKG-INFO',
+ 'RECORD',
+ 'WHEEL',
+ 'top_level.txt',
+ ]
+ },
+ ]
+ }),
+ ),
+ dict(
+ id='header',
+ file_defs={
+ 'header.h': DALS(
+ """
+ """
+ ),
+ },
+ setup_kwargs=dict(
+ headers=['header.h'],
+ ),
+ install_tree=flatten_tree({
+ 'foo-1.0-py{py_version}.egg': [
+ 'header.h',
+ {
+ 'EGG-INFO': [
+ 'PKG-INFO',
+ 'RECORD',
+ 'WHEEL',
+ 'top_level.txt',
+ ]
+ },
+ ]
+ }),
+ ),
+ dict(
+ id='script',
+ file_defs={
+ 'script.py': DALS(
+ """
+ #/usr/bin/python
+ print('hello world!')
+ """
+ ),
+ 'script.sh': DALS(
+ """
+ #/bin/sh
+ echo 'hello world!'
+ """
+ ),
+ },
+ setup_kwargs=dict(
+ scripts=['script.py', 'script.sh'],
+ ),
+ install_tree=flatten_tree({
+ 'foo-1.0-py{py_version}.egg': {
+ 'EGG-INFO': [
+ 'PKG-INFO',
+ 'RECORD',
+ 'WHEEL',
+ 'top_level.txt',
+ {'scripts': ['script.py', 'script.sh']},
+ ]
+ }
+ }),
+ ),
+ dict(
+ id='requires1',
+ install_requires='foobar==2.0',
+ install_tree=flatten_tree({
+ 'foo-1.0-py{py_version}.egg': {
+ 'EGG-INFO': [
+ 'PKG-INFO',
+ 'RECORD',
+ 'WHEEL',
+ 'requires.txt',
+ 'top_level.txt',
+ ]
+ }
+ }),
+ requires_txt=DALS(
+ """
+ foobar==2.0
+ """
+ ),
+ ),
+ dict(
+ id='requires2',
+ install_requires=f"""
+ bar
+ foo<=2.0; {sys.platform!r} in sys_platform
+ """,
+ requires_txt=DALS(
+ """
+ bar
+ foo<=2.0
+ """
+ ),
+ ),
+ dict(
+ id='requires3',
+ install_requires=f"""
+ bar; {sys.platform!r} != sys_platform
+ """,
+ ),
+ dict(
+ id='requires4',
+ install_requires="""
+ foo
+ """,
+ extras_require={
+ 'extra': 'foobar>3',
+ },
+ requires_txt=DALS(
+ """
+ foo
+
+ [extra]
+ foobar>3
+ """
+ ),
+ ),
+ dict(
+ id='requires5',
+ extras_require={
+ 'extra': f'foobar; {sys.platform!r} != sys_platform',
+ },
+ requires_txt=DALS(
+ """
+ [extra]
+ """
+ ),
+ ),
+ dict(
+ id='requires_ensure_order',
+ install_requires="""
+ foo
+ bar
+ baz
+ qux
+ """,
+ extras_require={
+ 'extra': """
+ foobar>3
+ barbaz>4
+ bazqux>5
+ quxzap>6
+ """,
+ },
+ requires_txt=DALS(
+ """
+ foo
+ bar
+ baz
+ qux
+
+ [extra]
+ foobar>3
+ barbaz>4
+ bazqux>5
+ quxzap>6
+ """
+ ),
+ ),
+ dict(
+ id='namespace_package',
+ file_defs={
+ 'foo': {
+ 'bar': {'__init__.py': ''},
+ },
+ },
+ setup_kwargs=dict(
+ namespace_packages=['foo'],
+ packages=['foo.bar'],
+ ),
+ install_tree=flatten_tree({
+ 'foo-1.0-py{py_version}.egg': [
+ 'foo-1.0-py{py_version}-nspkg.pth',
+ {
+ 'EGG-INFO': [
+ 'PKG-INFO',
+ 'RECORD',
+ 'WHEEL',
+ 'namespace_packages.txt',
+ 'top_level.txt',
+ ]
+ },
+ {
+ 'foo': [
+ '__init__.py',
+ {'bar': ['__init__.py']},
+ ]
+ },
+ ]
+ }),
+ ),
+ dict(
+ id='empty_namespace_package',
+ file_defs={
+ 'foobar': {
+ '__init__.py': (
+ "__import__('pkg_resources').declare_namespace(__name__)"
+ )
+ },
+ },
+ setup_kwargs=dict(
+ namespace_packages=['foobar'],
+ packages=['foobar'],
+ ),
+ install_tree=flatten_tree({
+ 'foo-1.0-py{py_version}.egg': [
+ 'foo-1.0-py{py_version}-nspkg.pth',
+ {
+ 'EGG-INFO': [
+ 'PKG-INFO',
+ 'RECORD',
+ 'WHEEL',
+ 'namespace_packages.txt',
+ 'top_level.txt',
+ ]
+ },
+ {
+ 'foobar': [
+ '__init__.py',
+ ]
+ },
+ ]
+ }),
+ ),
+ dict(
+ id='data_in_package',
+ file_defs={
+ 'foo': {
+ '__init__.py': '',
+ 'data_dir': {
+ 'data.txt': DALS(
+ """
+ Some data...
+ """
+ ),
+ },
+ }
+ },
+ setup_kwargs=dict(
+ packages=['foo'],
+ data_files=[('foo/data_dir', ['foo/data_dir/data.txt'])],
+ ),
+ install_tree=flatten_tree({
+ 'foo-1.0-py{py_version}.egg': {
+ 'EGG-INFO': [
+ 'PKG-INFO',
+ 'RECORD',
+ 'WHEEL',
+ 'top_level.txt',
+ ],
+ 'foo': [
+ '__init__.py',
+ {
+ 'data_dir': [
+ 'data.txt',
+ ]
+ },
+ ],
+ }
+ }),
+ ),
+)
+
+
+@pytest.mark.parametrize(
+ 'params',
+ WHEEL_INSTALL_TESTS,
+ ids=[params['id'] for params in WHEEL_INSTALL_TESTS],
+)
+def test_wheel_install(params):
+ project_name = params.get('name', 'foo')
+ version = params.get('version', '1.0')
+ install_requires = params.get('install_requires', [])
+ extras_require = params.get('extras_require', {})
+ requires_txt = params.get('requires_txt', None)
+ install_tree = params.get('install_tree')
+ file_defs = params.get('file_defs', {})
+ setup_kwargs = params.get('setup_kwargs', {})
+ with (
+ build_wheel(
+ name=project_name,
+ version=version,
+ install_requires=install_requires,
+ extras_require=extras_require,
+ extra_file_defs=file_defs,
+ **setup_kwargs,
+ ) as filename,
+ tempdir() as install_dir,
+ ):
+ _check_wheel_install(
+ filename, install_dir, install_tree, project_name, version, requires_txt
+ )
+
+
+def test_wheel_install_pep_503():
+ project_name = 'Foo_Bar' # PEP 503 canonicalized name is "foo-bar"
+ version = '1.0'
+ with (
+ build_wheel(
+ name=project_name,
+ version=version,
+ ) as filename,
+ tempdir() as install_dir,
+ ):
+ new_filename = filename.replace(project_name, canonicalize_name(project_name))
+ shutil.move(filename, new_filename)
+ _check_wheel_install(
+ new_filename,
+ install_dir,
+ None,
+ canonicalize_name(project_name),
+ version,
+ None,
+ )
+
+
+def test_wheel_no_dist_dir():
+ project_name = 'nodistinfo'
+ version = '1.0'
+ wheel_name = f'{project_name}-{version}-py2.py3-none-any.whl'
+ with tempdir() as source_dir:
+ wheel_path = os.path.join(source_dir, wheel_name)
+ # create an empty zip file
+ zipfile.ZipFile(wheel_path, 'w').close()
+ with tempdir() as install_dir:
+ with pytest.raises(ValueError):
+ _check_wheel_install(
+ wheel_path, install_dir, None, project_name, version, None
+ )
+
+
+def test_wheel_is_compatible(monkeypatch):
+ def sys_tags():
+ return {
+ (t.interpreter, t.abi, t.platform)
+ for t in parse_tag('cp36-cp36m-manylinux1_x86_64')
+ }
+
+ monkeypatch.setattr('setuptools.wheel._get_supported_tags', sys_tags)
+ assert Wheel('onnxruntime-0.1.2-cp36-cp36m-manylinux1_x86_64.whl').is_compatible()
+
+
+def test_wheel_mode():
+ @contextlib.contextmanager
+ def build_wheel(extra_file_defs=None, **kwargs):
+ file_defs = {
+ 'setup.py': (
+ DALS(
+ """
+ # -*- coding: utf-8 -*-
+ from setuptools import setup
+ import setuptools
+ setup(**%r)
+ """
+ )
+ % kwargs
+ ).encode('utf-8'),
+ }
+ if extra_file_defs:
+ file_defs.update(extra_file_defs)
+ with tempdir() as source_dir:
+ path.build(file_defs, source_dir)
+ runsh = pathlib.Path(source_dir) / "script.sh"
+ os.chmod(runsh, 0o777)
+ subprocess.check_call(
+ (sys.executable, 'setup.py', '-q', 'bdist_wheel'), cwd=source_dir
+ )
+ yield glob.glob(os.path.join(source_dir, 'dist', '*.whl'))[0]
+
+ params = dict(
+ id='script',
+ file_defs={
+ 'script.py': DALS(
+ """
+ #/usr/bin/python
+ print('hello world!')
+ """
+ ),
+ 'script.sh': DALS(
+ """
+ #/bin/sh
+ echo 'hello world!'
+ """
+ ),
+ },
+ setup_kwargs=dict(
+ scripts=['script.py', 'script.sh'],
+ ),
+ install_tree=flatten_tree({
+ 'foo-1.0-py{py_version}.egg': {
+ 'EGG-INFO': [
+ 'PKG-INFO',
+ 'RECORD',
+ 'WHEEL',
+ 'top_level.txt',
+ {'scripts': ['script.py', 'script.sh']},
+ ]
+ }
+ }),
+ )
+
+ project_name = params.get('name', 'foo')
+ version = params.get('version', '1.0')
+ install_tree = params.get('install_tree')
+ file_defs = params.get('file_defs', {})
+ setup_kwargs = params.get('setup_kwargs', {})
+
+ with (
+ build_wheel(
+ name=project_name,
+ version=version,
+ install_requires=[],
+ extras_require={},
+ extra_file_defs=file_defs,
+ **setup_kwargs,
+ ) as filename,
+ tempdir() as install_dir,
+ ):
+ _check_wheel_install(
+ filename, install_dir, install_tree, project_name, version, None
+ )
+ w = Wheel(filename)
+ base = pathlib.Path(install_dir) / w.egg_name()
+ script_sh = base / "EGG-INFO" / "scripts" / "script.sh"
+ assert script_sh.exists()
+ if sys.platform != 'win32':
+ # Editable file mode has no effect on Windows
+ assert oct(stat.S_IMODE(script_sh.stat().st_mode)) == "0o777"
diff --git a/videollama2/lib/python3.10/site-packages/setuptools/tests/text.py b/videollama2/lib/python3.10/site-packages/setuptools/tests/text.py
new file mode 100644
index 0000000000000000000000000000000000000000..e05cc633ede9e5ce4f74b66a7bf76327c2000caa
--- /dev/null
+++ b/videollama2/lib/python3.10/site-packages/setuptools/tests/text.py
@@ -0,0 +1,4 @@
+class Filenames:
+ unicode = 'smörbröd.py'
+ latin_1 = unicode.encode('latin-1')
+ utf_8 = unicode.encode('utf-8')
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/CUDAPluggableAllocator.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/CUDAPluggableAllocator.h
new file mode 100644
index 0000000000000000000000000000000000000000..8652ef0f2bfde8be21d2121f4a1cd551325462d9
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/CUDAPluggableAllocator.h
@@ -0,0 +1,183 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+
+namespace torch::cuda::CUDAPluggableAllocator {
+
+using MallocFuncType = void*(size_t, int, cudaStream_t);
+using FreeFuncType = void(void*, size_t, int, cudaStream_t);
+
+// A CUDAPluggableAllocatorDeleterContext object is used as the `ctx`
+// argument for DataPtr. We need context because a user can use
+// multiple allocators in the same PyTorch program, and
+// the allocators can have different free functions, such as:
+// free, cudaFree, cudaFreeAsync, ncclMemFree etc.
+struct TORCH_CUDA_CPP_API CUDAPluggableAllocatorDeleterContext {
+ explicit CUDAPluggableAllocatorDeleterContext(
+ std::function free_fn,
+ void* data,
+ size_t size,
+ int device,
+ cudaStream_t stream);
+
+ void free();
+
+ private:
+ std::function free_fn_;
+ void* data_;
+ size_t size_;
+ int device_;
+ cudaStream_t stream_;
+};
+
+#if defined(TORCH_HIP_VERSION)
+using streamType = c10::hip::HIPStream;
+#else
+using streamType = c10::cuda::CUDAStream;
+#endif
+
+TORCH_CUDA_CPP_API std::shared_ptr<
+ c10::cuda::CUDACachingAllocator::CUDAAllocator>
+getCurrentAllocator();
+TORCH_CUDA_CPP_API std::shared_ptr<
+ c10::cuda::CUDACachingAllocator::CUDAAllocator>
+createCustomAllocator(
+ std::function alloc_fn,
+ std::function free_fn);
+TORCH_CUDA_CPP_API void changeCurrentAllocator(
+ const std::shared_ptr&
+ allocator);
+
+struct _AllocationMetadata {
+ _AllocationMetadata();
+ _AllocationMetadata(
+ size_t size,
+ c10::DeviceIndex device_idx,
+ cudaStream_t stream);
+ size_t size;
+ c10::DeviceIndex device_idx;
+ cudaStream_t stream;
+};
+
+struct TORCH_CUDA_CPP_API CUDAPluggableAllocator
+ : public c10::cuda::CUDACachingAllocator::CUDAAllocator {
+ CUDAPluggableAllocator(
+ std::function alloc_fn,
+ std::function free_fn);
+
+ CUDAPluggableAllocator(CUDAPluggableAllocator& other);
+ CUDAPluggableAllocator& operator=(CUDAPluggableAllocator& other) = delete;
+
+ void set_init_fn(std::function init_fn);
+
+ void set_reset_fn(std::function reset_fn);
+
+ void set_memory_fraction_fn(
+ std::function memory_fraction_fn);
+
+ void set_base_alloc_fn(std::function base_alloc_fn);
+
+ void set_record_stream_fn(
+ std::function record_stream_fn);
+
+ void set_begin_allocate_to_pool(
+ std::function<
+ void(int, c10::cuda::MempoolId_t, std::function)>
+ capture_begin_fn);
+
+ void set_end_allocate_to_pool_fn(
+ std::function capture_about_to_end_fn);
+
+ void set_release_pool(
+ std::function capture_destroy_fn);
+
+ void* malloc(size_t size, c10::DeviceIndex device, cudaStream_t stream);
+
+ c10::DataPtr allocate(size_t size) override;
+ c10::DeleterFnPtr raw_deleter() const override;
+
+ void* raw_alloc(size_t nbytes) override;
+ void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream) override;
+ void raw_delete(void* ptr) override;
+ void init(int device_count) override;
+ bool initialized() override;
+ void setMemoryFraction(double fraction, c10::DeviceIndex device) override;
+ void emptyCache() override;
+ void cacheInfo(c10::DeviceIndex device, size_t* largestBlock) override;
+ void* getBaseAllocation(void* ptr, size_t* size) override;
+
+ void recordStream(const c10::DataPtr&, streamType stream) override;
+
+ c10::CachingDeviceAllocator::DeviceStats getDeviceStats(
+ c10::DeviceIndex device) override;
+ void resetAccumulatedStats(c10::DeviceIndex device) override;
+ void resetPeakStats(c10::DeviceIndex device) override;
+ c10::cuda::CUDACachingAllocator::SnapshotInfo snapshot() override;
+ void beginAllocateToPool(
+ c10::DeviceIndex device,
+ c10::cuda::MempoolId_t mempool_id,
+ std::function) override;
+ void endAllocateToPool(
+ c10::DeviceIndex device,
+ c10::cuda::MempoolId_t mempool_id) override;
+ void releasePool(c10::DeviceIndex device, c10::cuda::MempoolId_t mempool_id)
+ override;
+ std::shared_ptr getIpcDevPtr(std::string handle) override;
+ c10::cuda::CUDACachingAllocator::ShareableHandle shareIpcHandle(
+ void*) override;
+ void recordHistory(
+ bool enabled,
+ c10::cuda::CUDACachingAllocator::CreateContextFn context_recorder,
+ size_t alloc_trace_max_entries,
+ c10::cuda::CUDACachingAllocator::RecordContext when) override;
+ void attachOutOfMemoryObserver(
+ c10::cuda::CUDACachingAllocator::OutOfMemoryObserver observer) override;
+ void attachAllocatorTraceTracker(
+ c10::cuda::CUDACachingAllocator::AllocatorTraceTracker tracker) override;
+ std::shared_ptr
+ getCheckpointState(c10::DeviceIndex device, at::cuda::MempoolId_t id)
+ override;
+ c10::cuda::CUDACachingAllocator::CheckpointDelta setCheckpointPoolState(
+ c10::DeviceIndex device,
+ std::shared_ptr pps)
+ override;
+ void enablePeerAccess(c10::DeviceIndex dev, c10::DeviceIndex dev_to_access)
+ override;
+ cudaError_t memcpyAsync(
+ void* dst,
+ int dstDevice,
+ const void* src,
+ int srcDevice,
+ size_t count,
+ cudaStream_t stream,
+ bool p2p_enabled) override;
+ std::string name() override;
+ void copy_data(void* dest, const void* src, std::size_t count) const final;
+
+ protected:
+ std::function alloc_fn_;
+ std::function free_fn_;
+ std::function init_fn_;
+ std::function reset_fn_;
+ std::function memory_fraction_fn_;
+ std::function base_alloc_fn_;
+ std::function record_stream_fn_;
+ std::function<
+ void(int, c10::cuda::MempoolId_t, std::function)>
+ begin_allocate_to_pool_fn_;
+ std::function end_allocate_to_pool_fn_;
+ std::function relase_pool_fn_;
+ std::mutex allocator_mutex_;
+ // We do the bookeeping here in order to simplify custom allocators
+ std::unordered_map allocation_metadata_;
+
+ bool initialized_ = false;
+};
+} // namespace torch::cuda::CUDAPluggableAllocator
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Event.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Event.h
new file mode 100644
index 0000000000000000000000000000000000000000..5c4d95b285997fa4ddfce57423e0b264dde5898a
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Event.h
@@ -0,0 +1,18 @@
+#ifndef THCP_EVENT_INC
+#define THCP_EVENT_INC
+
+#include
+#include
+
+struct THCPEvent {
+ PyObject_HEAD at::cuda::CUDAEvent cuda_event;
+};
+extern PyObject* THCPEventClass;
+
+void THCPEvent_init(PyObject* module);
+
+inline bool THCPEvent_Check(PyObject* obj) {
+ return THCPEventClass && PyObject_IsInstance(obj, THCPEventClass);
+}
+
+#endif // THCP_EVENT_INC
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/GdsFile.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/GdsFile.h
new file mode 100644
index 0000000000000000000000000000000000000000..0edf927393db75c52c980de7fe73963dc237819b
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/GdsFile.h
@@ -0,0 +1,7 @@
+#ifndef THCP_GDSFILE_INC
+#define THCP_GDSFILE_INC
+
+#include
+
+void initGdsBindings(PyObject* module);
+#endif // THCP_GDSFILE_INC
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Module.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Module.h
new file mode 100644
index 0000000000000000000000000000000000000000..0c89e4bc65f2591c074083064e64eca421ee5760
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Module.h
@@ -0,0 +1,11 @@
+#ifndef THCP_CUDA_MODULE_INC
+#define THCP_CUDA_MODULE_INC
+
+PyObject* THCPModule_getDevice_wrap(PyObject* self);
+PyObject* THCPModule_setDevice_wrap(PyObject* self, PyObject* arg);
+PyObject* THCPModule_getDeviceName_wrap(PyObject* self, PyObject* arg);
+PyObject* THCPModule_getDriverVersion(PyObject* self);
+PyObject* THCPModule_isDriverSufficient(PyObject* self);
+PyObject* THCPModule_getCurrentBlasHandle_wrap(PyObject* self);
+
+#endif
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Stream.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Stream.h
new file mode 100644
index 0000000000000000000000000000000000000000..9b7197d74390c142744ec6d64df967b6c7f25903
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Stream.h
@@ -0,0 +1,20 @@
+#ifndef THCP_STREAM_INC
+#define THCP_STREAM_INC
+
+#include
+#include
+#include
+
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+struct THCPStream : THPStream {
+ at::cuda::CUDAStream cuda_stream;
+};
+extern PyObject* THCPStreamClass;
+
+void THCPStream_init(PyObject* module);
+
+inline bool THCPStream_Check(PyObject* obj) {
+ return THCPStreamClass && PyObject_IsInstance(obj, THCPStreamClass);
+}
+
+#endif // THCP_STREAM_INC
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/THCP.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/THCP.h
new file mode 100644
index 0000000000000000000000000000000000000000..697a66dc3ee91a22ae2503852db04dbba2fc74d4
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/THCP.h
@@ -0,0 +1,10 @@
+#ifndef THCP_H
+#define THCP_H
+
+#include
+#include
+#include
+#include
+#include
+
+#endif
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/comm.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/comm.h
new file mode 100644
index 0000000000000000000000000000000000000000..860629bcf2e9a3ec826d8be5b1c6c1364019a4c0
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/comm.h
@@ -0,0 +1,52 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+namespace torch::cuda {
+
+using tensor_list2d = std::vector>;
+
+TORCH_CUDA_CU_API std::vector& broadcast_out(
+ const at::Tensor& tensor,
+ std::vector& out_tensors);
+TORCH_CUDA_CU_API std::vector broadcast(
+ const at::Tensor& tensor,
+ at::IntArrayRef devices);
+TORCH_CUDA_CU_API tensor_list2d broadcast_coalesced(
+ at::TensorList tensors,
+ at::IntArrayRef devices,
+ size_t buffer_size);
+
+TORCH_CUDA_CU_API std::vector& scatter_out(
+ const at::Tensor& tensor,
+ std::vector& out_tensors,
+ int64_t dim = 0,
+ const std::optional>>&
+ streams = std::nullopt);
+
+TORCH_CUDA_CU_API std::vector scatter(
+ const at::Tensor& tensor,
+ at::IntArrayRef devices,
+ const std::optional>& chunk_sizes = std::nullopt,
+ int64_t dim = 0,
+ const std::optional>>&
+ streams = std::nullopt);
+
+TORCH_CUDA_CU_API at::Tensor& gather_out(
+ at::TensorList tensors,
+ at::Tensor& out_tensor,
+ int64_t dim);
+
+TORCH_CUDA_CU_API at::Tensor gather(
+ at::TensorList tensors,
+ int64_t dim,
+ std::optional destination_index);
+
+} // namespace torch::cuda
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/device_set.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/device_set.h
new file mode 100644
index 0000000000000000000000000000000000000000..c533dae3baad36a42e0f97f55b9eb7a747191dc3
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/device_set.h
@@ -0,0 +1,11 @@
+#pragma once
+
+#include
+#include
+#include
+
+namespace torch {
+
+using device_set = std::bitset;
+
+} // namespace torch
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/memory_snapshot.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/memory_snapshot.h
new file mode 100644
index 0000000000000000000000000000000000000000..fe5699af416012cbd8a758939fa55f452fc953a4
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/memory_snapshot.h
@@ -0,0 +1,27 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+namespace torch::cuda {
+
+// C++-only versions of these, for python use
+// those defined in cuda/Module.cpp which also record python state.
+TORCH_CUDA_CU_API void _record_memory_history(
+ bool enabled,
+ bool record_context = true,
+ int64_t trace_alloc_max_entries = 1,
+ bool trace_alloc_record_context = false,
+ bool record_cpp_context = false);
+
+TORCH_CUDA_CU_API void _record_memory_history(
+ std::optional enabled = "all",
+ std::optional context = "all",
+ const std::string& stacks = "all",
+ size_t max_entries = SIZE_MAX);
+
+TORCH_CUDA_CU_API std::string _memory_snapshot_pickled();
+
+} // namespace torch::cuda
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/nccl.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/nccl.h
new file mode 100644
index 0000000000000000000000000000000000000000..1415cccc25ab947a2f2613253e3646525f0ec3b2
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/nccl.h
@@ -0,0 +1,219 @@
+#pragma once
+
+#include
+#include
+
+#include
+#include
+#include
+
+// NCCL BFloat16 is enabled only for CUDA 11+ and NCCL versions 2.10+, or for
+// HIP 3.1+
+#if defined(__CUDA_BF16_TYPES_EXIST__)
+#define HAS_NCCL_BF16_DATATYPE \
+ ((NCCL_MAJOR > 2) || (NCCL_MAJOR == 2) && (NCCL_MINOR >= 10))
+#elif defined(USE_ROCM) && (TORCH_HIP_VERSION >= 301)
+#define HAS_NCCL_BF16_DATATYPE 1
+#else
+#define HAS_NCCL_BF16_DATATYPE 0
+#endif
+
+namespace torch::cuda::nccl {
+
+/* The following are copied from and redefined in torch::cuda::nccl
+ * namespace */
+/* pytorch should only use the following definition within pytorch scope */
+
+/* Opaque handle to communicator to ncclComm*, this will reinterpret as ncclComm
+ * in nccl.cpp */
+typedef void* ncclComm_t;
+
+/** redefine nccl unique ID in torch scope. this should be identical to native
+ * nccl impp. */
+#define NCCL_UNIQUE_ID_BYTES 128
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
+typedef struct {
+ char internal[NCCL_UNIQUE_ID_BYTES];
+} ncclUniqueId;
+
+/* Error type */
+enum class ncclResult {
+ Success = 0,
+ UnhandledCudaError = 1,
+ SystemError = 2,
+ InternalError = 3,
+ InvalidArgument = 4,
+ InvalidUsage = 5,
+ RemoteError = 6,
+ InProgress = 7,
+ NumResults = 8
+};
+
+/* Reduction operation selector */
+enum class ncclRedOp { Sum = 0, Prod = 1, Max = 2, Min = 3, NumOps = 4 };
+
+/* Data types */
+enum class ncclDataType {
+ Int8 = 0,
+ Char = 0,
+ Uint8 = 1,
+ Int32 = 2,
+ Int = 2,
+ Uint32 = 3,
+ Int64 = 4,
+ Uint64 = 5,
+ Float16 = 6,
+ Half = 6,
+ Float32 = 7,
+ Float = 7,
+ Float64 = 8,
+ Double = 8,
+ Bfloat16 = 9,
+ NumTypes = 10
+};
+
+// RAII helper class to manage NCCL group API and CUDA free mutex.
+// The destructor is allowed to throw since this helper class only
+// manages group and lock lifetimes.
+struct AutoNcclGroup {
+ AutoNcclGroup();
+ AutoNcclGroup(ncclComm_t comm, bool comm_nonblocking);
+ ~AutoNcclGroup() noexcept(false);
+ ncclComm_t comm_;
+ bool comm_nonblocking_;
+};
+
+// NOTE: this is exposed only so that python_nccl.cpp can some of these helpers.
+// Don't use them outside of these files.
+namespace detail {
+
+TORCH_CUDA_CPP_API void throw_nccl_error(ncclResult status);
+
+inline void NCCL_CHECK(ncclResult status) {
+ if (status != ncclResult::Success) {
+ throw_nccl_error(status);
+ }
+}
+
+TORCH_CUDA_CPP_API at::ArrayRef get_communicators(
+ at::TensorList inputs);
+TORCH_CUDA_CPP_API void check_inputs(
+ at::TensorList inputs,
+ at::TensorList outputs,
+ int input_multiplier,
+ int output_multiplier);
+TORCH_CUDA_CPP_API void check_inputs(
+ at::TensorList inputs,
+ const at::Tensor& output,
+ int root,
+ int input_multiplier,
+ int output_multiplier);
+
+} // namespace detail
+
+using comm_list = std::vector;
+using stream_list = std::vector>;
+
+TORCH_CUDA_CPP_API std::uint64_t version();
+TORCH_CUDA_CPP_API const char* version_suffix();
+
+bool is_available(at::TensorList tensors);
+
+TORCH_CUDA_CPP_API void get_unique_id(ncclUniqueId& id);
+TORCH_CUDA_CPP_API ncclComm_t
+comm_init_rank(int nranks, const ncclUniqueId& comm_id, int rank);
+TORCH_CUDA_CPP_API void comm_destroy(ncclComm_t comm);
+
+TORCH_CUDA_CPP_API void broadcast(
+ at::TensorList tensors,
+ const stream_list& streams = {},
+ const comm_list& user_comms = {});
+
+size_t get_max_count();
+
+TORCH_CUDA_CPP_API void reduce(
+ const std::vector& inputs,
+ at::Tensor& output,
+ int32_t root = 0,
+ int32_t op = static_cast(ncclRedOp::Sum),
+ const stream_list& streams = {},
+ const comm_list& user_comms = {});
+
+TORCH_CUDA_CPP_API void reduce(
+ std::vector& inputs,
+ int32_t root = 0,
+ int32_t op = static_cast(ncclRedOp::Sum),
+ const stream_list& streams = {},
+ const comm_list& user_comms = {});
+
+TORCH_CUDA_CPP_API void all_reduce(
+ const std::vector& inputs,
+ std::vector& outputs,
+ int32_t op = static_cast(ncclRedOp::Sum),
+ const stream_list& streams = {},
+ const comm_list& user_comms = {});
+
+TORCH_CUDA_CPP_API void reduce_scatter(
+ const std::vector& inputs,
+ std::vector& outputs,
+ int32_t op = static_cast(ncclRedOp::Sum),
+ const stream_list& streams = {},
+ const comm_list& user_comms = {});
+
+TORCH_CUDA_CPP_API void scatter(
+ const std::vector& inputs,
+ at::Tensor& outputs,
+ ncclComm_t comm,
+ at::cuda::CUDAStream& stream,
+ int32_t root = 0);
+
+TORCH_CUDA_CPP_API void all_gather(
+ const std::vector& inputs,
+ std::vector& outputs,
+ const stream_list& streams = {},
+ const comm_list& user_comms = {});
+
+TORCH_CUDA_CPP_API void gather(
+ const at::Tensor& inputs,
+ std::vector& outputs,
+ ncclComm_t comm,
+ at::cuda::CUDAStream& stream,
+ int32_t root = 0);
+
+TORCH_CUDA_CPP_API void all2all_single_equal_split(
+ at::Tensor& input,
+ at::Tensor& output,
+ int size,
+ ncclComm_t comm,
+ at::cuda::CUDAStream& stream);
+
+TORCH_CUDA_CPP_API void all2all_single_unequal_split(
+ void* sendbuff,
+ const size_t* sendcounts,
+ const size_t* senddispls,
+ void* recvbuff,
+ const size_t* recvcounts,
+ const size_t* recvdispls,
+ size_t size,
+ c10::ScalarType type,
+ ncclComm_t comm,
+ at::cuda::CUDAStream& stream);
+
+TORCH_CUDA_CPP_API void all2all(
+ std::vector& outputTensors,
+ std::vector& inputTensors,
+ ncclComm_t _comm,
+ at::cuda::CUDAStream& stream);
+
+TORCH_CUDA_CPP_API void send(
+ const at::Tensor& input,
+ ncclComm_t comm,
+ at::cuda::CUDAStream stream,
+ int dst);
+
+TORCH_CUDA_CPP_API void recv(
+ at::Tensor& output,
+ ncclComm_t comm,
+ at::cuda::CUDAStream stream,
+ int src);
+} // namespace torch::cuda::nccl
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_comm.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_comm.h
new file mode 100644
index 0000000000000000000000000000000000000000..e87ae053fbe7fc59d1713cf8b148dfe52cbd01dc
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_comm.h
@@ -0,0 +1,7 @@
+#pragma once
+
+namespace torch::cuda::python {
+
+void initCommMethods(PyObject* module);
+
+} // namespace torch::cuda::python
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_nccl.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_nccl.h
new file mode 100644
index 0000000000000000000000000000000000000000..ebaa666a22d2cff60e2ef2a2701003d0ca61a8e4
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_nccl.h
@@ -0,0 +1,13 @@
+#pragma once
+
+#include
+
+PyObject* THCPModule_nccl_version(PyObject* self, PyObject* args);
+PyObject* THCPModule_nccl_version_suffix(PyObject* self, PyObject* args);
+PyObject* THCPModule_nccl_unique_id(PyObject* self, PyObject* args);
+PyObject* THCPModule_nccl_init_rank(PyObject* self, PyObject* args);
+PyObject* THCPModule_nccl_reduce(PyObject* self, PyObject* args);
+PyObject* THCPModule_nccl_all_reduce(PyObject* self, PyObject* args);
+PyObject* THCPModule_nccl_broadcast(PyObject* self, PyObject* args);
+PyObject* THCPModule_nccl_all_gather(PyObject* self, PyObject* args);
+PyObject* THCPModule_nccl_reduce_scatter(PyObject* self, PyObject* args);
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_log.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_log.h
new file mode 100644
index 0000000000000000000000000000000000000000..6282e53f4bff1773e7f8a8249e444c8e5da45d87
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_log.h
@@ -0,0 +1,126 @@
+#pragma once
+#include
+#include
+#include
+#include
+#include
+
+// `TorchScript` offers a simple logging facility that can enabled by setting an
+// environment variable `PYTORCH_JIT_LOG_LEVEL`.
+
+// Logging is enabled on a per file basis. To enable logging in
+// `dead_code_elimination.cpp`, `PYTORCH_JIT_LOG_LEVEL` should be
+// set to `dead_code_elimination.cpp` or, simply, to `dead_code_elimination`
+// (i.e. `PYTORCH_JIT_LOG_LEVEL=dead_code_elimination`).
+
+// Multiple files can be logged by separating each file name with a colon `:` as
+// in the following example,
+// `PYTORCH_JIT_LOG_LEVEL=dead_code_elimination:guard_elimination`
+
+// There are 3 logging levels available for your use ordered by the detail level
+// from lowest to highest.
+
+// * `GRAPH_DUMP` should be used for printing entire graphs after optimization
+// passes
+// * `GRAPH_UPDATE` should be used for reporting graph transformations (i.e.
+// node deletion, constant folding, etc)
+// * `GRAPH_DEBUG` should be used for providing information useful for debugging
+// the internals of a particular optimization pass or analysis
+
+// The default logging level is `GRAPH_DUMP` meaning that only `GRAPH_DUMP`
+// statements will be enabled when one specifies a file(s) in
+// `PYTORCH_JIT_LOG_LEVEL`.
+
+// `GRAPH_UPDATE` can be enabled by prefixing a file name with an `>` as in
+// `>alias_analysis`.
+// `GRAPH_DEBUG` can be enabled by prefixing a file name with an `>>` as in
+// `>>alias_analysis`.
+// `>>>` is also valid and **currently** is equivalent to `GRAPH_DEBUG` as there
+// is no logging level that is higher than `GRAPH_DEBUG`.
+
+namespace torch::jit {
+
+struct Node;
+struct Graph;
+
+enum class JitLoggingLevels {
+ GRAPH_DUMP = 0,
+ GRAPH_UPDATE,
+ GRAPH_DEBUG,
+};
+
+TORCH_API std::string get_jit_logging_levels();
+
+TORCH_API void set_jit_logging_levels(std::string level);
+
+TORCH_API void set_jit_logging_output_stream(std::ostream& out_stream);
+
+TORCH_API std::ostream& get_jit_logging_output_stream();
+
+TORCH_API std::string getHeader(const Node* node);
+
+TORCH_API std::string log_function(const std::shared_ptr& graph);
+
+TORCH_API ::torch::jit::JitLoggingLevels jit_log_level();
+
+// Prefix every line in a multiline string \p IN_STR with \p PREFIX.
+TORCH_API std::string jit_log_prefix(
+ const std::string& prefix,
+ const std::string& in_str);
+
+TORCH_API std::string jit_log_prefix(
+ ::torch::jit::JitLoggingLevels level,
+ const char* fn,
+ int l,
+ const std::string& in_str);
+
+TORCH_API bool is_enabled(
+ const char* cfname,
+ ::torch::jit::JitLoggingLevels level);
+
+TORCH_API std::ostream& operator<<(
+ std::ostream& out,
+ ::torch::jit::JitLoggingLevels level);
+
+#define JIT_LOG(level, ...) \
+ if (is_enabled(__FILE__, level)) { \
+ ::torch::jit::get_jit_logging_output_stream() \
+ << ::torch::jit::jit_log_prefix( \
+ level, __FILE__, __LINE__, ::c10::str(__VA_ARGS__)); \
+ }
+
+// tries to reconstruct original python source
+#define SOURCE_DUMP(MSG, G) \
+ JIT_LOG( \
+ ::torch::jit::JitLoggingLevels::GRAPH_DUMP, \
+ MSG, \
+ "\n", \
+ ::torch::jit::log_function(G));
+// use GRAPH_DUMP for dumping graphs after optimization passes
+#define GRAPH_DUMP(MSG, G) \
+ JIT_LOG( \
+ ::torch::jit::JitLoggingLevels::GRAPH_DUMP, MSG, "\n", (G)->toString());
+// use GRAPH_UPDATE for reporting graph transformations (i.e. node deletion,
+// constant folding, CSE)
+#define GRAPH_UPDATE(...) \
+ JIT_LOG(::torch::jit::JitLoggingLevels::GRAPH_UPDATE, __VA_ARGS__);
+// use GRAPH_DEBUG to provide information useful for debugging a particular opt
+// pass
+#define GRAPH_DEBUG(...) \
+ JIT_LOG(::torch::jit::JitLoggingLevels::GRAPH_DEBUG, __VA_ARGS__);
+// use GRAPH_EXPORT to export a graph so that the IR can be loaded by a script
+#define GRAPH_EXPORT(MSG, G) \
+ JIT_LOG( \
+ ::torch::jit::JitLoggingLevels::GRAPH_DEBUG, \
+ MSG, \
+ "\n\n", \
+ (G)->toString(), \
+ "");
+
+#define GRAPH_DUMP_ENABLED \
+ (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_DUMP))
+#define GRAPH_UPDATE_ENABLED \
+ (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_UPDATE))
+#define GRAPH_DEBUG_ENABLED \
+ (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_DEBUG))
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_opt_limit.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_opt_limit.h
new file mode 100644
index 0000000000000000000000000000000000000000..c013431a19cd3e3a6773f4b36dc6627ddc3d21a7
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_opt_limit.h
@@ -0,0 +1,37 @@
+#pragma once
+#include
+#include
+#include
+
+// `TorchScript` offers a simple optimization limit checker
+// that can be configured through environment variable `PYTORCH_JIT_OPT_LIMIT`.
+// The purpose is to limit how many optimization you can make per pass.
+// This is useful for debugging any passes.
+
+// Opt limit checker is enabled on a per file basis (hence per pass). For
+// example, in `constant_propagation.cpp`, `PYTORCH_JIT_OPT_LIMIT` should be set
+// to `constant_propagation=` or, simply, to
+// `constant_propagation=` where is the number of
+// optimizations you want to make for the pass. (i.e.
+// `PYTORCH_JIT_OPT_LIMIT="constant_propagation="`).
+
+// Multiple files can be configured by separating each file name with a colon
+// `:` as in the following example,
+// `PYTORCH_JIT_OPT_LIMIT="constant_propagation=:dead_code_elimination="`
+
+// You can call opt limiter by calling JIT_OPT_ALLOWED. It will return true if
+// we haven't reached the optimization limit yet. Otherwise, it will return
+// false. Typical usage:
+
+// if (!JIT_OPT_ALLOWED) {
+// GRAPH_DUMP(...); //supplied from jit_log
+// return;
+// }
+
+namespace torch::jit {
+
+TORCH_API bool opt_limit(const char* pass_name);
+
+#define JIT_OPT_ALLOWED opt_limit(__FILE__)
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h
new file mode 100644
index 0000000000000000000000000000000000000000..6495e1eaed5838bc5ae742739195e4ae1ca47919
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h
@@ -0,0 +1,9 @@
+#pragma once
+
+#include
+
+namespace torch::jit {
+
+TORCH_API bool AddIfThenElseOp(std::shared_ptr& graph);
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h
new file mode 100644
index 0000000000000000000000000000000000000000..46d941aabab22a47b5b4eb7c145789c1ee503426
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h
@@ -0,0 +1,20 @@
+#pragma once
+
+#include
+
+namespace torch::jit {
+
+TORCH_API std::shared_ptr Canonicalize(
+ const std::shared_ptr& graph,
+ bool keep_unique_names = true);
+
+TORCH_API void CanonicalizeOutputs(std::shared_ptr& graph);
+
+TORCH_API std::optional firstOrLastUse(Value* v, bool find_first);
+
+TORCH_API bool isBeforeOrAfter(
+ const Use& a,
+ const Use& b,
+ bool checking_before);
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..6e95cc199d2b261e04bdcd52d4a95ffdaceb4a01
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.h
@@ -0,0 +1,9 @@
+#pragma once
+
+#include
+
+namespace torch::jit {
+
+TORCH_API void CanonicalizeOps(const std::shared_ptr& graph);
+
+}
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h
new file mode 100644
index 0000000000000000000000000000000000000000..a2c2280d5d8d147dd05ccc19b292faf66fa6f369
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h
@@ -0,0 +1,10 @@
+
+#pragma once
+
+#include
+
+namespace torch::jit {
+
+TORCH_API void CheckStrictFusion(std::shared_ptr& graph);
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h
new file mode 100644
index 0000000000000000000000000000000000000000..040ade790be6b0ed04c6e13d641d22cee7be1730
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h
@@ -0,0 +1,17 @@
+#pragma once
+
+#include
+
+namespace torch::jit {
+
+// Eliminates common inputs among `aten::cat` ops.
+TORCH_API bool EliminateConcatCommonInputs(const std::shared_ptr& graph);
+
+// Expands `aten::cat` ops into `aten::copy` ops and eliminates redudancies
+// in the buffers used for concatenation if possible.
+TORCH_API void ExpandConcatAndEliminateRedundancy(
+ const std::shared_ptr& graph);
+
+TORCH_API bool CombineConcats(const std::shared_ptr& graph);
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h
new file mode 100644
index 0000000000000000000000000000000000000000..858da81458ba9edf61437ed3c89915307c51d9bc
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h
@@ -0,0 +1,30 @@
+#pragma once
+
+#include
+
+namespace torch::jit {
+
+// Runs constant propagation on all objects unless ignore_custom_classes is
+// specified as true, in which case user defined classes are skipped. This is
+// useful to prevent early fusion of packing operations, which end up lowering
+// away information about their constructors (e.g. packed::linear_clamp_prepack
+// and prepacked::conv2d_clamp_prepack)
+// Returns True if the pass made a change to the graph
+TORCH_API bool ConstantPropagation(
+ std::shared_ptr& graph,
+ bool ignore_custom_classes = false);
+
+// runs constant propagation only on ops that have non-aliasing inputs & outputs
+// Returns True if the pass made a change to the graph
+TORCH_API bool ConstantPropagationImmutableTypes(std::shared_ptr& graph);
+
+// Runs the node if its inputs are constants. Callers of this function must
+// make their own determination if constant prop is appropriate - for example
+// non-deterministic ops or ops with side effects. If ignore_custom_classes is
+// specified, nodes that output user defined classes are not run.
+TORCH_API std::optional runNodeIfInputsAreConstant(
+ const Node* node,
+ bool ignore_custom_classes = false,
+ AliasDb* db = nullptr);
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h
new file mode 100644
index 0000000000000000000000000000000000000000..49a9ae52378cb2574dd55c9bf9040760b28c5ca5
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h
@@ -0,0 +1,12 @@
+#pragma once
+
+#include
+#include
+
+namespace torch::jit {
+
+TORCH_API void CreateFunctionalGraphs(const std::shared_ptr& graph);
+
+TORCH_API void InlineFunctionalGraphs(const std::shared_ptr& graph);
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..a1a20d5e1e714e707242012adc8e5c04470c2aed
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h
@@ -0,0 +1,9 @@
+#pragma once
+
+#include
+
+namespace torch::jit {
+
+TORCH_API void DecomposeOps(std::shared_ptr& graph);
+
+}
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h
new file mode 100644
index 0000000000000000000000000000000000000000..847b5e60c95efb904fa8b6bf3bf54ea394f02927
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h
@@ -0,0 +1,21 @@
+#pragma once
+
+#include
+
+namespace torch::jit {
+
+// Erase NumberType information. This is necessary for and only used in
+// exporting to ONNX. This pass ensures that no remaining Values have
+// NumberType types, replacing them with tensors.
+// The following things are done to erase NumberType info:
+// - NumberType outputs are changed to DynamicType.
+// - prim::Constant nodes which are numbers get changed into 0-dim tensors of
+// the corresponding type
+// - prim::TensorToNum, aten::Float, aten::Int and prim::NumToTensor nodes
+// are erased.
+//
+// The pass assumes that DCE will be called sometime after.
+TORCH_API void EraseNumberTypes(const std::shared_ptr& graph);
+TORCH_API void EraseNumberTypesOnBlock(Block* block);
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h
new file mode 100644
index 0000000000000000000000000000000000000000..8061e9e78005e77e73377cf2481c531ee1082748
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h
@@ -0,0 +1,45 @@
+#pragma once
+
+#include
+#include
+
+namespace torch::jit {
+
+// Directly after tracing, we have an ill-formed graph with blocks inserted.
+// Example:
+//
+// graph(%self : ClassType,
+// %input.1 : Float(3, 4)):
+// %1 : ClassType = prim::GetAttr[name="relu1"](%self)
+// %2 : ClassType = prim::GetAttr[name="relu2"](%self)
+// %3 : ClassType = prim::GetAttr[name="rrr"](%2)
+// = prim::TracedModuleForward[scope="__module.relu1"]()
+// block0():
+// %input : Float(3, 4) = aten::relu(%input.1),
+// -> ()
+// = prim::TracedModuleForward[scope="__module.relu2"](),
+// block0():
+// = prim::TracedModuleForward[scope="__module.relu2.rrr"](),
+// block0():
+// %6 : Float(3, 4) = aten::relu(%input),
+// -> ()
+// -> ()
+// return (%6)
+//
+// In this pass, we:
+// 1) Lift Value defs to as high of a scope as needed to ensure that
+// they dominate all their uses. For example, `input` in the above
+// graph needs to be lifted to the top-level block so that its use
+// in the second `relu` operator is dominated.
+// 2) Lambda lift the blocks. This ensures that all values used within
+// each scope have their defs captured.
+// 3) Convert the scope blocks into methods on their respective Modules,
+// and convert TracedModuleForward nodes to CallMethod nodes into those
+// methods.
+//
+// Then, we'll have a well-formed graph with proper method calls.
+TORCH_API void FixupTraceScopeBlocks(
+ std::shared_ptr& graph,
+ Module* self);
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h
new file mode 100644
index 0000000000000000000000000000000000000000..adbd8f357f1e55fc5dafd890e7357ded6c1c2071
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h
@@ -0,0 +1,13 @@
+#pragma once
+
+#include
+
+namespace torch::jit {
+
+// Converts operators & their parameters to mkldnn if it is profitable
+// Currently encompassing Conv2d and Conv3d, and Linear
+// Op must be in float32 and mkldnn must be built
+// This pass only works on frozen graph
+TORCH_API void ConvertFrozenOpsToMKLDNN(std::shared_ptr& graph);
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h
new file mode 100644
index 0000000000000000000000000000000000000000..528220875c08f66a403c623577c9423a76dcd1f8
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h
@@ -0,0 +1,15 @@
+#pragma once
+
+#include
+
+namespace torch::jit {
+
+// This pass removes 'grad_of' nodes, replacing them with conditionals of
+// the form:
+// if any_defined(inputs):
+// outputs =
+// else:
+// outputs = undefineds
+TORCH_API void LowerGradOf(Graph& g);
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h
new file mode 100644
index 0000000000000000000000000000000000000000..48308d122f6e0a26d395d1c66337ef616ffa896e
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h
@@ -0,0 +1,20 @@
+#pragma once
+
+#include
+
+namespace torch::jit {
+
+using ModulePtr = c10::intrusive_ptr;
+
+// Given a graph with of a method which first argument is %self, lower it to a
+// graph where all attributes accesses are replaced with explicit inputs of the
+// graph (rather than results of prim::GetAttr executed on %self).
+//
+// Returns a tuple (graph, parameters) where the last module.parameters.size()
+// inputs to the graph are the trainable parameters used in this method. The
+// remaining inputs are the true inputs to the function.
+TORCH_API std::pair, std::vector> LowerGraph(
+ Graph& graph,
+ const ModulePtr& self);
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h
new file mode 100644
index 0000000000000000000000000000000000000000..b51f29f0de714c56f71bd0cc4ff70b873bd11bc1
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h
@@ -0,0 +1,32 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#if AT_MKLDNN_ENABLED()
+
+#include
+
+#endif // AT_MKLDNN_ENABLED()
+
+namespace torch::jit {
+
+#if AT_MKLDNN_ENABLED()
+
+namespace mkldnn {
+
+const static std::map>
+ fusion_rewrite_map = {
+ {"none", {}},
+ {"relu", {}},
+};
+
+} // namespace mkldnn
+
+#endif // AT_MKLDNN_ENABLED()
+
+void FuseConvWithEltwise(std::shared_ptr& graph);
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..885076584427546e6cd92525fb9dc3195ac1ed8e
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h
@@ -0,0 +1,16 @@
+#pragma once
+
+#include
+
+namespace torch::jit {
+
+// This pass converts aten ops to a normalized form. It is
+// run immediately after IR generation in both the tracer and compiler,
+// so downstream consumers of the IR do not need handle ops in their
+// pre-normalized form.
+// Currently only handles normalization of op aliases.
+TORCH_API void NormalizeOps(const std::shared_ptr& graph);
+
+const std::unordered_map& getOperatorAliasMap();
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h
new file mode 100644
index 0000000000000000000000000000000000000000..efb19de59b5306ad09d096c8ee332204865c59e9
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h
@@ -0,0 +1,134 @@
+#pragma once
+
+#include
+
+/* `getCustomPrePasses()` returns a vector of passes that will be executed
+ * after differentiation but before any fusion. This is the de-facto location
+ * for compiler backends to insert passes.
+ *
+ * `getCustomPostPasses()` returns a vector of passes that will be
+ * executed after differentiation and after fusion (if any). This is the
+ * location for fusion cleanup passes if they are needed.
+ *
+ * Static registration of a pass can be done by creating a global
+ * `Register{Pre,Post}Pass r(Pass)` variable in a compilation unit.
+ *
+ * pass_manager.h uses a Meyer's singleton to store a vector of `Pass`es, which
+ * modify the IR graph in place.
+ */
+
+namespace torch::jit {
+
+// A pass modifies a Graph in place.
+using GraphPass = std::function&)>;
+
+// Since Passes are std::functions, we associate a UUID to each pass, this way
+// if we want to deregister a pass, we have something to reference it by.
+using GraphPassNameType = unsigned int;
+
+// Graph pass entries have a name associated with them
+using GraphPassEntry = std::pair;
+
+// Return currently registered passes. Passes are stored in a static vector
+TORCH_API std::vector>&
+getCustomPostPasses();
+TORCH_API std::vector>&
+getCustomPrePasses();
+
+TORCH_API GraphPassNameType registerPostPass(GraphPass p);
+TORCH_API GraphPassNameType registerPrePass(GraphPass p);
+
+// Look up pass by name passed in, remove it from registered passes
+TORCH_API void clearPostPass(GraphPassNameType p);
+TORCH_API void clearPrePass(GraphPassNameType p);
+
+// Remove all passes
+TORCH_API void clearAllPostPasses();
+TORCH_API void clearAllPrePasses();
+
+// LEGACY CALL
+struct TORCH_API RegisterPostPass {
+ RegisterPostPass(GraphPass p);
+};
+
+using RegisterPass = RegisterPostPass;
+
+/*
+ * PassManager is a wrapper on the register/clear PostPass functions above. It
+ * will register the pass provided in "registerPass" and will hold on to its
+ * associated name that way clearPass can be later called and will delete the
+ * pass used to register when called.
+ *
+ * PassManager is templated because we want static variables based on a
+ * particular GraphPass. When deriving from PassManager, you should send as the
+ * template parameter your derived class as you would for the curiously
+ * recurring template pattern. This template parameter isn't actually used and
+ * is simply done to prevent static members from being shared across derived
+ * types.
+ */
+template
+struct C10_EXPORT PassManager {
+ private:
+ // We want this class to be abstract because it's
+ virtual void abstract() = 0;
+
+ protected:
+ /*
+ * isRegistered() will return if a pass has been registered
+ * isRegistered(true) will change the value of the internal static bool
+ *
+ * There's an internal static bool to this function to keep track of the
+ * state, this is so when functions are derived from this class, they don't
+ * have to worry about initializing the static members.
+ */
+ static bool isRegistered(bool flip_bit = false) {
+ static bool val = false;
+ if (flip_bit)
+ val = !val;
+ return val;
+ }
+
+ /*
+ * name() will return the name of the registered pass
+ * name(pass_name, true) will set the name of the pass
+ * Similarly to isRegistered we use an internal static variable to hold the
+ * name.
+ */
+ static GraphPassNameType passID(
+ GraphPassNameType PassID = 0,
+ bool set = false) {
+ static GraphPassNameType pass_id = 0;
+ if (set)
+ pass_id = PassID;
+ return pass_id;
+ }
+
+ public:
+ // registerPass(pass) will register the pass provided and set the
+ // name/isRegistered functions appropriately, it returns a bool value
+ // indicating whether the given pass is already registered previously.
+ static bool registerPass(GraphPass p) {
+ if (!isRegistered()) {
+ // If we don't already have a registered pass, register pass
+ // hold on to its name, change isRegistered to true
+ passID(registerPostPass(std::move(p)), true);
+ isRegistered(true);
+ return false;
+ }
+ return true;
+ }
+
+ // Calls ClearPostPass(passID())
+ static void clearPass() {
+ // If the pass is registered, clear it and change isRegistered to false.
+ if (isRegistered()) {
+ clearPostPass(passID());
+ isRegistered(true);
+ }
+ }
+
+ // clang-tidy requires virtual destructor;
+ virtual ~PassManager() = default;
+};
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_alias_sensitive.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_alias_sensitive.h
new file mode 100644
index 0000000000000000000000000000000000000000..d98b8ac58a1264218b08e93a454e9748279c086b
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_alias_sensitive.h
@@ -0,0 +1,15 @@
+#pragma once
+
+#include
+
+namespace torch::jit {
+
+// Peephole Optimizes alias sensitive peepholes
+// Currently this is invoked as part of PeepholeOptimize
+// return true if graph is modified
+// Optimizes on TensorType if shape_peepholes is true
+TORCH_API bool PeepholeOptimizeAliasSensitive(
+ const std::shared_ptr& graph,
+ bool shape_peepholes);
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h
new file mode 100644
index 0000000000000000000000000000000000000000..49b8750b72ce8591deca5617654c91ab61d30cb3
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h
@@ -0,0 +1,10 @@
+#pragma once
+
+#include
+
+namespace torch::jit {
+
+// updates the types of tuples according to the type of their current inputs.
+TORCH_API void RefineTupleTypes(std::shared_ptr& graph);
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h
new file mode 100644
index 0000000000000000000000000000000000000000..483649d0e918c00cbd47b262110e3cbe24e42a11
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h
@@ -0,0 +1,9 @@
+#pragma once
+
+#include
+
+namespace torch::jit {
+
+TORCH_API void RemoveExpands(const std::shared_ptr& graph);
+
+}
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_mutation.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_mutation.h
new file mode 100644
index 0000000000000000000000000000000000000000..4f13698c8810608e31124679a6a07fae440e55c8
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_mutation.h
@@ -0,0 +1,81 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#include
+
+namespace torch::jit {
+
+struct TORCH_API MutationRemover {
+ MutationRemover(
+ std::shared_ptr graph,
+ std::optional> mutation_filter = std::nullopt)
+ : mutation_filter_(std::move(mutation_filter)),
+ aliasDb_(nullptr),
+ graph_(std::move(graph)) {}
+
+ // return true if graph is modified
+ bool removeListMutation();
+
+ // return true if graph is modified
+ bool removeTensorMutation();
+
+ bool isSpecialMappedOp(Node* n) {
+ return n->matches("aten::zero_(Tensor(a!) self) -> Tensor(a!)") ||
+ n->matches(
+ "aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)") ||
+ n->matches(
+ "aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)");
+ }
+
+ bool inplaceOpVariant(Node* n);
+
+ static bool hasSideEffectOrAlias(Value* v, AliasDb* aliasDb);
+
+ private:
+ Node* createSpecialMappedOp(Node* n);
+ bool listMutationFollowingListConstruct(Node* n);
+ bool tryMakeCreationAndMutationAtomic(
+ Value* mutated_value,
+ Node* mutating_op);
+ bool tryMakeUnaliasedIfOutputAndMutationAtomic(
+ Value* mutated_value,
+ Node* mutating_op);
+ // return true if graph is modified
+ bool RemoveListMutation(Block* block);
+ // return true if graph is modified
+ bool RemoveTensorMutation(Block* block);
+
+ AliasDb* getOrCreateAliasDb() {
+ if (!aliasDb_) {
+ aliasDb_ = std::make_unique(graph_);
+ }
+ return aliasDb_.get();
+ }
+
+ std::optional> mutation_filter_;
+ std::unique_ptr aliasDb_ = nullptr;
+ std::shared_ptr graph_;
+};
+
+// Removes list mutation with functional equivalents
+// return true if graph is modified
+TORCH_API bool RemoveListMutation(const std::shared_ptr& graph);
+
+// Replaces in-place aten ops with their functional equivalents
+// when it can be proven that this does not change graph semantics
+// if `mutation_filter` is present, the pass will only attempt to
+// remove mutation on nodes which return true for the filter
+// return true if graph is modified
+TORCH_API bool RemoveTensorMutation(
+ const std::shared_ptr& graph,
+ std::optional> mutation_filter = std::nullopt);
+
+// Replaces in-place aten activation ops with their functional equivalence
+TORCH_API bool InplaceToFunctionalActivation(
+ const std::shared_ptr& graph);
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_redundant_profiles.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_redundant_profiles.h
new file mode 100644
index 0000000000000000000000000000000000000000..0360bdf2092e571113847827592cbd6edba1fb1d
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_redundant_profiles.h
@@ -0,0 +1,9 @@
+#pragma once
+
+#include
+
+namespace torch::jit {
+
+TORCH_API void RemoveRedundantProfiles(std::shared_ptr& graph);
+TORCH_API void RemoveRedundantProfiles(Block* block, AliasDb& db);
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h
new file mode 100644
index 0000000000000000000000000000000000000000..ec702fe6416edf20bfb8119cd8a1256f8aff32d5
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h
@@ -0,0 +1,14 @@
+#pragma once
+
+#include
+
+namespace torch::jit {
+
+// Find the valid upgrader graph for the upgrader and cache the result
+// for later lookups. Will error out if there is no valid upgrader graph
+// provided for the upgrader name.
+std::shared_ptr getUpgraderGraph(const std::string& upgrader_name);
+
+TORCH_API void ReplaceOldOperatorsWithUpgraders(std::shared_ptr graph);
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/requires_grad_analysis.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/requires_grad_analysis.h
new file mode 100644
index 0000000000000000000000000000000000000000..bd60e4e249dbf5d550e4156760aa80b0830112c0
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/requires_grad_analysis.h
@@ -0,0 +1,14 @@
+#pragma once
+
+#include
+
+#include
+
+namespace torch::jit {
+
+struct Graph;
+struct ArgumentSpec;
+
+TORCH_API void PropagateRequiresGrad(std::shared_ptr& graph);
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_analysis.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_analysis.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f056fb5082064d23fef6278287b59e987d3fafb
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_analysis.h
@@ -0,0 +1,56 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+namespace torch::jit {
+
+// CAUTION NOT TO BE USED, STILL A WIP, NOT STABLE
+
+TORCH_API void PropagateShapesOnGraph(std::shared_ptr& graph);
+
+// CAUTION NOT TO BE USED, STILL A WIP, NOT STABLE
+// From [beg, end) attempt to propagate shapes and
+// build up a graph that will compute all remaining symbolic
+// shapes in [beg, end) that can be executed before beg
+
+struct ShapeComputeGraphMapping {
+ ShapeComputeGraphMapping(
+ std::shared_ptr partial_eval_shape_graph,
+ std::unordered_map
+ enclosing_graph_value_to_shape_graph_input,
+ std::unordered_map graph_output_to_symbolic_shape_dim)
+ : partial_eval_shape_graph(std::move(partial_eval_shape_graph)),
+ enclosing_graph_value_to_shape_graph_input_(
+ std::move(enclosing_graph_value_to_shape_graph_input)),
+ graph_output_to_symbolic_shape_dim_(
+ std::move(graph_output_to_symbolic_shape_dim)){};
+
+ std::shared_ptr partial_eval_shape_graph;
+ std::unordered_map
+ enclosing_graph_value_to_shape_graph_input_;
+ std::unordered_map graph_output_to_symbolic_shape_dim_;
+};
+
+TORCH_API std::optional
+PropagateShapesAndBuildLargeShapeComputeGraph(
+ std::shared_ptr& graph,
+ Node* beg,
+ Node* end);
+
+// don't insert complete tensor shapes in shape compute graphs and instead
+// rely on our partial evaluation pipeline to propagate information.
+// this is a good proxy for our ability to propagate non-complete shape
+// information.
+TORCH_API bool setSymbolicShapeAnalysisTestMode(bool value);
+TORCH_API bool symbolicShapeAnalysisTestModeEnabled();
+
+using SSAInput = std::variant;
+TORCH_API std::optional>
+calculateSymbolicShapesOnOp(
+ const FunctionSchema* schema,
+ const std::vector& inputs);
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/variadic_ops.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/variadic_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..afc033a57263bf6715a4a59a9192d739b3fb91cb
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/variadic_ops.h
@@ -0,0 +1,29 @@
+#pragma once
+
+#include
+
+namespace torch::jit {
+
+// Try to replace an op that takes a list input with another op that takes a
+// variadic number of arguments.
+TORCH_API bool UseVariadicOp(
+ const std::shared_ptr& graph,
+ NodeKind op,
+ NodeKind variadic_op);
+
+TORCH_API bool RemoveListMutationAndUseVariadicOp(
+ const std::shared_ptr& graph,
+ NodeKind op,
+ NodeKind variadic_op);
+
+// Convenient functions for replacing aten::stack/aten::cat with their
+// variadic versions.
+TORCH_API bool UseVariadicCat(const std::shared_ptr& graph);
+TORCH_API bool RemoveListMutationAndUseVariadicCat(
+ const std::shared_ptr& graph);
+
+TORCH_API bool UseVariadicStack(const std::shared_ptr& graph);
+TORCH_API bool RemoveListMutationAndUseVariadicStack(
+ const std::shared_ptr& graph);
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/resource_guard.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/resource_guard.h
new file mode 100644
index 0000000000000000000000000000000000000000..a78022a0c6fe07645a2855ad17a50f3b0319b766
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/resource_guard.h
@@ -0,0 +1,25 @@
+#pragma once
+#include
+
+namespace torch::jit {
+
+class ResourceGuard {
+ std::function _destructor;
+ bool _released{false};
+
+ public:
+ ResourceGuard(std::function destructor)
+ : _destructor(std::move(destructor)) {}
+
+ // NOLINTNEXTLINE(bugprone-exception-escape)
+ ~ResourceGuard() {
+ if (!_released)
+ _destructor();
+ }
+
+ void release() {
+ _released = true;
+ }
+};
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/callstack_debug_info_serialization.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/callstack_debug_info_serialization.h
new file mode 100644
index 0000000000000000000000000000000000000000..6d09bf56b2c1d295feae30eb6054d017c7c3e9c6
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/callstack_debug_info_serialization.h
@@ -0,0 +1,89 @@
+#pragma once
+
+#include
+#include
+#include
+
+#include
+
+#include
+
+#include
+
+namespace c10 {
+struct IValue;
+}
+
+namespace torch::jit {
+
+class Pickler;
+class InlinedCallStackSerializer {
+ public:
+ // Serialize InlinedCallStack as
+ // SerializedInlinedCallStack =
+ // [module_info, source range tag, SerializedInlinedCallStack]
+ // module_info = [ClassType.qualifiedName, instance_name]
+ // source_range_tag = unique source range id
+ c10::IValue serialize(
+ const InlinedCallStackPtr& cs_ptr,
+ const SourceRangeTagMap& source_range_tags);
+
+ private:
+ // module_info = [ClassType.qualifiedName, instance_name]
+ c10::IValue serialize_module_instance_info(
+ const std::optional& m);
+
+ // This caches serialized inlined callstack ptr, since many
+ // InlinedCallStackPtr can refer to the same one.
+ ska::flat_hash_map
+ serialized_inlined_callstack_;
+ // This caches serialized module instance info.
+ // There might be many nodes that are part of the same
+ // parent, grandparent etc. module.
+ ska::flat_hash_map serialized_module_instance_info_;
+};
+
+class TORCH_API CallStackDebugInfoPickler {
+ public:
+ CallStackDebugInfoPickler() = default;
+
+ std::vector pickle(
+ const std::unordered_map& callstack_ptrs,
+ const SourceRangeTagMap& source_range_tags);
+
+ private:
+ InlinedCallStackSerializer css_;
+};
+
+class InlinedCallStackDeserializer {
+ public:
+ InlinedCallStackPtr deserialize(
+ const c10::IValue& iv,
+ const ska::flat_hash_map& source_range_map,
+ const std::shared_ptr& cu);
+
+ private:
+ std::optional deserialize_module_instance_info(
+ const c10::IValue& iv,
+ const std::shared_ptr& cu);
+
+ ska::
+ flat_hash_map, InlinedCallStackPtr>
+ cached_inlined_callstacks_;
+ ska::flat_hash_map, ModuleInstanceInfo>
+ cached_module_instance_info_;
+};
+
+class TORCH_API CallStackDebugInfoUnpickler {
+ public:
+ ska::flat_hash_map unpickle(
+ const at::DataPtr& data,
+ size_t size,
+ const ska::flat_hash_map& source_range_map,
+ const std::shared_ptr& cu);
+
+ private:
+ InlinedCallStackDeserializer csds_;
+};
+
+} // namespace torch::jit
diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/export.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/export.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b2d6d84716ae5bfc5763c38f53bb5f349234f7f
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/export.h
@@ -0,0 +1,279 @@
+#pragma once
+
+#include
+#include
+#include
+#include