ZTWHHH commited on
Commit
f8b1fec
·
verified ·
1 Parent(s): 5071327

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. videollama2/lib/python3.10/site-packages/setuptools/tests/indexes/test_links_priority/external.html +3 -0
  2. videollama2/lib/python3.10/site-packages/setuptools/tests/integration/__pycache__/helpers.cpython-310.pyc +0 -0
  3. videollama2/lib/python3.10/site-packages/setuptools/tests/integration/__pycache__/test_pip_install_sdist.cpython-310.pyc +0 -0
  4. videollama2/lib/python3.10/site-packages/setuptools/tests/mod_with_constant.py +1 -0
  5. videollama2/lib/python3.10/site-packages/setuptools/tests/namespaces.py +90 -0
  6. videollama2/lib/python3.10/site-packages/setuptools/tests/test_archive_util.py +36 -0
  7. videollama2/lib/python3.10/site-packages/setuptools/tests/test_packageindex.py +267 -0
  8. videollama2/lib/python3.10/site-packages/setuptools/tests/test_sdist.py +972 -0
  9. videollama2/lib/python3.10/site-packages/setuptools/tests/test_wheel.py +714 -0
  10. videollama2/lib/python3.10/site-packages/setuptools/tests/text.py +4 -0
  11. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/CUDAPluggableAllocator.h +183 -0
  12. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Event.h +18 -0
  13. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/GdsFile.h +7 -0
  14. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Module.h +11 -0
  15. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Stream.h +20 -0
  16. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/THCP.h +10 -0
  17. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/comm.h +52 -0
  18. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/device_set.h +11 -0
  19. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/memory_snapshot.h +27 -0
  20. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/nccl.h +219 -0
  21. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_comm.h +7 -0
  22. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_nccl.h +13 -0
  23. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_log.h +126 -0
  24. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_opt_limit.h +37 -0
  25. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h +9 -0
  26. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h +20 -0
  27. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.h +9 -0
  28. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h +10 -0
  29. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h +17 -0
  30. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h +30 -0
  31. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h +12 -0
  32. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h +9 -0
  33. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h +21 -0
  34. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h +45 -0
  35. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h +13 -0
  36. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h +15 -0
  37. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h +20 -0
  38. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h +32 -0
  39. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h +16 -0
  40. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h +134 -0
  41. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_alias_sensitive.h +15 -0
  42. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h +10 -0
  43. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h +9 -0
  44. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_mutation.h +81 -0
  45. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_redundant_profiles.h +9 -0
  46. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h +14 -0
  47. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/requires_grad_analysis.h +14 -0
  48. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_analysis.h +56 -0
  49. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/variadic_ops.h +29 -0
  50. vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/resource_guard.h +25 -0
videollama2/lib/python3.10/site-packages/setuptools/tests/indexes/test_links_priority/external.html ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ <html><body>
2
+ <a href="/foobar-0.1.tar.gz#md5=1__bad_md5___">bad old link</a>
3
+ </body></html>
videollama2/lib/python3.10/site-packages/setuptools/tests/integration/__pycache__/helpers.cpython-310.pyc ADDED
Binary file (3.18 kB). View file
 
videollama2/lib/python3.10/site-packages/setuptools/tests/integration/__pycache__/test_pip_install_sdist.cpython-310.pyc ADDED
Binary file (6.1 kB). View file
 
videollama2/lib/python3.10/site-packages/setuptools/tests/mod_with_constant.py ADDED
@@ -0,0 +1 @@
 
 
1
+ value = 'three, sir!'
videollama2/lib/python3.10/site-packages/setuptools/tests/namespaces.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ast
2
+ import json
3
+ import textwrap
4
+ from pathlib import Path
5
+
6
+
7
+ def iter_namespace_pkgs(namespace):
8
+ parts = namespace.split(".")
9
+ for i in range(len(parts)):
10
+ yield ".".join(parts[: i + 1])
11
+
12
+
13
+ def build_namespace_package(tmpdir, name, version="1.0", impl="pkg_resources"):
14
+ src_dir = tmpdir / name
15
+ src_dir.mkdir()
16
+ setup_py = src_dir / 'setup.py'
17
+ namespace, _, rest = name.rpartition('.')
18
+ namespaces = list(iter_namespace_pkgs(namespace))
19
+ setup_args = {
20
+ "name": name,
21
+ "version": version,
22
+ "packages": namespaces,
23
+ }
24
+
25
+ if impl == "pkg_resources":
26
+ tmpl = '__import__("pkg_resources").declare_namespace(__name__)'
27
+ setup_args["namespace_packages"] = namespaces
28
+ elif impl == "pkgutil":
29
+ tmpl = '__path__ = __import__("pkgutil").extend_path(__path__, __name__)'
30
+ else:
31
+ raise ValueError(f"Cannot recognise {impl=} when creating namespaces")
32
+
33
+ args = json.dumps(setup_args, indent=4)
34
+ assert ast.literal_eval(args) # ensure it is valid Python
35
+
36
+ script = textwrap.dedent(
37
+ """\
38
+ import setuptools
39
+ args = {args}
40
+ setuptools.setup(**args)
41
+ """
42
+ ).format(args=args)
43
+ setup_py.write_text(script, encoding='utf-8')
44
+
45
+ ns_pkg_dir = Path(src_dir, namespace.replace(".", "/"))
46
+ ns_pkg_dir.mkdir(parents=True)
47
+
48
+ for ns in namespaces:
49
+ pkg_init = src_dir / ns.replace(".", "/") / '__init__.py'
50
+ pkg_init.write_text(tmpl, encoding='utf-8')
51
+
52
+ pkg_mod = ns_pkg_dir / (rest + '.py')
53
+ some_functionality = 'name = {rest!r}'.format(**locals())
54
+ pkg_mod.write_text(some_functionality, encoding='utf-8')
55
+ return src_dir
56
+
57
+
58
+ def build_pep420_namespace_package(tmpdir, name):
59
+ src_dir = tmpdir / name
60
+ src_dir.mkdir()
61
+ pyproject = src_dir / "pyproject.toml"
62
+ namespace, _, rest = name.rpartition(".")
63
+ script = f"""\
64
+ [build-system]
65
+ requires = ["setuptools"]
66
+ build-backend = "setuptools.build_meta"
67
+
68
+ [project]
69
+ name = "{name}"
70
+ version = "3.14159"
71
+ """
72
+ pyproject.write_text(textwrap.dedent(script), encoding='utf-8')
73
+ ns_pkg_dir = Path(src_dir, namespace.replace(".", "/"))
74
+ ns_pkg_dir.mkdir(parents=True)
75
+ pkg_mod = ns_pkg_dir / (rest + ".py")
76
+ some_functionality = f"name = {rest!r}"
77
+ pkg_mod.write_text(some_functionality, encoding='utf-8')
78
+ return src_dir
79
+
80
+
81
+ def make_site_dir(target):
82
+ """
83
+ Add a sitecustomize.py module in target to cause
84
+ target to be added to site dirs such that .pth files
85
+ are processed there.
86
+ """
87
+ sc = target / 'sitecustomize.py'
88
+ target_str = str(target)
89
+ tmpl = '__import__("site").addsitedir({target_str!r})'
90
+ sc.write_text(tmpl.format(**locals()), encoding='utf-8')
videollama2/lib/python3.10/site-packages/setuptools/tests/test_archive_util.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io
2
+ import tarfile
3
+
4
+ import pytest
5
+
6
+ from setuptools import archive_util
7
+
8
+
9
+ @pytest.fixture
10
+ def tarfile_with_unicode(tmpdir):
11
+ """
12
+ Create a tarfile containing only a file whose name is
13
+ a zero byte file called testimäge.png.
14
+ """
15
+ tarobj = io.BytesIO()
16
+
17
+ with tarfile.open(fileobj=tarobj, mode="w:gz") as tgz:
18
+ data = b""
19
+
20
+ filename = "testimäge.png"
21
+
22
+ t = tarfile.TarInfo(filename)
23
+ t.size = len(data)
24
+
25
+ tgz.addfile(t, io.BytesIO(data))
26
+
27
+ target = tmpdir / 'unicode-pkg-1.0.tar.gz'
28
+ with open(str(target), mode='wb') as tf:
29
+ tf.write(tarobj.getvalue())
30
+ return str(target)
31
+
32
+
33
+ @pytest.mark.xfail(reason="#710 and #712")
34
+ def test_unicode_files(tarfile_with_unicode, tmpdir):
35
+ target = tmpdir / 'out'
36
+ archive_util.unpack_archive(tarfile_with_unicode, str(target))
videollama2/lib/python3.10/site-packages/setuptools/tests/test_packageindex.py ADDED
@@ -0,0 +1,267 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import http.client
2
+ import re
3
+ import urllib.error
4
+ import urllib.request
5
+ from inspect import cleandoc
6
+
7
+ import pytest
8
+
9
+ import setuptools.package_index
10
+
11
+ import distutils.errors
12
+
13
+
14
+ class TestPackageIndex:
15
+ def test_regex(self):
16
+ hash_url = 'http://other_url?:action=show_md5&amp;'
17
+ hash_url += 'digest=0123456789abcdef0123456789abcdef'
18
+ doc = """
19
+ <a href="http://some_url">Name</a>
20
+ (<a title="MD5 hash"
21
+ href="{hash_url}">md5</a>)
22
+ """.lstrip().format(**locals())
23
+ assert setuptools.package_index.PYPI_MD5.match(doc)
24
+
25
+ def test_bad_url_bad_port(self):
26
+ index = setuptools.package_index.PackageIndex()
27
+ url = 'http://127.0.0.1:0/nonesuch/test_package_index'
28
+ with pytest.raises(Exception, match=re.escape(url)):
29
+ v = index.open_url(url)
30
+ assert isinstance(v, urllib.error.HTTPError)
31
+
32
+ def test_bad_url_typo(self):
33
+ # issue 16
34
+ # easy_install inquant.contentmirror.plone breaks because of a typo
35
+ # in its home URL
36
+ index = setuptools.package_index.PackageIndex(hosts=('www.example.com',))
37
+
38
+ url = 'url:%20https://svn.plone.org/svn/collective/inquant.contentmirror.plone/trunk'
39
+
40
+ with pytest.raises(Exception, match=re.escape(url)):
41
+ v = index.open_url(url)
42
+ assert isinstance(v, urllib.error.HTTPError)
43
+
44
+ def test_bad_url_bad_status_line(self):
45
+ index = setuptools.package_index.PackageIndex(hosts=('www.example.com',))
46
+
47
+ def _urlopen(*args):
48
+ raise http.client.BadStatusLine('line')
49
+
50
+ index.opener = _urlopen
51
+ url = 'http://example.com'
52
+ with pytest.raises(Exception, match=r'line'):
53
+ index.open_url(url)
54
+
55
+ def test_bad_url_double_scheme(self):
56
+ """
57
+ A bad URL with a double scheme should raise a DistutilsError.
58
+ """
59
+ index = setuptools.package_index.PackageIndex(hosts=('www.example.com',))
60
+
61
+ # issue 20
62
+ url = 'http://http://svn.pythonpaste.org/Paste/wphp/trunk'
63
+ try:
64
+ index.open_url(url)
65
+ except distutils.errors.DistutilsError as error:
66
+ msg = str(error)
67
+ assert (
68
+ 'nonnumeric port' in msg
69
+ or 'getaddrinfo failed' in msg
70
+ or 'Name or service not known' in msg
71
+ )
72
+ return
73
+ raise RuntimeError("Did not raise")
74
+
75
+ def test_url_ok(self):
76
+ index = setuptools.package_index.PackageIndex(hosts=('www.example.com',))
77
+ url = 'file:///tmp/test_package_index'
78
+ assert index.url_ok(url, True)
79
+
80
+ def test_parse_bdist_wininst(self):
81
+ parse = setuptools.package_index.parse_bdist_wininst
82
+
83
+ actual = parse('reportlab-2.5.win32-py2.4.exe')
84
+ expected = 'reportlab-2.5', '2.4', 'win32'
85
+ assert actual == expected
86
+
87
+ actual = parse('reportlab-2.5.win32.exe')
88
+ expected = 'reportlab-2.5', None, 'win32'
89
+ assert actual == expected
90
+
91
+ actual = parse('reportlab-2.5.win-amd64-py2.7.exe')
92
+ expected = 'reportlab-2.5', '2.7', 'win-amd64'
93
+ assert actual == expected
94
+
95
+ actual = parse('reportlab-2.5.win-amd64.exe')
96
+ expected = 'reportlab-2.5', None, 'win-amd64'
97
+ assert actual == expected
98
+
99
+ def test__vcs_split_rev_from_url(self):
100
+ """
101
+ Test the basic usage of _vcs_split_rev_from_url
102
+ """
103
+ vsrfu = setuptools.package_index.PackageIndex._vcs_split_rev_from_url
104
+ url, rev = vsrfu('https://example.com/bar@2995')
105
+ assert url == 'https://example.com/bar'
106
+ assert rev == '2995'
107
+
108
+ def test_local_index(self, tmpdir):
109
+ """
110
+ local_open should be able to read an index from the file system.
111
+ """
112
+ index_file = tmpdir / 'index.html'
113
+ with index_file.open('w') as f:
114
+ f.write('<div>content</div>')
115
+ url = 'file:' + urllib.request.pathname2url(str(tmpdir)) + '/'
116
+ res = setuptools.package_index.local_open(url)
117
+ assert 'content' in res.read()
118
+
119
+ def test_egg_fragment(self):
120
+ """
121
+ EGG fragments must comply to PEP 440
122
+ """
123
+ epoch = [
124
+ '',
125
+ '1!',
126
+ ]
127
+ releases = [
128
+ '0',
129
+ '0.0',
130
+ '0.0.0',
131
+ ]
132
+ pre = [
133
+ 'a0',
134
+ 'b0',
135
+ 'rc0',
136
+ ]
137
+ post = ['.post0']
138
+ dev = [
139
+ '.dev0',
140
+ ]
141
+ local = [
142
+ ('', ''),
143
+ ('+ubuntu.0', '+ubuntu.0'),
144
+ ('+ubuntu-0', '+ubuntu.0'),
145
+ ('+ubuntu_0', '+ubuntu.0'),
146
+ ]
147
+ versions = [
148
+ [''.join([e, r, p, loc]) for loc in locs]
149
+ for e in epoch
150
+ for r in releases
151
+ for p in sum([pre, post, dev], [''])
152
+ for locs in local
153
+ ]
154
+ for v, vc in versions:
155
+ dists = list(
156
+ setuptools.package_index.distros_for_url(
157
+ 'http://example.com/example-foo.zip#egg=example-foo-' + v
158
+ )
159
+ )
160
+ assert dists[0].version == ''
161
+ assert dists[1].version == vc
162
+
163
+ def test_download_git_with_rev(self, tmp_path, fp):
164
+ url = 'git+https://github.example/group/project@master#egg=foo'
165
+ index = setuptools.package_index.PackageIndex()
166
+
167
+ expected_dir = tmp_path / 'project@master'
168
+ fp.register([
169
+ 'git',
170
+ 'clone',
171
+ '--quiet',
172
+ 'https://github.example/group/project',
173
+ expected_dir,
174
+ ])
175
+ fp.register(['git', '-C', expected_dir, 'checkout', '--quiet', 'master'])
176
+
177
+ result = index.download(url, tmp_path)
178
+
179
+ assert result == str(expected_dir)
180
+ assert len(fp.calls) == 2
181
+
182
+ def test_download_git_no_rev(self, tmp_path, fp):
183
+ url = 'git+https://github.example/group/project#egg=foo'
184
+ index = setuptools.package_index.PackageIndex()
185
+
186
+ expected_dir = tmp_path / 'project'
187
+ fp.register([
188
+ 'git',
189
+ 'clone',
190
+ '--quiet',
191
+ 'https://github.example/group/project',
192
+ expected_dir,
193
+ ])
194
+ index.download(url, tmp_path)
195
+
196
+ def test_download_svn(self, tmp_path):
197
+ url = 'svn+https://svn.example/project#egg=foo'
198
+ index = setuptools.package_index.PackageIndex()
199
+
200
+ msg = r".*SVN download is not supported.*"
201
+ with pytest.raises(distutils.errors.DistutilsError, match=msg):
202
+ index.download(url, tmp_path)
203
+
204
+
205
+ class TestContentCheckers:
206
+ def test_md5(self):
207
+ checker = setuptools.package_index.HashChecker.from_url(
208
+ 'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478'
209
+ )
210
+ checker.feed('You should probably not be using MD5'.encode('ascii'))
211
+ assert checker.hash.hexdigest() == 'f12895fdffbd45007040d2e44df98478'
212
+ assert checker.is_valid()
213
+
214
+ def test_other_fragment(self):
215
+ "Content checks should succeed silently if no hash is present"
216
+ checker = setuptools.package_index.HashChecker.from_url(
217
+ 'http://foo/bar#something%20completely%20different'
218
+ )
219
+ checker.feed('anything'.encode('ascii'))
220
+ assert checker.is_valid()
221
+
222
+ def test_blank_md5(self):
223
+ "Content checks should succeed if a hash is empty"
224
+ checker = setuptools.package_index.HashChecker.from_url('http://foo/bar#md5=')
225
+ checker.feed('anything'.encode('ascii'))
226
+ assert checker.is_valid()
227
+
228
+ def test_get_hash_name_md5(self):
229
+ checker = setuptools.package_index.HashChecker.from_url(
230
+ 'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478'
231
+ )
232
+ assert checker.hash_name == 'md5'
233
+
234
+ def test_report(self):
235
+ checker = setuptools.package_index.HashChecker.from_url(
236
+ 'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478'
237
+ )
238
+ rep = checker.report(lambda x: x, 'My message about %s')
239
+ assert rep == 'My message about md5'
240
+
241
+
242
+ class TestPyPIConfig:
243
+ def test_percent_in_password(self, tmp_home_dir):
244
+ pypirc = tmp_home_dir / '.pypirc'
245
+ pypirc.write_text(
246
+ cleandoc(
247
+ """
248
+ [pypi]
249
+ repository=https://pypi.org
250
+ username=jaraco
251
+ password=pity%
252
+ """
253
+ ),
254
+ encoding="utf-8",
255
+ )
256
+ cfg = setuptools.package_index.PyPIConfig()
257
+ cred = cfg.creds_by_repository['https://pypi.org']
258
+ assert cred.username == 'jaraco'
259
+ assert cred.password == 'pity%'
260
+
261
+
262
+ @pytest.mark.timeout(1)
263
+ def test_REL_DoS():
264
+ """
265
+ REL should not hang on a contrived attack string.
266
+ """
267
+ setuptools.package_index.REL.search('< rel=' + ' ' * 2**12)
videollama2/lib/python3.10/site-packages/setuptools/tests/test_sdist.py ADDED
@@ -0,0 +1,972 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """sdist tests"""
2
+
3
+ import contextlib
4
+ import io
5
+ import logging
6
+ import os
7
+ import pathlib
8
+ import sys
9
+ import tarfile
10
+ import tempfile
11
+ import unicodedata
12
+ from inspect import cleandoc
13
+ from pathlib import Path
14
+ from unittest import mock
15
+
16
+ import jaraco.path
17
+ import pytest
18
+
19
+ from setuptools import Command, SetuptoolsDeprecationWarning
20
+ from setuptools._importlib import metadata
21
+ from setuptools.command.egg_info import manifest_maker
22
+ from setuptools.command.sdist import sdist
23
+ from setuptools.dist import Distribution
24
+ from setuptools.extension import Extension
25
+ from setuptools.tests import fail_on_ascii
26
+
27
+ from .text import Filenames
28
+
29
+ import distutils
30
+ from distutils.core import run_setup
31
+
32
+ SETUP_ATTRS = {
33
+ 'name': 'sdist_test',
34
+ 'version': '0.0',
35
+ 'packages': ['sdist_test'],
36
+ 'package_data': {'sdist_test': ['*.txt']},
37
+ 'data_files': [("data", [os.path.join("d", "e.dat")])],
38
+ }
39
+
40
+ SETUP_PY = f"""\
41
+ from setuptools import setup
42
+
43
+ setup(**{SETUP_ATTRS!r})
44
+ """
45
+
46
+ EXTENSION = Extension(
47
+ name="sdist_test.f",
48
+ sources=[os.path.join("sdist_test", "f.c")],
49
+ depends=[os.path.join("sdist_test", "f.h")],
50
+ )
51
+ EXTENSION_SOURCES = EXTENSION.sources + EXTENSION.depends
52
+
53
+
54
+ @contextlib.contextmanager
55
+ def quiet():
56
+ old_stdout, old_stderr = sys.stdout, sys.stderr
57
+ sys.stdout, sys.stderr = io.StringIO(), io.StringIO()
58
+ try:
59
+ yield
60
+ finally:
61
+ sys.stdout, sys.stderr = old_stdout, old_stderr
62
+
63
+
64
+ # Convert to POSIX path
65
+ def posix(path):
66
+ if not isinstance(path, str):
67
+ return path.replace(os.sep.encode('ascii'), b'/')
68
+ else:
69
+ return path.replace(os.sep, '/')
70
+
71
+
72
+ # HFS Plus uses decomposed UTF-8
73
+ def decompose(path):
74
+ if isinstance(path, str):
75
+ return unicodedata.normalize('NFD', path)
76
+ try:
77
+ path = path.decode('utf-8')
78
+ path = unicodedata.normalize('NFD', path)
79
+ path = path.encode('utf-8')
80
+ except UnicodeError:
81
+ pass # Not UTF-8
82
+ return path
83
+
84
+
85
+ def read_all_bytes(filename):
86
+ with open(filename, 'rb') as fp:
87
+ return fp.read()
88
+
89
+
90
+ def latin1_fail():
91
+ try:
92
+ desc, filename = tempfile.mkstemp(suffix=Filenames.latin_1)
93
+ os.close(desc)
94
+ os.remove(filename)
95
+ except Exception:
96
+ return True
97
+
98
+
99
+ fail_on_latin1_encoded_filenames = pytest.mark.xfail(
100
+ latin1_fail(),
101
+ reason="System does not support latin-1 filenames",
102
+ )
103
+
104
+
105
+ skip_under_xdist = pytest.mark.skipif(
106
+ "os.environ.get('PYTEST_XDIST_WORKER')",
107
+ reason="pytest-dev/pytest-xdist#843",
108
+ )
109
+ skip_under_stdlib_distutils = pytest.mark.skipif(
110
+ not distutils.__package__.startswith('setuptools'),
111
+ reason="the test is not supported with stdlib distutils",
112
+ )
113
+
114
+
115
+ def touch(path):
116
+ open(path, 'wb').close()
117
+ return path
118
+
119
+
120
+ def symlink_or_skip_test(src, dst):
121
+ try:
122
+ os.symlink(src, dst)
123
+ except (OSError, NotImplementedError):
124
+ pytest.skip("symlink not supported in OS")
125
+ return None
126
+ return dst
127
+
128
+
129
+ class TestSdistTest:
130
+ @pytest.fixture(autouse=True)
131
+ def source_dir(self, tmpdir):
132
+ tmpdir = tmpdir / "project_root"
133
+ tmpdir.mkdir()
134
+
135
+ (tmpdir / 'setup.py').write_text(SETUP_PY, encoding='utf-8')
136
+
137
+ # Set up the rest of the test package
138
+ test_pkg = tmpdir / 'sdist_test'
139
+ test_pkg.mkdir()
140
+ data_folder = tmpdir / 'd'
141
+ data_folder.mkdir()
142
+ # *.rst was not included in package_data, so c.rst should not be
143
+ # automatically added to the manifest when not under version control
144
+ for fname in ['__init__.py', 'a.txt', 'b.txt', 'c.rst']:
145
+ touch(test_pkg / fname)
146
+ touch(data_folder / 'e.dat')
147
+ # C sources are not included by default, but they will be,
148
+ # if an extension module uses them as sources or depends
149
+ for fname in EXTENSION_SOURCES:
150
+ touch(tmpdir / fname)
151
+
152
+ with tmpdir.as_cwd():
153
+ yield tmpdir
154
+
155
+ def assert_package_data_in_manifest(self, cmd):
156
+ manifest = cmd.filelist.files
157
+ assert os.path.join('sdist_test', 'a.txt') in manifest
158
+ assert os.path.join('sdist_test', 'b.txt') in manifest
159
+ assert os.path.join('sdist_test', 'c.rst') not in manifest
160
+ assert os.path.join('d', 'e.dat') in manifest
161
+
162
+ def setup_with_extension(self):
163
+ setup_attrs = {**SETUP_ATTRS, 'ext_modules': [EXTENSION]}
164
+
165
+ dist = Distribution(setup_attrs)
166
+ dist.script_name = 'setup.py'
167
+ cmd = sdist(dist)
168
+ cmd.ensure_finalized()
169
+
170
+ with quiet():
171
+ cmd.run()
172
+
173
+ return cmd
174
+
175
+ def test_package_data_in_sdist(self):
176
+ """Regression test for pull request #4: ensures that files listed in
177
+ package_data are included in the manifest even if they're not added to
178
+ version control.
179
+ """
180
+
181
+ dist = Distribution(SETUP_ATTRS)
182
+ dist.script_name = 'setup.py'
183
+ cmd = sdist(dist)
184
+ cmd.ensure_finalized()
185
+
186
+ with quiet():
187
+ cmd.run()
188
+
189
+ self.assert_package_data_in_manifest(cmd)
190
+
191
+ def test_package_data_and_include_package_data_in_sdist(self):
192
+ """
193
+ Ensure package_data and include_package_data work
194
+ together.
195
+ """
196
+ setup_attrs = {**SETUP_ATTRS, 'include_package_data': True}
197
+ assert setup_attrs['package_data']
198
+
199
+ dist = Distribution(setup_attrs)
200
+ dist.script_name = 'setup.py'
201
+ cmd = sdist(dist)
202
+ cmd.ensure_finalized()
203
+
204
+ with quiet():
205
+ cmd.run()
206
+
207
+ self.assert_package_data_in_manifest(cmd)
208
+
209
+ def test_extension_sources_in_sdist(self):
210
+ """
211
+ Ensure that the files listed in Extension.sources and Extension.depends
212
+ are automatically included in the manifest.
213
+ """
214
+ cmd = self.setup_with_extension()
215
+ self.assert_package_data_in_manifest(cmd)
216
+ manifest = cmd.filelist.files
217
+ for path in EXTENSION_SOURCES:
218
+ assert path in manifest
219
+
220
+ def test_missing_extension_sources(self):
221
+ """
222
+ Similar to test_extension_sources_in_sdist but the referenced files don't exist.
223
+ Missing files should not be included in distribution (with no error raised).
224
+ """
225
+ for path in EXTENSION_SOURCES:
226
+ os.remove(path)
227
+
228
+ cmd = self.setup_with_extension()
229
+ self.assert_package_data_in_manifest(cmd)
230
+ manifest = cmd.filelist.files
231
+ for path in EXTENSION_SOURCES:
232
+ assert path not in manifest
233
+
234
+ def test_symlinked_extension_sources(self):
235
+ """
236
+ Similar to test_extension_sources_in_sdist but the referenced files are
237
+ instead symbolic links to project-local files. Referenced file paths
238
+ should be included. Symlink targets themselves should NOT be included.
239
+ """
240
+ symlinked = []
241
+ for path in EXTENSION_SOURCES:
242
+ base, ext = os.path.splitext(path)
243
+ target = base + "_target." + ext
244
+
245
+ os.rename(path, target)
246
+ symlink_or_skip_test(os.path.basename(target), path)
247
+ symlinked.append(target)
248
+
249
+ cmd = self.setup_with_extension()
250
+ self.assert_package_data_in_manifest(cmd)
251
+ manifest = cmd.filelist.files
252
+ for path in EXTENSION_SOURCES:
253
+ assert path in manifest
254
+ for path in symlinked:
255
+ assert path not in manifest
256
+
257
+ _INVALID_PATHS = {
258
+ "must be relative": lambda: (
259
+ os.path.abspath(os.path.join("sdist_test", "f.h"))
260
+ ),
261
+ "can't have `..` segments": lambda: (
262
+ os.path.join("sdist_test", "..", "sdist_test", "f.h")
263
+ ),
264
+ "doesn't exist": lambda: (
265
+ os.path.join("sdist_test", "this_file_does_not_exist.h")
266
+ ),
267
+ "must be inside the project root": lambda: (
268
+ symlink_or_skip_test(
269
+ touch(os.path.join("..", "outside_of_project_root.h")),
270
+ "symlink.h",
271
+ )
272
+ ),
273
+ }
274
+
275
+ @skip_under_stdlib_distutils
276
+ @pytest.mark.parametrize("reason", _INVALID_PATHS.keys())
277
+ def test_invalid_extension_depends(self, reason, caplog):
278
+ """
279
+ Due to backwards compatibility reasons, `Extension.depends` should accept
280
+ invalid/weird paths, but then ignore them when building a sdist.
281
+
282
+ This test verifies that the source distribution is still built
283
+ successfully with such paths, but that instead of adding these paths to
284
+ the manifest, we emit an informational message, notifying the user that
285
+ the invalid path won't be automatically included.
286
+ """
287
+ invalid_path = self._INVALID_PATHS[reason]()
288
+ extension = Extension(
289
+ name="sdist_test.f",
290
+ sources=[],
291
+ depends=[invalid_path],
292
+ )
293
+ setup_attrs = {**SETUP_ATTRS, 'ext_modules': [extension]}
294
+
295
+ dist = Distribution(setup_attrs)
296
+ dist.script_name = 'setup.py'
297
+ cmd = sdist(dist)
298
+ cmd.ensure_finalized()
299
+
300
+ with quiet(), caplog.at_level(logging.INFO):
301
+ cmd.run()
302
+
303
+ self.assert_package_data_in_manifest(cmd)
304
+ manifest = cmd.filelist.files
305
+ assert invalid_path not in manifest
306
+
307
+ expected_message = [
308
+ message
309
+ for (logger, level, message) in caplog.record_tuples
310
+ if (
311
+ logger == "root" #
312
+ and level == logging.INFO #
313
+ and invalid_path in message #
314
+ )
315
+ ]
316
+ assert len(expected_message) == 1
317
+ (expected_message,) = expected_message
318
+ assert reason in expected_message
319
+
320
+ def test_custom_build_py(self):
321
+ """
322
+ Ensure projects defining custom build_py don't break
323
+ when creating sdists (issue #2849)
324
+ """
325
+ from distutils.command.build_py import build_py as OrigBuildPy
326
+
327
+ using_custom_command_guard = mock.Mock()
328
+
329
+ class CustomBuildPy(OrigBuildPy):
330
+ """
331
+ Some projects have custom commands inheriting from `distutils`
332
+ """
333
+
334
+ def get_data_files(self):
335
+ using_custom_command_guard()
336
+ return super().get_data_files()
337
+
338
+ setup_attrs = {**SETUP_ATTRS, 'include_package_data': True}
339
+ assert setup_attrs['package_data']
340
+
341
+ dist = Distribution(setup_attrs)
342
+ dist.script_name = 'setup.py'
343
+ cmd = sdist(dist)
344
+ cmd.ensure_finalized()
345
+
346
+ # Make sure we use the custom command
347
+ cmd.cmdclass = {'build_py': CustomBuildPy}
348
+ cmd.distribution.cmdclass = {'build_py': CustomBuildPy}
349
+ assert cmd.distribution.get_command_class('build_py') == CustomBuildPy
350
+
351
+ msg = "setuptools instead of distutils"
352
+ with quiet(), pytest.warns(SetuptoolsDeprecationWarning, match=msg):
353
+ cmd.run()
354
+
355
+ using_custom_command_guard.assert_called()
356
+ self.assert_package_data_in_manifest(cmd)
357
+
358
+ def test_setup_py_exists(self):
359
+ dist = Distribution(SETUP_ATTRS)
360
+ dist.script_name = 'foo.py'
361
+ cmd = sdist(dist)
362
+ cmd.ensure_finalized()
363
+
364
+ with quiet():
365
+ cmd.run()
366
+
367
+ manifest = cmd.filelist.files
368
+ assert 'setup.py' in manifest
369
+
370
+ def test_setup_py_missing(self):
371
+ dist = Distribution(SETUP_ATTRS)
372
+ dist.script_name = 'foo.py'
373
+ cmd = sdist(dist)
374
+ cmd.ensure_finalized()
375
+
376
+ if os.path.exists("setup.py"):
377
+ os.remove("setup.py")
378
+ with quiet():
379
+ cmd.run()
380
+
381
+ manifest = cmd.filelist.files
382
+ assert 'setup.py' not in manifest
383
+
384
+ def test_setup_py_excluded(self):
385
+ with open("MANIFEST.in", "w", encoding="utf-8") as manifest_file:
386
+ manifest_file.write("exclude setup.py")
387
+
388
+ dist = Distribution(SETUP_ATTRS)
389
+ dist.script_name = 'foo.py'
390
+ cmd = sdist(dist)
391
+ cmd.ensure_finalized()
392
+
393
+ with quiet():
394
+ cmd.run()
395
+
396
+ manifest = cmd.filelist.files
397
+ assert 'setup.py' not in manifest
398
+
399
+ def test_defaults_case_sensitivity(self, source_dir):
400
+ """
401
+ Make sure default files (README.*, etc.) are added in a case-sensitive
402
+ way to avoid problems with packages built on Windows.
403
+ """
404
+
405
+ touch(source_dir / 'readme.rst')
406
+ touch(source_dir / 'SETUP.cfg')
407
+
408
+ dist = Distribution(SETUP_ATTRS)
409
+ # the extension deliberately capitalized for this test
410
+ # to make sure the actual filename (not capitalized) gets added
411
+ # to the manifest
412
+ dist.script_name = 'setup.PY'
413
+ cmd = sdist(dist)
414
+ cmd.ensure_finalized()
415
+
416
+ with quiet():
417
+ cmd.run()
418
+
419
+ # lowercase all names so we can test in a
420
+ # case-insensitive way to make sure the files
421
+ # are not included.
422
+ manifest = map(lambda x: x.lower(), cmd.filelist.files)
423
+ assert 'readme.rst' not in manifest, manifest
424
+ assert 'setup.py' not in manifest, manifest
425
+ assert 'setup.cfg' not in manifest, manifest
426
+
427
+ def test_exclude_dev_only_cache_folders(self, source_dir):
428
+ included = {
429
+ # Emulate problem in https://github.com/pypa/setuptools/issues/4601
430
+ "MANIFEST.in": (
431
+ "global-include LICEN[CS]E* COPYING* NOTICE* AUTHORS*\n"
432
+ "global-include *.txt\n"
433
+ ),
434
+ # For the sake of being conservative and limiting unforeseen side-effects
435
+ # we just exclude dev-only cache folders at the root of the repository:
436
+ "test/.venv/lib/python3.9/site-packages/bar-2.dist-info/AUTHORS.rst": "",
437
+ "src/.nox/py/lib/python3.12/site-packages/bar-2.dist-info/COPYING.txt": "",
438
+ "doc/.tox/default/lib/python3.11/site-packages/foo-4.dist-info/LICENSE": "",
439
+ # Let's test against false positives with similarly named files:
440
+ ".venv-requirements.txt": "",
441
+ ".tox-coveragerc.txt": "",
442
+ ".noxy/coveragerc.txt": "",
443
+ }
444
+
445
+ excluded = {
446
+ # .tox/.nox/.venv are well-know folders present at the root of Python repos
447
+ # and therefore should be excluded
448
+ ".tox/release/lib/python3.11/site-packages/foo-4.dist-info/LICENSE": "",
449
+ ".nox/py/lib/python3.12/site-packages/bar-2.dist-info/COPYING.txt": "",
450
+ ".venv/lib/python3.9/site-packages/bar-2.dist-info/AUTHORS.rst": "",
451
+ }
452
+
453
+ for file, content in {**excluded, **included}.items():
454
+ Path(source_dir, file).parent.mkdir(parents=True, exist_ok=True)
455
+ Path(source_dir, file).write_text(content, encoding="utf-8")
456
+
457
+ cmd = self.setup_with_extension()
458
+ self.assert_package_data_in_manifest(cmd)
459
+ manifest = {f.replace(os.sep, '/') for f in cmd.filelist.files}
460
+ for path in excluded:
461
+ assert os.path.exists(path)
462
+ assert path not in manifest, (path, manifest)
463
+ for path in included:
464
+ assert os.path.exists(path)
465
+ assert path in manifest, (path, manifest)
466
+
467
+ @fail_on_ascii
468
+ def test_manifest_is_written_with_utf8_encoding(self):
469
+ # Test for #303.
470
+ dist = Distribution(SETUP_ATTRS)
471
+ dist.script_name = 'setup.py'
472
+ mm = manifest_maker(dist)
473
+ mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
474
+ os.mkdir('sdist_test.egg-info')
475
+
476
+ # UTF-8 filename
477
+ filename = os.path.join('sdist_test', 'smörbröd.py')
478
+
479
+ # Must create the file or it will get stripped.
480
+ touch(filename)
481
+
482
+ # Add UTF-8 filename and write manifest
483
+ with quiet():
484
+ mm.run()
485
+ mm.filelist.append(filename)
486
+ mm.write_manifest()
487
+
488
+ contents = read_all_bytes(mm.manifest)
489
+
490
+ # The manifest should be UTF-8 encoded
491
+ u_contents = contents.decode('UTF-8')
492
+
493
+ # The manifest should contain the UTF-8 filename
494
+ assert posix(filename) in u_contents
495
+
496
+ @fail_on_ascii
497
+ def test_write_manifest_allows_utf8_filenames(self):
498
+ # Test for #303.
499
+ dist = Distribution(SETUP_ATTRS)
500
+ dist.script_name = 'setup.py'
501
+ mm = manifest_maker(dist)
502
+ mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
503
+ os.mkdir('sdist_test.egg-info')
504
+
505
+ filename = os.path.join(b'sdist_test', Filenames.utf_8)
506
+
507
+ # Must touch the file or risk removal
508
+ touch(filename)
509
+
510
+ # Add filename and write manifest
511
+ with quiet():
512
+ mm.run()
513
+ u_filename = filename.decode('utf-8')
514
+ mm.filelist.files.append(u_filename)
515
+ # Re-write manifest
516
+ mm.write_manifest()
517
+
518
+ contents = read_all_bytes(mm.manifest)
519
+
520
+ # The manifest should be UTF-8 encoded
521
+ contents.decode('UTF-8')
522
+
523
+ # The manifest should contain the UTF-8 filename
524
+ assert posix(filename) in contents
525
+
526
+ # The filelist should have been updated as well
527
+ assert u_filename in mm.filelist.files
528
+
529
+ @skip_under_xdist
530
+ def test_write_manifest_skips_non_utf8_filenames(self):
531
+ """
532
+ Files that cannot be encoded to UTF-8 (specifically, those that
533
+ weren't originally successfully decoded and have surrogate
534
+ escapes) should be omitted from the manifest.
535
+ See https://bitbucket.org/tarek/distribute/issue/303 for history.
536
+ """
537
+ dist = Distribution(SETUP_ATTRS)
538
+ dist.script_name = 'setup.py'
539
+ mm = manifest_maker(dist)
540
+ mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
541
+ os.mkdir('sdist_test.egg-info')
542
+
543
+ # Latin-1 filename
544
+ filename = os.path.join(b'sdist_test', Filenames.latin_1)
545
+
546
+ # Add filename with surrogates and write manifest
547
+ with quiet():
548
+ mm.run()
549
+ u_filename = filename.decode('utf-8', 'surrogateescape')
550
+ mm.filelist.append(u_filename)
551
+ # Re-write manifest
552
+ mm.write_manifest()
553
+
554
+ contents = read_all_bytes(mm.manifest)
555
+
556
+ # The manifest should be UTF-8 encoded
557
+ contents.decode('UTF-8')
558
+
559
+ # The Latin-1 filename should have been skipped
560
+ assert posix(filename) not in contents
561
+
562
+ # The filelist should have been updated as well
563
+ assert u_filename not in mm.filelist.files
564
+
565
+ @fail_on_ascii
566
+ def test_manifest_is_read_with_utf8_encoding(self):
567
+ # Test for #303.
568
+ dist = Distribution(SETUP_ATTRS)
569
+ dist.script_name = 'setup.py'
570
+ cmd = sdist(dist)
571
+ cmd.ensure_finalized()
572
+
573
+ # Create manifest
574
+ with quiet():
575
+ cmd.run()
576
+
577
+ # Add UTF-8 filename to manifest
578
+ filename = os.path.join(b'sdist_test', Filenames.utf_8)
579
+ cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
580
+ manifest = open(cmd.manifest, 'ab')
581
+ manifest.write(b'\n' + filename)
582
+ manifest.close()
583
+
584
+ # The file must exist to be included in the filelist
585
+ touch(filename)
586
+
587
+ # Re-read manifest
588
+ cmd.filelist.files = []
589
+ with quiet():
590
+ cmd.read_manifest()
591
+
592
+ # The filelist should contain the UTF-8 filename
593
+ filename = filename.decode('utf-8')
594
+ assert filename in cmd.filelist.files
595
+
596
+ @fail_on_latin1_encoded_filenames
597
+ def test_read_manifest_skips_non_utf8_filenames(self):
598
+ # Test for #303.
599
+ dist = Distribution(SETUP_ATTRS)
600
+ dist.script_name = 'setup.py'
601
+ cmd = sdist(dist)
602
+ cmd.ensure_finalized()
603
+
604
+ # Create manifest
605
+ with quiet():
606
+ cmd.run()
607
+
608
+ # Add Latin-1 filename to manifest
609
+ filename = os.path.join(b'sdist_test', Filenames.latin_1)
610
+ cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
611
+ manifest = open(cmd.manifest, 'ab')
612
+ manifest.write(b'\n' + filename)
613
+ manifest.close()
614
+
615
+ # The file must exist to be included in the filelist
616
+ touch(filename)
617
+
618
+ # Re-read manifest
619
+ cmd.filelist.files = []
620
+ with quiet():
621
+ cmd.read_manifest()
622
+
623
+ # The Latin-1 filename should have been skipped
624
+ filename = filename.decode('latin-1')
625
+ assert filename not in cmd.filelist.files
626
+
627
+ @fail_on_ascii
628
+ @fail_on_latin1_encoded_filenames
629
+ def test_sdist_with_utf8_encoded_filename(self):
630
+ # Test for #303.
631
+ dist = Distribution(self.make_strings(SETUP_ATTRS))
632
+ dist.script_name = 'setup.py'
633
+ cmd = sdist(dist)
634
+ cmd.ensure_finalized()
635
+
636
+ filename = os.path.join(b'sdist_test', Filenames.utf_8)
637
+ touch(filename)
638
+
639
+ with quiet():
640
+ cmd.run()
641
+
642
+ if sys.platform == 'darwin':
643
+ filename = decompose(filename)
644
+
645
+ fs_enc = sys.getfilesystemencoding()
646
+
647
+ if sys.platform == 'win32':
648
+ if fs_enc == 'cp1252':
649
+ # Python mangles the UTF-8 filename
650
+ filename = filename.decode('cp1252')
651
+ assert filename in cmd.filelist.files
652
+ else:
653
+ filename = filename.decode('mbcs')
654
+ assert filename in cmd.filelist.files
655
+ else:
656
+ filename = filename.decode('utf-8')
657
+ assert filename in cmd.filelist.files
658
+
659
+ @classmethod
660
+ def make_strings(cls, item):
661
+ if isinstance(item, dict):
662
+ return {key: cls.make_strings(value) for key, value in item.items()}
663
+ if isinstance(item, list):
664
+ return list(map(cls.make_strings, item))
665
+ return str(item)
666
+
667
+ @fail_on_latin1_encoded_filenames
668
+ @skip_under_xdist
669
+ def test_sdist_with_latin1_encoded_filename(self):
670
+ # Test for #303.
671
+ dist = Distribution(self.make_strings(SETUP_ATTRS))
672
+ dist.script_name = 'setup.py'
673
+ cmd = sdist(dist)
674
+ cmd.ensure_finalized()
675
+
676
+ # Latin-1 filename
677
+ filename = os.path.join(b'sdist_test', Filenames.latin_1)
678
+ touch(filename)
679
+ assert os.path.isfile(filename)
680
+
681
+ with quiet():
682
+ cmd.run()
683
+
684
+ # not all windows systems have a default FS encoding of cp1252
685
+ if sys.platform == 'win32':
686
+ # Latin-1 is similar to Windows-1252 however
687
+ # on mbcs filesys it is not in latin-1 encoding
688
+ fs_enc = sys.getfilesystemencoding()
689
+ if fs_enc != 'mbcs':
690
+ fs_enc = 'latin-1'
691
+ filename = filename.decode(fs_enc)
692
+
693
+ assert filename in cmd.filelist.files
694
+ else:
695
+ # The Latin-1 filename should have been skipped
696
+ filename = filename.decode('latin-1')
697
+ assert filename not in cmd.filelist.files
698
+
699
+ _EXAMPLE_DIRECTIVES = {
700
+ "setup.cfg - long_description and version": """
701
+ [metadata]
702
+ name = testing
703
+ version = file: src/VERSION.txt
704
+ license_files = DOWHATYOUWANT
705
+ long_description = file: README.rst, USAGE.rst
706
+ """,
707
+ "pyproject.toml - static readme/license files and dynamic version": """
708
+ [project]
709
+ name = "testing"
710
+ readme = "USAGE.rst"
711
+ license = {file = "DOWHATYOUWANT"}
712
+ dynamic = ["version"]
713
+ [tool.setuptools.dynamic]
714
+ version = {file = ["src/VERSION.txt"]}
715
+ """,
716
+ "pyproject.toml - directive with str instead of list": """
717
+ [project]
718
+ name = "testing"
719
+ readme = "USAGE.rst"
720
+ license = {file = "DOWHATYOUWANT"}
721
+ dynamic = ["version"]
722
+ [tool.setuptools.dynamic]
723
+ version = {file = "src/VERSION.txt"}
724
+ """,
725
+ }
726
+
727
+ @pytest.mark.parametrize("config", _EXAMPLE_DIRECTIVES.keys())
728
+ def test_add_files_referenced_by_config_directives(self, source_dir, config):
729
+ config_file, _, _ = config.partition(" - ")
730
+ config_text = self._EXAMPLE_DIRECTIVES[config]
731
+ (source_dir / 'src').mkdir()
732
+ (source_dir / 'src/VERSION.txt').write_text("0.42", encoding="utf-8")
733
+ (source_dir / 'README.rst').write_text("hello world!", encoding="utf-8")
734
+ (source_dir / 'USAGE.rst').write_text("hello world!", encoding="utf-8")
735
+ (source_dir / 'DOWHATYOUWANT').write_text("hello world!", encoding="utf-8")
736
+ (source_dir / config_file).write_text(config_text, encoding="utf-8")
737
+
738
+ dist = Distribution({"packages": []})
739
+ dist.script_name = 'setup.py'
740
+ dist.parse_config_files()
741
+
742
+ cmd = sdist(dist)
743
+ cmd.ensure_finalized()
744
+ with quiet():
745
+ cmd.run()
746
+
747
+ assert (
748
+ 'src/VERSION.txt' in cmd.filelist.files
749
+ or 'src\\VERSION.txt' in cmd.filelist.files
750
+ )
751
+ assert 'USAGE.rst' in cmd.filelist.files
752
+ assert 'DOWHATYOUWANT' in cmd.filelist.files
753
+ assert '/' not in cmd.filelist.files
754
+ assert '\\' not in cmd.filelist.files
755
+
756
+ def test_pyproject_toml_in_sdist(self, source_dir):
757
+ """
758
+ Check if pyproject.toml is included in source distribution if present
759
+ """
760
+ touch(source_dir / 'pyproject.toml')
761
+ dist = Distribution(SETUP_ATTRS)
762
+ dist.script_name = 'setup.py'
763
+ cmd = sdist(dist)
764
+ cmd.ensure_finalized()
765
+ with quiet():
766
+ cmd.run()
767
+ manifest = cmd.filelist.files
768
+ assert 'pyproject.toml' in manifest
769
+
770
+ def test_pyproject_toml_excluded(self, source_dir):
771
+ """
772
+ Check that pyproject.toml can excluded even if present
773
+ """
774
+ touch(source_dir / 'pyproject.toml')
775
+ with open('MANIFEST.in', 'w', encoding="utf-8") as mts:
776
+ print('exclude pyproject.toml', file=mts)
777
+ dist = Distribution(SETUP_ATTRS)
778
+ dist.script_name = 'setup.py'
779
+ cmd = sdist(dist)
780
+ cmd.ensure_finalized()
781
+ with quiet():
782
+ cmd.run()
783
+ manifest = cmd.filelist.files
784
+ assert 'pyproject.toml' not in manifest
785
+
786
+ def test_build_subcommand_source_files(self, source_dir):
787
+ touch(source_dir / '.myfile~')
788
+
789
+ # Sanity check: without custom commands file list should not be affected
790
+ dist = Distribution({**SETUP_ATTRS, "script_name": "setup.py"})
791
+ cmd = sdist(dist)
792
+ cmd.ensure_finalized()
793
+ with quiet():
794
+ cmd.run()
795
+ manifest = cmd.filelist.files
796
+ assert '.myfile~' not in manifest
797
+
798
+ # Test: custom command should be able to augment file list
799
+ dist = Distribution({**SETUP_ATTRS, "script_name": "setup.py"})
800
+ build = dist.get_command_obj("build")
801
+ build.sub_commands = [*build.sub_commands, ("build_custom", None)]
802
+
803
+ class build_custom(Command):
804
+ def initialize_options(self): ...
805
+
806
+ def finalize_options(self): ...
807
+
808
+ def run(self): ...
809
+
810
+ def get_source_files(self):
811
+ return ['.myfile~']
812
+
813
+ dist.cmdclass.update(build_custom=build_custom)
814
+
815
+ cmd = sdist(dist)
816
+ cmd.use_defaults = True
817
+ cmd.ensure_finalized()
818
+ with quiet():
819
+ cmd.run()
820
+ manifest = cmd.filelist.files
821
+ assert '.myfile~' in manifest
822
+
823
+ @pytest.mark.skipif("os.environ.get('SETUPTOOLS_USE_DISTUTILS') == 'stdlib'")
824
+ def test_build_base_pathlib(self, source_dir):
825
+ """
826
+ Ensure if build_base is a pathlib.Path, the build still succeeds.
827
+ """
828
+ dist = Distribution({
829
+ **SETUP_ATTRS,
830
+ "script_name": "setup.py",
831
+ "options": {"build": {"build_base": pathlib.Path('build')}},
832
+ })
833
+ cmd = sdist(dist)
834
+ cmd.ensure_finalized()
835
+ with quiet():
836
+ cmd.run()
837
+
838
+
839
+ def test_default_revctrl():
840
+ """
841
+ When _default_revctrl was removed from the `setuptools.command.sdist`
842
+ module in 10.0, it broke some systems which keep an old install of
843
+ setuptools (Distribute) around. Those old versions require that the
844
+ setuptools package continue to implement that interface, so this
845
+ function provides that interface, stubbed. See #320 for details.
846
+
847
+ This interface must be maintained until Ubuntu 12.04 is no longer
848
+ supported (by Setuptools).
849
+ """
850
+ (ep,) = metadata.EntryPoints._from_text(
851
+ """
852
+ [setuptools.file_finders]
853
+ svn_cvs = setuptools.command.sdist:_default_revctrl
854
+ """
855
+ )
856
+ res = ep.load()
857
+ assert hasattr(res, '__iter__')
858
+
859
+
860
+ class TestRegressions:
861
+ """
862
+ Can be removed/changed if the project decides to change how it handles symlinks
863
+ or external files.
864
+ """
865
+
866
+ @staticmethod
867
+ def files_for_symlink_in_extension_depends(tmp_path, dep_path):
868
+ return {
869
+ "external": {
870
+ "dir": {"file.h": ""},
871
+ },
872
+ "project": {
873
+ "setup.py": cleandoc(
874
+ f"""
875
+ from setuptools import Extension, setup
876
+ setup(
877
+ name="myproj",
878
+ version="42",
879
+ ext_modules=[
880
+ Extension(
881
+ "hello", sources=["hello.pyx"],
882
+ depends=[{dep_path!r}]
883
+ )
884
+ ],
885
+ )
886
+ """
887
+ ),
888
+ "hello.pyx": "",
889
+ "MANIFEST.in": "global-include *.h",
890
+ },
891
+ }
892
+
893
+ @pytest.mark.parametrize(
894
+ "dep_path", ("myheaders/dir/file.h", "myheaders/dir/../dir/file.h")
895
+ )
896
+ def test_symlink_in_extension_depends(self, monkeypatch, tmp_path, dep_path):
897
+ # Given a project with a symlinked dir and a "depends" targeting that dir
898
+ files = self.files_for_symlink_in_extension_depends(tmp_path, dep_path)
899
+ jaraco.path.build(files, prefix=str(tmp_path))
900
+ symlink_or_skip_test(tmp_path / "external", tmp_path / "project/myheaders")
901
+
902
+ # When `sdist` runs, there should be no error
903
+ members = run_sdist(monkeypatch, tmp_path / "project")
904
+ # and the sdist should contain the symlinked files
905
+ for expected in (
906
+ "myproj-42/hello.pyx",
907
+ "myproj-42/myheaders/dir/file.h",
908
+ ):
909
+ assert expected in members
910
+
911
+ @staticmethod
912
+ def files_for_external_path_in_extension_depends(tmp_path, dep_path):
913
+ head, _, tail = dep_path.partition("$tmp_path$/")
914
+ dep_path = tmp_path / tail if tail else head
915
+
916
+ return {
917
+ "external": {
918
+ "dir": {"file.h": ""},
919
+ },
920
+ "project": {
921
+ "setup.py": cleandoc(
922
+ f"""
923
+ from setuptools import Extension, setup
924
+ setup(
925
+ name="myproj",
926
+ version="42",
927
+ ext_modules=[
928
+ Extension(
929
+ "hello", sources=["hello.pyx"],
930
+ depends=[{str(dep_path)!r}]
931
+ )
932
+ ],
933
+ )
934
+ """
935
+ ),
936
+ "hello.pyx": "",
937
+ "MANIFEST.in": "global-include *.h",
938
+ },
939
+ }
940
+
941
+ @pytest.mark.parametrize(
942
+ "dep_path", ("$tmp_path$/external/dir/file.h", "../external/dir/file.h")
943
+ )
944
+ def test_external_path_in_extension_depends(self, monkeypatch, tmp_path, dep_path):
945
+ # Given a project with a "depends" targeting an external dir
946
+ files = self.files_for_external_path_in_extension_depends(tmp_path, dep_path)
947
+ jaraco.path.build(files, prefix=str(tmp_path))
948
+ # When `sdist` runs, there should be no error
949
+ members = run_sdist(monkeypatch, tmp_path / "project")
950
+ # and the sdist should not contain the external file
951
+ for name in members:
952
+ assert "file.h" not in name
953
+
954
+
955
+ def run_sdist(monkeypatch, project):
956
+ """Given a project directory, run the sdist and return its contents"""
957
+ monkeypatch.chdir(project)
958
+ with quiet():
959
+ run_setup("setup.py", ["sdist"])
960
+
961
+ archive = next((project / "dist").glob("*.tar.gz"))
962
+ with tarfile.open(str(archive)) as tar:
963
+ return set(tar.getnames())
964
+
965
+
966
+ def test_sanity_check_setuptools_own_sdist(setuptools_sdist):
967
+ with tarfile.open(setuptools_sdist) as tar:
968
+ files = tar.getnames()
969
+
970
+ # setuptools sdist should not include the .tox folder
971
+ tox_files = [name for name in files if ".tox" in name]
972
+ assert len(tox_files) == 0, f"not empty {tox_files}"
videollama2/lib/python3.10/site-packages/setuptools/tests/test_wheel.py ADDED
@@ -0,0 +1,714 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """wheel tests"""
2
+
3
+ from __future__ import annotations
4
+
5
+ import contextlib
6
+ import glob
7
+ import inspect
8
+ import os
9
+ import pathlib
10
+ import shutil
11
+ import stat
12
+ import subprocess
13
+ import sys
14
+ import zipfile
15
+ from typing import Any
16
+
17
+ import pytest
18
+ from jaraco import path
19
+ from packaging.tags import parse_tag
20
+ from packaging.utils import canonicalize_name
21
+
22
+ from pkg_resources import PY_MAJOR, Distribution, PathMetadata
23
+ from setuptools.wheel import Wheel
24
+
25
+ from .contexts import tempdir
26
+ from .textwrap import DALS
27
+
28
+ from distutils.sysconfig import get_config_var
29
+ from distutils.util import get_platform
30
+
31
+ WHEEL_INFO_TESTS = (
32
+ ('invalid.whl', ValueError),
33
+ (
34
+ 'simplewheel-2.0-1-py2.py3-none-any.whl',
35
+ {
36
+ 'project_name': 'simplewheel',
37
+ 'version': '2.0',
38
+ 'build': '1',
39
+ 'py_version': 'py2.py3',
40
+ 'abi': 'none',
41
+ 'platform': 'any',
42
+ },
43
+ ),
44
+ (
45
+ 'simple.dist-0.1-py2.py3-none-any.whl',
46
+ {
47
+ 'project_name': 'simple.dist',
48
+ 'version': '0.1',
49
+ 'build': None,
50
+ 'py_version': 'py2.py3',
51
+ 'abi': 'none',
52
+ 'platform': 'any',
53
+ },
54
+ ),
55
+ (
56
+ 'example_pkg_a-1-py3-none-any.whl',
57
+ {
58
+ 'project_name': 'example_pkg_a',
59
+ 'version': '1',
60
+ 'build': None,
61
+ 'py_version': 'py3',
62
+ 'abi': 'none',
63
+ 'platform': 'any',
64
+ },
65
+ ),
66
+ (
67
+ 'PyQt5-5.9-5.9.1-cp35.cp36.cp37-abi3-manylinux1_x86_64.whl',
68
+ {
69
+ 'project_name': 'PyQt5',
70
+ 'version': '5.9',
71
+ 'build': '5.9.1',
72
+ 'py_version': 'cp35.cp36.cp37',
73
+ 'abi': 'abi3',
74
+ 'platform': 'manylinux1_x86_64',
75
+ },
76
+ ),
77
+ )
78
+
79
+
80
+ @pytest.mark.parametrize(
81
+ ('filename', 'info'), WHEEL_INFO_TESTS, ids=[t[0] for t in WHEEL_INFO_TESTS]
82
+ )
83
+ def test_wheel_info(filename, info):
84
+ if inspect.isclass(info):
85
+ with pytest.raises(info):
86
+ Wheel(filename)
87
+ return
88
+ w = Wheel(filename)
89
+ assert {k: getattr(w, k) for k in info.keys()} == info
90
+
91
+
92
+ @contextlib.contextmanager
93
+ def build_wheel(extra_file_defs=None, **kwargs):
94
+ file_defs = {
95
+ 'setup.py': (
96
+ DALS(
97
+ """
98
+ # -*- coding: utf-8 -*-
99
+ from setuptools import setup
100
+ import setuptools
101
+ setup(**%r)
102
+ """
103
+ )
104
+ % kwargs
105
+ ).encode('utf-8'),
106
+ }
107
+ if extra_file_defs:
108
+ file_defs.update(extra_file_defs)
109
+ with tempdir() as source_dir:
110
+ path.build(file_defs, source_dir)
111
+ subprocess.check_call(
112
+ (sys.executable, 'setup.py', '-q', 'bdist_wheel'), cwd=source_dir
113
+ )
114
+ yield glob.glob(os.path.join(source_dir, 'dist', '*.whl'))[0]
115
+
116
+
117
+ def tree_set(root):
118
+ contents = set()
119
+ for dirpath, dirnames, filenames in os.walk(root):
120
+ for filename in filenames:
121
+ contents.add(os.path.join(os.path.relpath(dirpath, root), filename))
122
+ return contents
123
+
124
+
125
+ def flatten_tree(tree):
126
+ """Flatten nested dicts and lists into a full list of paths"""
127
+ output = set()
128
+ for node, contents in tree.items():
129
+ if isinstance(contents, dict):
130
+ contents = flatten_tree(contents)
131
+
132
+ for elem in contents:
133
+ if isinstance(elem, dict):
134
+ output |= {os.path.join(node, val) for val in flatten_tree(elem)}
135
+ else:
136
+ output.add(os.path.join(node, elem))
137
+ return output
138
+
139
+
140
+ def format_install_tree(tree):
141
+ return {
142
+ x.format(
143
+ py_version=PY_MAJOR,
144
+ platform=get_platform(),
145
+ shlib_ext=get_config_var('EXT_SUFFIX') or get_config_var('SO'),
146
+ )
147
+ for x in tree
148
+ }
149
+
150
+
151
+ def _check_wheel_install(
152
+ filename, install_dir, install_tree_includes, project_name, version, requires_txt
153
+ ):
154
+ w = Wheel(filename)
155
+ egg_path = os.path.join(install_dir, w.egg_name())
156
+ w.install_as_egg(egg_path)
157
+ if install_tree_includes is not None:
158
+ install_tree = format_install_tree(install_tree_includes)
159
+ exp = tree_set(install_dir)
160
+ assert install_tree.issubset(exp), install_tree - exp
161
+
162
+ metadata = PathMetadata(egg_path, os.path.join(egg_path, 'EGG-INFO'))
163
+ dist = Distribution.from_filename(egg_path, metadata=metadata)
164
+ assert dist.project_name == project_name
165
+ assert dist.version == version
166
+ if requires_txt is None:
167
+ assert not dist.has_metadata('requires.txt')
168
+ else:
169
+ # Order must match to ensure reproducibility.
170
+ assert requires_txt == dist.get_metadata('requires.txt').lstrip()
171
+
172
+
173
+ class Record:
174
+ def __init__(self, id, **kwargs):
175
+ self._id = id
176
+ self._fields = kwargs
177
+
178
+ def __repr__(self) -> str:
179
+ return f'{self._id}(**{self._fields!r})'
180
+
181
+
182
+ # Using Any to avoid possible type union issues later in test
183
+ # making a TypedDict is not worth in a test and anonymous/inline TypedDict are experimental
184
+ # https://github.com/python/mypy/issues/9884
185
+ WHEEL_INSTALL_TESTS: tuple[dict[str, Any], ...] = (
186
+ dict(
187
+ id='basic',
188
+ file_defs={'foo': {'__init__.py': ''}},
189
+ setup_kwargs=dict(
190
+ packages=['foo'],
191
+ ),
192
+ install_tree=flatten_tree({
193
+ 'foo-1.0-py{py_version}.egg': {
194
+ 'EGG-INFO': ['PKG-INFO', 'RECORD', 'WHEEL', 'top_level.txt'],
195
+ 'foo': ['__init__.py'],
196
+ }
197
+ }),
198
+ ),
199
+ dict(
200
+ id='utf-8',
201
+ setup_kwargs=dict(
202
+ description='Description accentuée',
203
+ ),
204
+ ),
205
+ dict(
206
+ id='data',
207
+ file_defs={
208
+ 'data.txt': DALS(
209
+ """
210
+ Some data...
211
+ """
212
+ ),
213
+ },
214
+ setup_kwargs=dict(
215
+ data_files=[('data_dir', ['data.txt'])],
216
+ ),
217
+ install_tree=flatten_tree({
218
+ 'foo-1.0-py{py_version}.egg': {
219
+ 'EGG-INFO': ['PKG-INFO', 'RECORD', 'WHEEL', 'top_level.txt'],
220
+ 'data_dir': ['data.txt'],
221
+ }
222
+ }),
223
+ ),
224
+ dict(
225
+ id='extension',
226
+ file_defs={
227
+ 'extension.c': DALS(
228
+ """
229
+ #include "Python.h"
230
+
231
+ #if PY_MAJOR_VERSION >= 3
232
+
233
+ static struct PyModuleDef moduledef = {
234
+ PyModuleDef_HEAD_INIT,
235
+ "extension",
236
+ NULL,
237
+ 0,
238
+ NULL,
239
+ NULL,
240
+ NULL,
241
+ NULL,
242
+ NULL
243
+ };
244
+
245
+ #define INITERROR return NULL
246
+
247
+ PyMODINIT_FUNC PyInit_extension(void)
248
+
249
+ #else
250
+
251
+ #define INITERROR return
252
+
253
+ void initextension(void)
254
+
255
+ #endif
256
+ {
257
+ #if PY_MAJOR_VERSION >= 3
258
+ PyObject *module = PyModule_Create(&moduledef);
259
+ #else
260
+ PyObject *module = Py_InitModule("extension", NULL);
261
+ #endif
262
+ if (module == NULL)
263
+ INITERROR;
264
+ #if PY_MAJOR_VERSION >= 3
265
+ return module;
266
+ #endif
267
+ }
268
+ """
269
+ ),
270
+ },
271
+ setup_kwargs=dict(
272
+ ext_modules=[
273
+ Record(
274
+ 'setuptools.Extension', name='extension', sources=['extension.c']
275
+ )
276
+ ],
277
+ ),
278
+ install_tree=flatten_tree({
279
+ 'foo-1.0-py{py_version}-{platform}.egg': [
280
+ 'extension{shlib_ext}',
281
+ {
282
+ 'EGG-INFO': [
283
+ 'PKG-INFO',
284
+ 'RECORD',
285
+ 'WHEEL',
286
+ 'top_level.txt',
287
+ ]
288
+ },
289
+ ]
290
+ }),
291
+ ),
292
+ dict(
293
+ id='header',
294
+ file_defs={
295
+ 'header.h': DALS(
296
+ """
297
+ """
298
+ ),
299
+ },
300
+ setup_kwargs=dict(
301
+ headers=['header.h'],
302
+ ),
303
+ install_tree=flatten_tree({
304
+ 'foo-1.0-py{py_version}.egg': [
305
+ 'header.h',
306
+ {
307
+ 'EGG-INFO': [
308
+ 'PKG-INFO',
309
+ 'RECORD',
310
+ 'WHEEL',
311
+ 'top_level.txt',
312
+ ]
313
+ },
314
+ ]
315
+ }),
316
+ ),
317
+ dict(
318
+ id='script',
319
+ file_defs={
320
+ 'script.py': DALS(
321
+ """
322
+ #/usr/bin/python
323
+ print('hello world!')
324
+ """
325
+ ),
326
+ 'script.sh': DALS(
327
+ """
328
+ #/bin/sh
329
+ echo 'hello world!'
330
+ """
331
+ ),
332
+ },
333
+ setup_kwargs=dict(
334
+ scripts=['script.py', 'script.sh'],
335
+ ),
336
+ install_tree=flatten_tree({
337
+ 'foo-1.0-py{py_version}.egg': {
338
+ 'EGG-INFO': [
339
+ 'PKG-INFO',
340
+ 'RECORD',
341
+ 'WHEEL',
342
+ 'top_level.txt',
343
+ {'scripts': ['script.py', 'script.sh']},
344
+ ]
345
+ }
346
+ }),
347
+ ),
348
+ dict(
349
+ id='requires1',
350
+ install_requires='foobar==2.0',
351
+ install_tree=flatten_tree({
352
+ 'foo-1.0-py{py_version}.egg': {
353
+ 'EGG-INFO': [
354
+ 'PKG-INFO',
355
+ 'RECORD',
356
+ 'WHEEL',
357
+ 'requires.txt',
358
+ 'top_level.txt',
359
+ ]
360
+ }
361
+ }),
362
+ requires_txt=DALS(
363
+ """
364
+ foobar==2.0
365
+ """
366
+ ),
367
+ ),
368
+ dict(
369
+ id='requires2',
370
+ install_requires=f"""
371
+ bar
372
+ foo<=2.0; {sys.platform!r} in sys_platform
373
+ """,
374
+ requires_txt=DALS(
375
+ """
376
+ bar
377
+ foo<=2.0
378
+ """
379
+ ),
380
+ ),
381
+ dict(
382
+ id='requires3',
383
+ install_requires=f"""
384
+ bar; {sys.platform!r} != sys_platform
385
+ """,
386
+ ),
387
+ dict(
388
+ id='requires4',
389
+ install_requires="""
390
+ foo
391
+ """,
392
+ extras_require={
393
+ 'extra': 'foobar>3',
394
+ },
395
+ requires_txt=DALS(
396
+ """
397
+ foo
398
+
399
+ [extra]
400
+ foobar>3
401
+ """
402
+ ),
403
+ ),
404
+ dict(
405
+ id='requires5',
406
+ extras_require={
407
+ 'extra': f'foobar; {sys.platform!r} != sys_platform',
408
+ },
409
+ requires_txt=DALS(
410
+ """
411
+ [extra]
412
+ """
413
+ ),
414
+ ),
415
+ dict(
416
+ id='requires_ensure_order',
417
+ install_requires="""
418
+ foo
419
+ bar
420
+ baz
421
+ qux
422
+ """,
423
+ extras_require={
424
+ 'extra': """
425
+ foobar>3
426
+ barbaz>4
427
+ bazqux>5
428
+ quxzap>6
429
+ """,
430
+ },
431
+ requires_txt=DALS(
432
+ """
433
+ foo
434
+ bar
435
+ baz
436
+ qux
437
+
438
+ [extra]
439
+ foobar>3
440
+ barbaz>4
441
+ bazqux>5
442
+ quxzap>6
443
+ """
444
+ ),
445
+ ),
446
+ dict(
447
+ id='namespace_package',
448
+ file_defs={
449
+ 'foo': {
450
+ 'bar': {'__init__.py': ''},
451
+ },
452
+ },
453
+ setup_kwargs=dict(
454
+ namespace_packages=['foo'],
455
+ packages=['foo.bar'],
456
+ ),
457
+ install_tree=flatten_tree({
458
+ 'foo-1.0-py{py_version}.egg': [
459
+ 'foo-1.0-py{py_version}-nspkg.pth',
460
+ {
461
+ 'EGG-INFO': [
462
+ 'PKG-INFO',
463
+ 'RECORD',
464
+ 'WHEEL',
465
+ 'namespace_packages.txt',
466
+ 'top_level.txt',
467
+ ]
468
+ },
469
+ {
470
+ 'foo': [
471
+ '__init__.py',
472
+ {'bar': ['__init__.py']},
473
+ ]
474
+ },
475
+ ]
476
+ }),
477
+ ),
478
+ dict(
479
+ id='empty_namespace_package',
480
+ file_defs={
481
+ 'foobar': {
482
+ '__init__.py': (
483
+ "__import__('pkg_resources').declare_namespace(__name__)"
484
+ )
485
+ },
486
+ },
487
+ setup_kwargs=dict(
488
+ namespace_packages=['foobar'],
489
+ packages=['foobar'],
490
+ ),
491
+ install_tree=flatten_tree({
492
+ 'foo-1.0-py{py_version}.egg': [
493
+ 'foo-1.0-py{py_version}-nspkg.pth',
494
+ {
495
+ 'EGG-INFO': [
496
+ 'PKG-INFO',
497
+ 'RECORD',
498
+ 'WHEEL',
499
+ 'namespace_packages.txt',
500
+ 'top_level.txt',
501
+ ]
502
+ },
503
+ {
504
+ 'foobar': [
505
+ '__init__.py',
506
+ ]
507
+ },
508
+ ]
509
+ }),
510
+ ),
511
+ dict(
512
+ id='data_in_package',
513
+ file_defs={
514
+ 'foo': {
515
+ '__init__.py': '',
516
+ 'data_dir': {
517
+ 'data.txt': DALS(
518
+ """
519
+ Some data...
520
+ """
521
+ ),
522
+ },
523
+ }
524
+ },
525
+ setup_kwargs=dict(
526
+ packages=['foo'],
527
+ data_files=[('foo/data_dir', ['foo/data_dir/data.txt'])],
528
+ ),
529
+ install_tree=flatten_tree({
530
+ 'foo-1.0-py{py_version}.egg': {
531
+ 'EGG-INFO': [
532
+ 'PKG-INFO',
533
+ 'RECORD',
534
+ 'WHEEL',
535
+ 'top_level.txt',
536
+ ],
537
+ 'foo': [
538
+ '__init__.py',
539
+ {
540
+ 'data_dir': [
541
+ 'data.txt',
542
+ ]
543
+ },
544
+ ],
545
+ }
546
+ }),
547
+ ),
548
+ )
549
+
550
+
551
+ @pytest.mark.parametrize(
552
+ 'params',
553
+ WHEEL_INSTALL_TESTS,
554
+ ids=[params['id'] for params in WHEEL_INSTALL_TESTS],
555
+ )
556
+ def test_wheel_install(params):
557
+ project_name = params.get('name', 'foo')
558
+ version = params.get('version', '1.0')
559
+ install_requires = params.get('install_requires', [])
560
+ extras_require = params.get('extras_require', {})
561
+ requires_txt = params.get('requires_txt', None)
562
+ install_tree = params.get('install_tree')
563
+ file_defs = params.get('file_defs', {})
564
+ setup_kwargs = params.get('setup_kwargs', {})
565
+ with (
566
+ build_wheel(
567
+ name=project_name,
568
+ version=version,
569
+ install_requires=install_requires,
570
+ extras_require=extras_require,
571
+ extra_file_defs=file_defs,
572
+ **setup_kwargs,
573
+ ) as filename,
574
+ tempdir() as install_dir,
575
+ ):
576
+ _check_wheel_install(
577
+ filename, install_dir, install_tree, project_name, version, requires_txt
578
+ )
579
+
580
+
581
+ def test_wheel_install_pep_503():
582
+ project_name = 'Foo_Bar' # PEP 503 canonicalized name is "foo-bar"
583
+ version = '1.0'
584
+ with (
585
+ build_wheel(
586
+ name=project_name,
587
+ version=version,
588
+ ) as filename,
589
+ tempdir() as install_dir,
590
+ ):
591
+ new_filename = filename.replace(project_name, canonicalize_name(project_name))
592
+ shutil.move(filename, new_filename)
593
+ _check_wheel_install(
594
+ new_filename,
595
+ install_dir,
596
+ None,
597
+ canonicalize_name(project_name),
598
+ version,
599
+ None,
600
+ )
601
+
602
+
603
+ def test_wheel_no_dist_dir():
604
+ project_name = 'nodistinfo'
605
+ version = '1.0'
606
+ wheel_name = f'{project_name}-{version}-py2.py3-none-any.whl'
607
+ with tempdir() as source_dir:
608
+ wheel_path = os.path.join(source_dir, wheel_name)
609
+ # create an empty zip file
610
+ zipfile.ZipFile(wheel_path, 'w').close()
611
+ with tempdir() as install_dir:
612
+ with pytest.raises(ValueError):
613
+ _check_wheel_install(
614
+ wheel_path, install_dir, None, project_name, version, None
615
+ )
616
+
617
+
618
+ def test_wheel_is_compatible(monkeypatch):
619
+ def sys_tags():
620
+ return {
621
+ (t.interpreter, t.abi, t.platform)
622
+ for t in parse_tag('cp36-cp36m-manylinux1_x86_64')
623
+ }
624
+
625
+ monkeypatch.setattr('setuptools.wheel._get_supported_tags', sys_tags)
626
+ assert Wheel('onnxruntime-0.1.2-cp36-cp36m-manylinux1_x86_64.whl').is_compatible()
627
+
628
+
629
+ def test_wheel_mode():
630
+ @contextlib.contextmanager
631
+ def build_wheel(extra_file_defs=None, **kwargs):
632
+ file_defs = {
633
+ 'setup.py': (
634
+ DALS(
635
+ """
636
+ # -*- coding: utf-8 -*-
637
+ from setuptools import setup
638
+ import setuptools
639
+ setup(**%r)
640
+ """
641
+ )
642
+ % kwargs
643
+ ).encode('utf-8'),
644
+ }
645
+ if extra_file_defs:
646
+ file_defs.update(extra_file_defs)
647
+ with tempdir() as source_dir:
648
+ path.build(file_defs, source_dir)
649
+ runsh = pathlib.Path(source_dir) / "script.sh"
650
+ os.chmod(runsh, 0o777)
651
+ subprocess.check_call(
652
+ (sys.executable, 'setup.py', '-q', 'bdist_wheel'), cwd=source_dir
653
+ )
654
+ yield glob.glob(os.path.join(source_dir, 'dist', '*.whl'))[0]
655
+
656
+ params = dict(
657
+ id='script',
658
+ file_defs={
659
+ 'script.py': DALS(
660
+ """
661
+ #/usr/bin/python
662
+ print('hello world!')
663
+ """
664
+ ),
665
+ 'script.sh': DALS(
666
+ """
667
+ #/bin/sh
668
+ echo 'hello world!'
669
+ """
670
+ ),
671
+ },
672
+ setup_kwargs=dict(
673
+ scripts=['script.py', 'script.sh'],
674
+ ),
675
+ install_tree=flatten_tree({
676
+ 'foo-1.0-py{py_version}.egg': {
677
+ 'EGG-INFO': [
678
+ 'PKG-INFO',
679
+ 'RECORD',
680
+ 'WHEEL',
681
+ 'top_level.txt',
682
+ {'scripts': ['script.py', 'script.sh']},
683
+ ]
684
+ }
685
+ }),
686
+ )
687
+
688
+ project_name = params.get('name', 'foo')
689
+ version = params.get('version', '1.0')
690
+ install_tree = params.get('install_tree')
691
+ file_defs = params.get('file_defs', {})
692
+ setup_kwargs = params.get('setup_kwargs', {})
693
+
694
+ with (
695
+ build_wheel(
696
+ name=project_name,
697
+ version=version,
698
+ install_requires=[],
699
+ extras_require={},
700
+ extra_file_defs=file_defs,
701
+ **setup_kwargs,
702
+ ) as filename,
703
+ tempdir() as install_dir,
704
+ ):
705
+ _check_wheel_install(
706
+ filename, install_dir, install_tree, project_name, version, None
707
+ )
708
+ w = Wheel(filename)
709
+ base = pathlib.Path(install_dir) / w.egg_name()
710
+ script_sh = base / "EGG-INFO" / "scripts" / "script.sh"
711
+ assert script_sh.exists()
712
+ if sys.platform != 'win32':
713
+ # Editable file mode has no effect on Windows
714
+ assert oct(stat.S_IMODE(script_sh.stat().st_mode)) == "0o777"
videollama2/lib/python3.10/site-packages/setuptools/tests/text.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ class Filenames:
2
+ unicode = 'smörbröd.py'
3
+ latin_1 = unicode.encode('latin-1')
4
+ utf_8 = unicode.encode('utf-8')
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/CUDAPluggableAllocator.h ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/cuda/CUDAGraphsC10Utils.h>
5
+ #include <c10/cuda/CUDAMacros.h>
6
+ #include <c10/cuda/CUDAStream.h>
7
+
8
+ #include <c10/cuda/CUDACachingAllocator.h>
9
+
10
+ #include <mutex>
11
+
12
+ namespace torch::cuda::CUDAPluggableAllocator {
13
+
14
+ using MallocFuncType = void*(size_t, int, cudaStream_t);
15
+ using FreeFuncType = void(void*, size_t, int, cudaStream_t);
16
+
17
+ // A CUDAPluggableAllocatorDeleterContext object is used as the `ctx`
18
+ // argument for DataPtr. We need context because a user can use
19
+ // multiple allocators in the same PyTorch program, and
20
+ // the allocators can have different free functions, such as:
21
+ // free, cudaFree, cudaFreeAsync, ncclMemFree etc.
22
+ struct TORCH_CUDA_CPP_API CUDAPluggableAllocatorDeleterContext {
23
+ explicit CUDAPluggableAllocatorDeleterContext(
24
+ std::function<FreeFuncType> free_fn,
25
+ void* data,
26
+ size_t size,
27
+ int device,
28
+ cudaStream_t stream);
29
+
30
+ void free();
31
+
32
+ private:
33
+ std::function<FreeFuncType> free_fn_;
34
+ void* data_;
35
+ size_t size_;
36
+ int device_;
37
+ cudaStream_t stream_;
38
+ };
39
+
40
+ #if defined(TORCH_HIP_VERSION)
41
+ using streamType = c10::hip::HIPStream;
42
+ #else
43
+ using streamType = c10::cuda::CUDAStream;
44
+ #endif
45
+
46
+ TORCH_CUDA_CPP_API std::shared_ptr<
47
+ c10::cuda::CUDACachingAllocator::CUDAAllocator>
48
+ getCurrentAllocator();
49
+ TORCH_CUDA_CPP_API std::shared_ptr<
50
+ c10::cuda::CUDACachingAllocator::CUDAAllocator>
51
+ createCustomAllocator(
52
+ std::function<MallocFuncType> alloc_fn,
53
+ std::function<FreeFuncType> free_fn);
54
+ TORCH_CUDA_CPP_API void changeCurrentAllocator(
55
+ const std::shared_ptr<c10::cuda::CUDACachingAllocator::CUDAAllocator>&
56
+ allocator);
57
+
58
+ struct _AllocationMetadata {
59
+ _AllocationMetadata();
60
+ _AllocationMetadata(
61
+ size_t size,
62
+ c10::DeviceIndex device_idx,
63
+ cudaStream_t stream);
64
+ size_t size;
65
+ c10::DeviceIndex device_idx;
66
+ cudaStream_t stream;
67
+ };
68
+
69
+ struct TORCH_CUDA_CPP_API CUDAPluggableAllocator
70
+ : public c10::cuda::CUDACachingAllocator::CUDAAllocator {
71
+ CUDAPluggableAllocator(
72
+ std::function<MallocFuncType> alloc_fn,
73
+ std::function<FreeFuncType> free_fn);
74
+
75
+ CUDAPluggableAllocator(CUDAPluggableAllocator& other);
76
+ CUDAPluggableAllocator& operator=(CUDAPluggableAllocator& other) = delete;
77
+
78
+ void set_init_fn(std::function<void(int)> init_fn);
79
+
80
+ void set_reset_fn(std::function<void()> reset_fn);
81
+
82
+ void set_memory_fraction_fn(
83
+ std::function<void(double, int)> memory_fraction_fn);
84
+
85
+ void set_base_alloc_fn(std::function<void*(void*, size_t*)> base_alloc_fn);
86
+
87
+ void set_record_stream_fn(
88
+ std::function<void(void* ptr, cudaStream_t stream)> record_stream_fn);
89
+
90
+ void set_begin_allocate_to_pool(
91
+ std::function<
92
+ void(int, c10::cuda::MempoolId_t, std::function<bool(cudaStream_t)>)>
93
+ capture_begin_fn);
94
+
95
+ void set_end_allocate_to_pool_fn(
96
+ std::function<void(int, c10::cuda::MempoolId_t)> capture_about_to_end_fn);
97
+
98
+ void set_release_pool(
99
+ std::function<void(int, c10::cuda::MempoolId_t)> capture_destroy_fn);
100
+
101
+ void* malloc(size_t size, c10::DeviceIndex device, cudaStream_t stream);
102
+
103
+ c10::DataPtr allocate(size_t size) override;
104
+ c10::DeleterFnPtr raw_deleter() const override;
105
+
106
+ void* raw_alloc(size_t nbytes) override;
107
+ void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream) override;
108
+ void raw_delete(void* ptr) override;
109
+ void init(int device_count) override;
110
+ bool initialized() override;
111
+ void setMemoryFraction(double fraction, c10::DeviceIndex device) override;
112
+ void emptyCache() override;
113
+ void cacheInfo(c10::DeviceIndex device, size_t* largestBlock) override;
114
+ void* getBaseAllocation(void* ptr, size_t* size) override;
115
+
116
+ void recordStream(const c10::DataPtr&, streamType stream) override;
117
+
118
+ c10::CachingDeviceAllocator::DeviceStats getDeviceStats(
119
+ c10::DeviceIndex device) override;
120
+ void resetAccumulatedStats(c10::DeviceIndex device) override;
121
+ void resetPeakStats(c10::DeviceIndex device) override;
122
+ c10::cuda::CUDACachingAllocator::SnapshotInfo snapshot() override;
123
+ void beginAllocateToPool(
124
+ c10::DeviceIndex device,
125
+ c10::cuda::MempoolId_t mempool_id,
126
+ std::function<bool(cudaStream_t)>) override;
127
+ void endAllocateToPool(
128
+ c10::DeviceIndex device,
129
+ c10::cuda::MempoolId_t mempool_id) override;
130
+ void releasePool(c10::DeviceIndex device, c10::cuda::MempoolId_t mempool_id)
131
+ override;
132
+ std::shared_ptr<void> getIpcDevPtr(std::string handle) override;
133
+ c10::cuda::CUDACachingAllocator::ShareableHandle shareIpcHandle(
134
+ void*) override;
135
+ void recordHistory(
136
+ bool enabled,
137
+ c10::cuda::CUDACachingAllocator::CreateContextFn context_recorder,
138
+ size_t alloc_trace_max_entries,
139
+ c10::cuda::CUDACachingAllocator::RecordContext when) override;
140
+ void attachOutOfMemoryObserver(
141
+ c10::cuda::CUDACachingAllocator::OutOfMemoryObserver observer) override;
142
+ void attachAllocatorTraceTracker(
143
+ c10::cuda::CUDACachingAllocator::AllocatorTraceTracker tracker) override;
144
+ std::shared_ptr<c10::cuda::CUDACachingAllocator::AllocatorState>
145
+ getCheckpointState(c10::DeviceIndex device, at::cuda::MempoolId_t id)
146
+ override;
147
+ c10::cuda::CUDACachingAllocator::CheckpointDelta setCheckpointPoolState(
148
+ c10::DeviceIndex device,
149
+ std::shared_ptr<c10::cuda::CUDACachingAllocator::AllocatorState> pps)
150
+ override;
151
+ void enablePeerAccess(c10::DeviceIndex dev, c10::DeviceIndex dev_to_access)
152
+ override;
153
+ cudaError_t memcpyAsync(
154
+ void* dst,
155
+ int dstDevice,
156
+ const void* src,
157
+ int srcDevice,
158
+ size_t count,
159
+ cudaStream_t stream,
160
+ bool p2p_enabled) override;
161
+ std::string name() override;
162
+ void copy_data(void* dest, const void* src, std::size_t count) const final;
163
+
164
+ protected:
165
+ std::function<MallocFuncType> alloc_fn_;
166
+ std::function<FreeFuncType> free_fn_;
167
+ std::function<void(int)> init_fn_;
168
+ std::function<void()> reset_fn_;
169
+ std::function<void(double, int)> memory_fraction_fn_;
170
+ std::function<void*(void*, size_t*)> base_alloc_fn_;
171
+ std::function<void(void* ptr, cudaStream_t stream)> record_stream_fn_;
172
+ std::function<
173
+ void(int, c10::cuda::MempoolId_t, std::function<bool(cudaStream_t)>)>
174
+ begin_allocate_to_pool_fn_;
175
+ std::function<void(int, c10::cuda::MempoolId_t)> end_allocate_to_pool_fn_;
176
+ std::function<void(int, c10::cuda::MempoolId_t)> relase_pool_fn_;
177
+ std::mutex allocator_mutex_;
178
+ // We do the bookeeping here in order to simplify custom allocators
179
+ std::unordered_map<void*, _AllocationMetadata> allocation_metadata_;
180
+
181
+ bool initialized_ = false;
182
+ };
183
+ } // namespace torch::cuda::CUDAPluggableAllocator
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Event.h ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef THCP_EVENT_INC
2
+ #define THCP_EVENT_INC
3
+
4
+ #include <ATen/cuda/CUDAEvent.h>
5
+ #include <torch/csrc/python_headers.h>
6
+
7
+ struct THCPEvent {
8
+ PyObject_HEAD at::cuda::CUDAEvent cuda_event;
9
+ };
10
+ extern PyObject* THCPEventClass;
11
+
12
+ void THCPEvent_init(PyObject* module);
13
+
14
+ inline bool THCPEvent_Check(PyObject* obj) {
15
+ return THCPEventClass && PyObject_IsInstance(obj, THCPEventClass);
16
+ }
17
+
18
+ #endif // THCP_EVENT_INC
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/GdsFile.h ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ #ifndef THCP_GDSFILE_INC
2
+ #define THCP_GDSFILE_INC
3
+
4
+ #include <torch/csrc/python_headers.h>
5
+
6
+ void initGdsBindings(PyObject* module);
7
+ #endif // THCP_GDSFILE_INC
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Module.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef THCP_CUDA_MODULE_INC
2
+ #define THCP_CUDA_MODULE_INC
3
+
4
+ PyObject* THCPModule_getDevice_wrap(PyObject* self);
5
+ PyObject* THCPModule_setDevice_wrap(PyObject* self, PyObject* arg);
6
+ PyObject* THCPModule_getDeviceName_wrap(PyObject* self, PyObject* arg);
7
+ PyObject* THCPModule_getDriverVersion(PyObject* self);
8
+ PyObject* THCPModule_isDriverSufficient(PyObject* self);
9
+ PyObject* THCPModule_getCurrentBlasHandle_wrap(PyObject* self);
10
+
11
+ #endif
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Stream.h ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef THCP_STREAM_INC
2
+ #define THCP_STREAM_INC
3
+
4
+ #include <c10/cuda/CUDAStream.h>
5
+ #include <torch/csrc/Stream.h>
6
+ #include <torch/csrc/python_headers.h>
7
+
8
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
9
+ struct THCPStream : THPStream {
10
+ at::cuda::CUDAStream cuda_stream;
11
+ };
12
+ extern PyObject* THCPStreamClass;
13
+
14
+ void THCPStream_init(PyObject* module);
15
+
16
+ inline bool THCPStream_Check(PyObject* obj) {
17
+ return THCPStreamClass && PyObject_IsInstance(obj, THCPStreamClass);
18
+ }
19
+
20
+ #endif // THCP_STREAM_INC
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/THCP.h ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ #ifndef THCP_H
2
+ #define THCP_H
3
+
4
+ #include <torch/csrc/THP.h>
5
+ #include <torch/csrc/cuda/Event.h>
6
+ #include <torch/csrc/cuda/Module.h>
7
+ #include <torch/csrc/cuda/Stream.h>
8
+ #include <torch/csrc/python_headers.h>
9
+
10
+ #endif
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/comm.h ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/cuda/ATenCUDAGeneral.h>
5
+ #include <ATen/cuda/CUDAContext.h>
6
+ #include <torch/csrc/Export.h>
7
+ #include <optional>
8
+
9
+ #include <cstddef>
10
+ #include <vector>
11
+
12
+ namespace torch::cuda {
13
+
14
+ using tensor_list2d = std::vector<std::vector<at::Tensor>>;
15
+
16
+ TORCH_CUDA_CU_API std::vector<at::Tensor>& broadcast_out(
17
+ const at::Tensor& tensor,
18
+ std::vector<at::Tensor>& out_tensors);
19
+ TORCH_CUDA_CU_API std::vector<at::Tensor> broadcast(
20
+ const at::Tensor& tensor,
21
+ at::IntArrayRef devices);
22
+ TORCH_CUDA_CU_API tensor_list2d broadcast_coalesced(
23
+ at::TensorList tensors,
24
+ at::IntArrayRef devices,
25
+ size_t buffer_size);
26
+
27
+ TORCH_CUDA_CU_API std::vector<at::Tensor>& scatter_out(
28
+ const at::Tensor& tensor,
29
+ std::vector<at::Tensor>& out_tensors,
30
+ int64_t dim = 0,
31
+ const std::optional<std::vector<std::optional<at::cuda::CUDAStream>>>&
32
+ streams = std::nullopt);
33
+
34
+ TORCH_CUDA_CU_API std::vector<at::Tensor> scatter(
35
+ const at::Tensor& tensor,
36
+ at::IntArrayRef devices,
37
+ const std::optional<std::vector<int64_t>>& chunk_sizes = std::nullopt,
38
+ int64_t dim = 0,
39
+ const std::optional<std::vector<std::optional<at::cuda::CUDAStream>>>&
40
+ streams = std::nullopt);
41
+
42
+ TORCH_CUDA_CU_API at::Tensor& gather_out(
43
+ at::TensorList tensors,
44
+ at::Tensor& out_tensor,
45
+ int64_t dim);
46
+
47
+ TORCH_CUDA_CU_API at::Tensor gather(
48
+ at::TensorList tensors,
49
+ int64_t dim,
50
+ std::optional<int32_t> destination_index);
51
+
52
+ } // namespace torch::cuda
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/device_set.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/cuda/CUDAMacros.h>
4
+ #include <bitset>
5
+ #include <cstddef>
6
+
7
+ namespace torch {
8
+
9
+ using device_set = std::bitset<C10_COMPILE_TIME_MAX_GPUS>;
10
+
11
+ } // namespace torch
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/memory_snapshot.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <cstdint>
5
+ #include <optional>
6
+ #include <string>
7
+
8
+ namespace torch::cuda {
9
+
10
+ // C++-only versions of these, for python use
11
+ // those defined in cuda/Module.cpp which also record python state.
12
+ TORCH_CUDA_CU_API void _record_memory_history(
13
+ bool enabled,
14
+ bool record_context = true,
15
+ int64_t trace_alloc_max_entries = 1,
16
+ bool trace_alloc_record_context = false,
17
+ bool record_cpp_context = false);
18
+
19
+ TORCH_CUDA_CU_API void _record_memory_history(
20
+ std::optional<std::string> enabled = "all",
21
+ std::optional<std::string> context = "all",
22
+ const std::string& stacks = "all",
23
+ size_t max_entries = SIZE_MAX);
24
+
25
+ TORCH_CUDA_CU_API std::string _memory_snapshot_pickled();
26
+
27
+ } // namespace torch::cuda
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/nccl.h ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/cuda/CUDAContext.h>
5
+
6
+ #include <cstddef>
7
+ #include <optional>
8
+ #include <vector>
9
+
10
+ // NCCL BFloat16 is enabled only for CUDA 11+ and NCCL versions 2.10+, or for
11
+ // HIP 3.1+
12
+ #if defined(__CUDA_BF16_TYPES_EXIST__)
13
+ #define HAS_NCCL_BF16_DATATYPE \
14
+ ((NCCL_MAJOR > 2) || (NCCL_MAJOR == 2) && (NCCL_MINOR >= 10))
15
+ #elif defined(USE_ROCM) && (TORCH_HIP_VERSION >= 301)
16
+ #define HAS_NCCL_BF16_DATATYPE 1
17
+ #else
18
+ #define HAS_NCCL_BF16_DATATYPE 0
19
+ #endif
20
+
21
+ namespace torch::cuda::nccl {
22
+
23
+ /* The following are copied from <nccl.h> and redefined in torch::cuda::nccl
24
+ * namespace */
25
+ /* pytorch should only use the following definition within pytorch scope */
26
+
27
+ /* Opaque handle to communicator to ncclComm*, this will reinterpret as ncclComm
28
+ * in nccl.cpp */
29
+ typedef void* ncclComm_t;
30
+
31
+ /** redefine nccl unique ID in torch scope. this should be identical to native
32
+ * nccl impl. */
33
+ #define NCCL_UNIQUE_ID_BYTES 128
34
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
35
+ typedef struct {
36
+ char internal[NCCL_UNIQUE_ID_BYTES];
37
+ } ncclUniqueId;
38
+
39
+ /* Error type */
40
+ enum class ncclResult {
41
+ Success = 0,
42
+ UnhandledCudaError = 1,
43
+ SystemError = 2,
44
+ InternalError = 3,
45
+ InvalidArgument = 4,
46
+ InvalidUsage = 5,
47
+ RemoteError = 6,
48
+ InProgress = 7,
49
+ NumResults = 8
50
+ };
51
+
52
+ /* Reduction operation selector */
53
+ enum class ncclRedOp { Sum = 0, Prod = 1, Max = 2, Min = 3, NumOps = 4 };
54
+
55
+ /* Data types */
56
+ enum class ncclDataType {
57
+ Int8 = 0,
58
+ Char = 0,
59
+ Uint8 = 1,
60
+ Int32 = 2,
61
+ Int = 2,
62
+ Uint32 = 3,
63
+ Int64 = 4,
64
+ Uint64 = 5,
65
+ Float16 = 6,
66
+ Half = 6,
67
+ Float32 = 7,
68
+ Float = 7,
69
+ Float64 = 8,
70
+ Double = 8,
71
+ Bfloat16 = 9,
72
+ NumTypes = 10
73
+ };
74
+
75
+ // RAII helper class to manage NCCL group API and CUDA free mutex.
76
+ // The destructor is allowed to throw since this helper class only
77
+ // manages group and lock lifetimes.
78
+ struct AutoNcclGroup {
79
+ AutoNcclGroup();
80
+ AutoNcclGroup(ncclComm_t comm, bool comm_nonblocking);
81
+ ~AutoNcclGroup() noexcept(false);
82
+ ncclComm_t comm_;
83
+ bool comm_nonblocking_;
84
+ };
85
+
86
+ // NOTE: this is exposed only so that python_nccl.cpp can use some of these helpers.
87
+ // Don't use them outside of these files.
88
+ namespace detail {
89
+
90
+ TORCH_CUDA_CPP_API void throw_nccl_error(ncclResult status);
91
+
92
+ inline void NCCL_CHECK(ncclResult status) {
93
+ if (status != ncclResult::Success) {
94
+ throw_nccl_error(status);
95
+ }
96
+ }
97
+
98
+ TORCH_CUDA_CPP_API at::ArrayRef<ncclComm_t> get_communicators(
99
+ at::TensorList inputs);
100
+ TORCH_CUDA_CPP_API void check_inputs(
101
+ at::TensorList inputs,
102
+ at::TensorList outputs,
103
+ int input_multiplier,
104
+ int output_multiplier);
105
+ TORCH_CUDA_CPP_API void check_inputs(
106
+ at::TensorList inputs,
107
+ const at::Tensor& output,
108
+ int root,
109
+ int input_multiplier,
110
+ int output_multiplier);
111
+
112
+ } // namespace detail
113
+
114
+ using comm_list = std::vector<ncclComm_t>;
115
+ using stream_list = std::vector<std::optional<at::cuda::CUDAStream>>;
116
+
117
+ TORCH_CUDA_CPP_API std::uint64_t version();
118
+ TORCH_CUDA_CPP_API const char* version_suffix();
119
+
120
+ bool is_available(at::TensorList tensors);
121
+
122
+ TORCH_CUDA_CPP_API void get_unique_id(ncclUniqueId& id);
123
+ TORCH_CUDA_CPP_API ncclComm_t
124
+ comm_init_rank(int nranks, const ncclUniqueId& comm_id, int rank);
125
+ TORCH_CUDA_CPP_API void comm_destroy(ncclComm_t comm);
126
+
127
+ TORCH_CUDA_CPP_API void broadcast(
128
+ at::TensorList tensors,
129
+ const stream_list& streams = {},
130
+ const comm_list& user_comms = {});
131
+
132
+ size_t get_max_count();
133
+
134
+ TORCH_CUDA_CPP_API void reduce(
135
+ const std::vector<at::Tensor>& inputs,
136
+ at::Tensor& output,
137
+ int32_t root = 0,
138
+ int32_t op = static_cast<int>(ncclRedOp::Sum),
139
+ const stream_list& streams = {},
140
+ const comm_list& user_comms = {});
141
+
142
+ TORCH_CUDA_CPP_API void reduce(
143
+ std::vector<at::Tensor>& inputs,
144
+ int32_t root = 0,
145
+ int32_t op = static_cast<int>(ncclRedOp::Sum),
146
+ const stream_list& streams = {},
147
+ const comm_list& user_comms = {});
148
+
149
+ TORCH_CUDA_CPP_API void all_reduce(
150
+ const std::vector<at::Tensor>& inputs,
151
+ std::vector<at::Tensor>& outputs,
152
+ int32_t op = static_cast<int>(ncclRedOp::Sum),
153
+ const stream_list& streams = {},
154
+ const comm_list& user_comms = {});
155
+
156
+ TORCH_CUDA_CPP_API void reduce_scatter(
157
+ const std::vector<at::Tensor>& inputs,
158
+ std::vector<at::Tensor>& outputs,
159
+ int32_t op = static_cast<int>(ncclRedOp::Sum),
160
+ const stream_list& streams = {},
161
+ const comm_list& user_comms = {});
162
+
163
+ TORCH_CUDA_CPP_API void scatter(
164
+ const std::vector<at::Tensor>& inputs,
165
+ at::Tensor& outputs,
166
+ ncclComm_t comm,
167
+ at::cuda::CUDAStream& stream,
168
+ int32_t root = 0);
169
+
170
+ TORCH_CUDA_CPP_API void all_gather(
171
+ const std::vector<at::Tensor>& inputs,
172
+ std::vector<at::Tensor>& outputs,
173
+ const stream_list& streams = {},
174
+ const comm_list& user_comms = {});
175
+
176
+ TORCH_CUDA_CPP_API void gather(
177
+ const at::Tensor& inputs,
178
+ std::vector<at::Tensor>& outputs,
179
+ ncclComm_t comm,
180
+ at::cuda::CUDAStream& stream,
181
+ int32_t root = 0);
182
+
183
+ TORCH_CUDA_CPP_API void all2all_single_equal_split(
184
+ at::Tensor& input,
185
+ at::Tensor& output,
186
+ int size,
187
+ ncclComm_t comm,
188
+ at::cuda::CUDAStream& stream);
189
+
190
+ TORCH_CUDA_CPP_API void all2all_single_unequal_split(
191
+ void* sendbuff,
192
+ const size_t* sendcounts,
193
+ const size_t* senddispls,
194
+ void* recvbuff,
195
+ const size_t* recvcounts,
196
+ const size_t* recvdispls,
197
+ size_t size,
198
+ c10::ScalarType type,
199
+ ncclComm_t comm,
200
+ at::cuda::CUDAStream& stream);
201
+
202
+ TORCH_CUDA_CPP_API void all2all(
203
+ std::vector<at::Tensor>& outputTensors,
204
+ std::vector<at::Tensor>& inputTensors,
205
+ ncclComm_t _comm,
206
+ at::cuda::CUDAStream& stream);
207
+
208
+ TORCH_CUDA_CPP_API void send(
209
+ const at::Tensor& input,
210
+ ncclComm_t comm,
211
+ at::cuda::CUDAStream stream,
212
+ int dst);
213
+
214
+ TORCH_CUDA_CPP_API void recv(
215
+ at::Tensor& output,
216
+ ncclComm_t comm,
217
+ at::cuda::CUDAStream stream,
218
+ int src);
219
+ } // namespace torch::cuda::nccl
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_comm.h ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ namespace torch::cuda::python {
4
+
5
+ void initCommMethods(PyObject* module);
6
+
7
+ } // namespace torch::cuda::python
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_nccl.h ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ PyObject* THCPModule_nccl_version(PyObject* self, PyObject* args);
6
+ PyObject* THCPModule_nccl_version_suffix(PyObject* self, PyObject* args);
7
+ PyObject* THCPModule_nccl_unique_id(PyObject* self, PyObject* args);
8
+ PyObject* THCPModule_nccl_init_rank(PyObject* self, PyObject* args);
9
+ PyObject* THCPModule_nccl_reduce(PyObject* self, PyObject* args);
10
+ PyObject* THCPModule_nccl_all_reduce(PyObject* self, PyObject* args);
11
+ PyObject* THCPModule_nccl_broadcast(PyObject* self, PyObject* args);
12
+ PyObject* THCPModule_nccl_all_gather(PyObject* self, PyObject* args);
13
+ PyObject* THCPModule_nccl_reduce_scatter(PyObject* self, PyObject* args);
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_log.h ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <torch/csrc/Export.h>
3
+ #include <memory>
4
+ #include <ostream>
5
+ #include <string>
6
+ #include <unordered_map>
7
+
8
+ // `TorchScript` offers a simple logging facility that can enabled by setting an
9
+ // environment variable `PYTORCH_JIT_LOG_LEVEL`.
10
+
11
+ // Logging is enabled on a per file basis. To enable logging in
12
+ // `dead_code_elimination.cpp`, `PYTORCH_JIT_LOG_LEVEL` should be
13
+ // set to `dead_code_elimination.cpp` or, simply, to `dead_code_elimination`
14
+ // (i.e. `PYTORCH_JIT_LOG_LEVEL=dead_code_elimination`).
15
+
16
+ // Multiple files can be logged by separating each file name with a colon `:` as
17
+ // in the following example,
18
+ // `PYTORCH_JIT_LOG_LEVEL=dead_code_elimination:guard_elimination`
19
+
20
+ // There are 3 logging levels available for your use ordered by the detail level
21
+ // from lowest to highest.
22
+
23
+ // * `GRAPH_DUMP` should be used for printing entire graphs after optimization
24
+ // passes
25
+ // * `GRAPH_UPDATE` should be used for reporting graph transformations (i.e.
26
+ // node deletion, constant folding, etc)
27
+ // * `GRAPH_DEBUG` should be used for providing information useful for debugging
28
+ // the internals of a particular optimization pass or analysis
29
+
30
+ // The default logging level is `GRAPH_DUMP` meaning that only `GRAPH_DUMP`
31
+ // statements will be enabled when one specifies a file(s) in
32
+ // `PYTORCH_JIT_LOG_LEVEL`.
33
+
34
+ // `GRAPH_UPDATE` can be enabled by prefixing a file name with an `>` as in
35
+ // `>alias_analysis`.
36
+ // `GRAPH_DEBUG` can be enabled by prefixing a file name with an `>>` as in
37
+ // `>>alias_analysis`.
38
+ // `>>>` is also valid and **currently** is equivalent to `GRAPH_DEBUG` as there
39
+ // is no logging level that is higher than `GRAPH_DEBUG`.
40
+
41
+ namespace torch::jit {
42
+
43
+ struct Node;
44
+ struct Graph;
45
+
46
+ enum class JitLoggingLevels {
47
+ GRAPH_DUMP = 0,
48
+ GRAPH_UPDATE,
49
+ GRAPH_DEBUG,
50
+ };
51
+
52
+ TORCH_API std::string get_jit_logging_levels();
53
+
54
+ TORCH_API void set_jit_logging_levels(std::string level);
55
+
56
+ TORCH_API void set_jit_logging_output_stream(std::ostream& out_stream);
57
+
58
+ TORCH_API std::ostream& get_jit_logging_output_stream();
59
+
60
+ TORCH_API std::string getHeader(const Node* node);
61
+
62
+ TORCH_API std::string log_function(const std::shared_ptr<Graph>& graph);
63
+
64
+ TORCH_API ::torch::jit::JitLoggingLevels jit_log_level();
65
+
66
+ // Prefix every line in a multiline string \p IN_STR with \p PREFIX.
67
+ TORCH_API std::string jit_log_prefix(
68
+ const std::string& prefix,
69
+ const std::string& in_str);
70
+
71
+ TORCH_API std::string jit_log_prefix(
72
+ ::torch::jit::JitLoggingLevels level,
73
+ const char* fn,
74
+ int l,
75
+ const std::string& in_str);
76
+
77
+ TORCH_API bool is_enabled(
78
+ const char* cfname,
79
+ ::torch::jit::JitLoggingLevels level);
80
+
81
+ TORCH_API std::ostream& operator<<(
82
+ std::ostream& out,
83
+ ::torch::jit::JitLoggingLevels level);
84
+
85
+ #define JIT_LOG(level, ...) \
86
+ if (is_enabled(__FILE__, level)) { \
87
+ ::torch::jit::get_jit_logging_output_stream() \
88
+ << ::torch::jit::jit_log_prefix( \
89
+ level, __FILE__, __LINE__, ::c10::str(__VA_ARGS__)); \
90
+ }
91
+
92
+ // tries to reconstruct original python source
93
+ #define SOURCE_DUMP(MSG, G) \
94
+ JIT_LOG( \
95
+ ::torch::jit::JitLoggingLevels::GRAPH_DUMP, \
96
+ MSG, \
97
+ "\n", \
98
+ ::torch::jit::log_function(G));
99
+ // use GRAPH_DUMP for dumping graphs after optimization passes
100
+ #define GRAPH_DUMP(MSG, G) \
101
+ JIT_LOG( \
102
+ ::torch::jit::JitLoggingLevels::GRAPH_DUMP, MSG, "\n", (G)->toString());
103
+ // use GRAPH_UPDATE for reporting graph transformations (i.e. node deletion,
104
+ // constant folding, CSE)
105
+ #define GRAPH_UPDATE(...) \
106
+ JIT_LOG(::torch::jit::JitLoggingLevels::GRAPH_UPDATE, __VA_ARGS__);
107
+ // use GRAPH_DEBUG to provide information useful for debugging a particular opt
108
+ // pass
109
+ #define GRAPH_DEBUG(...) \
110
+ JIT_LOG(::torch::jit::JitLoggingLevels::GRAPH_DEBUG, __VA_ARGS__);
111
+ // use GRAPH_EXPORT to export a graph so that the IR can be loaded by a script
112
+ #define GRAPH_EXPORT(MSG, G) \
113
+ JIT_LOG( \
114
+ ::torch::jit::JitLoggingLevels::GRAPH_DEBUG, \
115
+ MSG, \
116
+ "\n<GRAPH_EXPORT>\n", \
117
+ (G)->toString(), \
118
+ "</GRAPH_EXPORT>");
119
+
120
+ #define GRAPH_DUMP_ENABLED \
121
+ (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_DUMP))
122
+ #define GRAPH_UPDATE_ENABLED \
123
+ (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_UPDATE))
124
+ #define GRAPH_DEBUG_ENABLED \
125
+ (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_DEBUG))
126
+ } // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_opt_limit.h ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once
#include <torch/csrc/Export.h>
#include <string>
#include <unordered_map>

// `TorchScript` offers a simple optimization limit checker
// that can be configured through environment variable `PYTORCH_JIT_OPT_LIMIT`.
// The purpose is to limit how many optimization you can make per pass.
// This is useful for debugging any passes.

// Opt limit checker is enabled on a per file basis (hence per pass). For
// example, in `constant_propagation.cpp`, `PYTORCH_JIT_OPT_LIMIT` should be set
// to `constant_propagation=<opt_limit>` where <opt_limit> is the number of
// optimizations you want to make for the pass. (i.e.
// `PYTORCH_JIT_OPT_LIMIT="constant_propagation=<opt_limit>"`).

// Multiple files can be configured by separating each file name with a colon
// `:` as in the following example,
// `PYTORCH_JIT_OPT_LIMIT="constant_propagation=<opt_limit>:dead_code_elimination=<opt_limit>"`

// You can call opt limiter by calling JIT_OPT_ALLOWED. It will return true if
// we haven't reached the optimization limit yet. Otherwise, it will return
// false. Typical usage:

// if (!JIT_OPT_ALLOWED) {
//   GRAPH_DUMP(...); //supplied from jit_log
//   return;
// }

namespace torch::jit {

// Returns true while the calling pass (identified by source file name) is
// still under its configured optimization budget; presumably consumes one
// unit of the budget per call — see jit_opt_limit.cpp for the exact policy.
TORCH_API bool opt_limit(const char* pass_name);

// Checks the opt limit for the current translation unit's source file.
#define JIT_OPT_ALLOWED opt_limit(__FILE__)

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// Rewrites eligible conditional nodes in `graph` into if-then-else form.
// Returns whether the graph was modified (bool return is the convention
// for JIT passes in this codebase).
TORCH_API bool AddIfThenElseOp(std::shared_ptr<Graph>& graph);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// Produces a canonicalized copy of `graph`. When `keep_unique_names` is
// false, value names are dropped/renumbered as part of canonicalization.
TORCH_API std::shared_ptr<Graph> Canonicalize(
    const std::shared_ptr<Graph>& graph,
    bool keep_unique_names = true);

// Canonicalizes the order/representation of the graph's outputs in place.
TORCH_API void CanonicalizeOutputs(std::shared_ptr<Graph>& graph);

// Returns the first (find_first == true) or last use of `v` in the graph's
// topological order, or nullopt if `v` has no uses.
TORCH_API std::optional<const Use> firstOrLastUse(Value* v, bool find_first);

// Compares two uses by position; `checking_before` selects the direction of
// the comparison.
TORCH_API bool isBeforeOrAfter(
    const Use& a,
    const Use& b,
    bool checking_before);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize_graph_fuser_ops.h ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// Canonicalizes ops in `graph` into the forms expected by the graph fuser.
TORCH_API void CanonicalizeOps(const std::shared_ptr<Graph>& graph);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+
2
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// Verifies that fusion was applied as strictly requested for `graph`;
// see check_strict_fusion.cpp for the failure behavior (likely a throw).
TORCH_API void CheckStrictFusion(std::shared_ptr<Graph>& graph);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// Eliminates common inputs among `aten::cat` ops.
// Returns whether the graph was modified.
TORCH_API bool EliminateConcatCommonInputs(const std::shared_ptr<Graph>& graph);

// Expands `aten::cat` ops into `aten::copy` ops and eliminates redundancies
// in the buffers used for concatenation if possible.
TORCH_API void ExpandConcatAndEliminateRedundancy(
    const std::shared_ptr<Graph>& graph);

// Merges chains of concatenations into fewer `aten::cat` ops.
// Returns whether the graph was modified.
TORCH_API bool CombineConcats(const std::shared_ptr<Graph>& graph);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// Runs constant propagation on all objects unless ignore_custom_classes is
// specified as true, in which case user defined classes are skipped. This is
// useful to prevent early fusion of packing operations, which end up lowering
// away information about their constructors (e.g. packed::linear_clamp_prepack
// and prepacked::conv2d_clamp_prepack)
// Returns True if the pass made a change to the graph
TORCH_API bool ConstantPropagation(
    std::shared_ptr<Graph>& graph,
    bool ignore_custom_classes = false);

// runs constant propagation only on ops that have non-aliasing inputs & outputs
// Returns True if the pass made a change to the graph
TORCH_API bool ConstantPropagationImmutableTypes(std::shared_ptr<Graph>& graph);

// Runs the node if its inputs are constants. Callers of this function must
// make their own determination if constant prop is appropriate - for example
// non-deterministic ops or ops with side effects. If ignore_custom_classes is
// specified, nodes that output user defined classes are not run.
// Returns the resulting stack on success, nullopt otherwise.
TORCH_API std::optional<Stack> runNodeIfInputsAreConstant(
    const Node* node,
    bool ignore_custom_classes = false,
    AliasDb* db = nullptr);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// Groups side-effect-free regions of `graph` into functional subgraphs.
TORCH_API void CreateFunctionalGraphs(const std::shared_ptr<Graph>& graph);

// Inverse of CreateFunctionalGraphs: inlines functional subgraphs back
// into the enclosing graph.
TORCH_API void InlineFunctionalGraphs(const std::shared_ptr<Graph>& graph);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// Decomposes composite ops in `graph` into sequences of simpler ops.
TORCH_API void DecomposeOps(std::shared_ptr<Graph>& graph);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/erase_number_types.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// Erase NumberType information. This is necessary for and only used in
// exporting to ONNX. This pass ensures that no remaining Values have
// NumberType types, replacing them with tensors.
// The following things are done to erase NumberType info:
// - NumberType outputs are changed to DynamicType.
// - prim::Constant nodes which are numbers get changed into 0-dim tensors of
//   the corresponding type
// - prim::TensorToNum, aten::Float, aten::Int and prim::NumToTensor nodes
//   are erased.
//
// The pass assumes that DCE will be called sometime after.
TORCH_API void EraseNumberTypes(const std::shared_ptr<Graph>& graph);
// Same transformation, restricted to a single block.
TORCH_API void EraseNumberTypesOnBlock(Block* block);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fixup_trace_scope_blocks.h ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// Directly after tracing, we have an ill-formed graph with blocks inserted.
// Example:
//
// graph(%self : ClassType<Module>,
//       %input.1 : Float(3, 4)):
//   %1 : ClassType<Module> = prim::GetAttr[name="relu1"](%self)
//   %2 : ClassType<Module> = prim::GetAttr[name="relu2"](%self)
//   %3 : ClassType<Module> = prim::GetAttr[name="rrr"](%2)
//    = prim::TracedModuleForward[scope="__module.relu1"]()
//     block0():
//       %input : Float(3, 4) = aten::relu(%input.1),
//       -> ()
//    = prim::TracedModuleForward[scope="__module.relu2"](),
//     block0():
//        = prim::TracedModuleForward[scope="__module.relu2.rrr"](),
//         block0():
//           %6 : Float(3, 4) = aten::relu(%input),
//           -> ()
//       -> ()
//   return (%6)
//
// In this pass, we:
//   1) Lift Value defs to as high of a scope as needed to ensure that
//      they dominate all their uses. For example, `input` in the above
//      graph needs to be lifted to the top-level block so that its use
//      in the second `relu` operator is dominated.
//   2) Lambda lift the blocks. This ensures that all values used within
//      each scope have their defs captured.
//   3) Convert the scope blocks into methods on their respective Modules,
//      and convert TracedModuleForward nodes to CallMethod nodes into those
//      methods.
//
//  Then, we'll have a well-formed graph with proper method calls.
TORCH_API void FixupTraceScopeBlocks(
    std::shared_ptr<Graph>& graph,
    Module* self);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_ops_to_mkldnn.h ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// Converts operators & their parameters to mkldnn if it is profitable
// Currently encompassing Conv2d and Conv3d, and Linear
// Op must be in float32 and mkldnn must be built
// This pass only works on frozen graph
TORCH_API void ConvertFrozenOpsToMKLDNN(std::shared_ptr<Graph>& graph);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// This pass removes 'grad_of' nodes, replacing them with conditionals of
// the form:
//   if any_defined(inputs):
//     outputs = <original_computation>
//   else:
//     outputs = undefineds
TORCH_API void LowerGradOf(Graph& g);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_graph.h ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

using ModulePtr = c10::intrusive_ptr<c10::ivalue::Object>;

// Given the graph of a method whose first argument is %self, lower it to a
// graph where all attribute accesses are replaced with explicit inputs of the
// graph (rather than results of prim::GetAttr executed on %self).
//
// Returns a tuple (graph, parameters) where the last module.parameters.size()
// inputs to the graph are the trainable parameters used in this method. The
// remaining inputs are the true inputs to the function.
TORCH_API std::pair<std::shared_ptr<Graph>, std::vector<IValue>> LowerGraph(
    Graph& graph,
    const ModulePtr& self);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <ATen/Config.h>
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/subgraph_rewrite.h>

#if AT_MKLDNN_ENABLED()

#include <ideep/tensor.hpp>

#endif // AT_MKLDNN_ENABLED()

namespace torch::jit {

#if AT_MKLDNN_ENABLED()

namespace mkldnn {

// Maps a fusible post-op name ("none", "relu", ...) to the match filters that
// gate the corresponding subgraph rewrite.
// NOTE(review): a non-inline `static` map defined in a header gives each
// translation unit its own copy; upstream presumably accepts this — confirm
// before relying on address identity across TUs.
const static std::map<std::string, std::vector<torch::jit::MatchFilter>>
    fusion_rewrite_map = {
        {"none", {}},
        {"relu", {}},
};

} // namespace mkldnn

#endif // AT_MKLDNN_ENABLED()

// Fuses convolutions with elementwise post-ops (per fusion_rewrite_map).
void FuseConvWithEltwise(std::shared_ptr<Graph>& graph);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// This pass converts aten ops to a normalized form. It is
// run immediately after IR generation in both the tracer and compiler,
// so downstream consumers of the IR do not need handle ops in their
// pre-normalized form.
// Currently only handles normalization of op aliases.
TORCH_API void NormalizeOps(const std::shared_ptr<Graph>& graph);

// Returns the alias -> canonical-op mapping used by NormalizeOps.
const std::unordered_map<Symbol, Symbol>& getOperatorAliasMap();

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

/* `getCustomPrePasses()` returns a vector of passes that will be executed
 * after differentiation but before any fusion. This is the de-facto location
 * for compiler backends to insert passes.
 *
 * `getCustomPostPasses()` returns a vector of passes that will be
 * executed after differentiation and after fusion (if any). This is the
 * location for fusion cleanup passes if they are needed.
 *
 * Static registration of a pass can be done by creating a global
 * `Register{Pre,Post}Pass r(Pass)` variable in a compilation unit.
 *
 * pass_manager.h uses a Meyer's singleton to store a vector of `Pass`es, which
 * modify the IR graph in place.
 */

namespace torch::jit {

// A pass modifies a Graph in place.
using GraphPass = std::function<void(std::shared_ptr<Graph>&)>;

// Since Passes are std::functions, we associate a UUID to each pass, this way
// if we want to deregister a pass, we have something to reference it by.
using GraphPassNameType = unsigned int;

// Graph pass entries have a name associated with them
using GraphPassEntry = std::pair<GraphPass, GraphPassNameType>;

// Return currently registered passes. Passes are stored in a static vector
TORCH_API std::vector<std::pair<GraphPass, GraphPassNameType>>&
getCustomPostPasses();
TORCH_API std::vector<std::pair<GraphPass, GraphPassNameType>>&
getCustomPrePasses();

// Register a pass; the returned ID can later be used to deregister it.
TORCH_API GraphPassNameType registerPostPass(GraphPass p);
TORCH_API GraphPassNameType registerPrePass(GraphPass p);

// Look up pass by name passed in, remove it from registered passes
TORCH_API void clearPostPass(GraphPassNameType p);
TORCH_API void clearPrePass(GraphPassNameType p);

// Remove all passes
TORCH_API void clearAllPostPasses();
TORCH_API void clearAllPrePasses();

// LEGACY CALL
struct TORCH_API RegisterPostPass {
  RegisterPostPass(GraphPass p);
};

using RegisterPass = RegisterPostPass;

/*
 * PassManager is a wrapper on the register/clear PostPass functions above. It
 * will register the pass provided in "registerPass" and will hold on to its
 * associated name that way clearPass can be later called and will delete the
 * pass used to register when called.
 *
 * PassManager is templated because we want static variables based on a
 * particular GraphPass. When deriving from PassManager, you should send as the
 * template parameter your derived class as you would for the curiously
 * recurring template pattern. This template parameter isn't actually used and
 * is simply done to prevent static members from being shared across derived
 * types.
 */
template <typename DerivedType>
struct C10_EXPORT PassManager {
 private:
  // We want this class to be abstract because it is only meant to be used
  // through derivation (see the CRTP note above); the pure virtual function
  // keeps it from being instantiated directly.
  virtual void abstract() = 0;

 protected:
  /*
   * isRegistered() will return if a pass has been registered
   * isRegistered(true) will change the value of the internal static bool
   *
   * There's an internal static bool to this function to keep track of the
   * state, this is so when functions are derived from this class, they don't
   * have to worry about initializing the static members.
   */
  static bool isRegistered(bool flip_bit = false) {
    static bool val = false;
    if (flip_bit)
      val = !val;
    return val;
  }

  /*
   * passID() will return the ID of the registered pass
   * passID(id, true) will set the stored ID
   * Similarly to isRegistered we use an internal static variable to hold the
   * ID.
   */
  static GraphPassNameType passID(
      GraphPassNameType PassID = 0,
      bool set = false) {
    static GraphPassNameType pass_id = 0;
    if (set)
      pass_id = PassID;
    return pass_id;
  }

 public:
  // registerPass(pass) will register the pass provided and set the
  // name/isRegistered functions appropriately, it returns a bool value
  // indicating whether the given pass is already registered previously.
  static bool registerPass(GraphPass p) {
    if (!isRegistered()) {
      // If we don't already have a registered pass, register pass
      // hold on to its name, change isRegistered to true
      passID(registerPostPass(std::move(p)), true);
      isRegistered(true);
      return false;
    }
    return true;
  }

  // Calls ClearPostPass(passID())
  static void clearPass() {
    // If the pass is registered, clear it and change isRegistered to false.
    if (isRegistered()) {
      clearPostPass(passID());
      // isRegistered(true) flips the internal bit; since it was true above,
      // this resets it to false.
      isRegistered(true);
    }
  }

  // clang-tidy requires virtual destructor;
  virtual ~PassManager() = default;
};

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_alias_sensitive.h ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// Peephole Optimizes alias sensitive peepholes
// Currently this is invoked as part of PeepholeOptimize
// return true if graph is modified
// Optimizes on TensorType if shape_peepholes is true
TORCH_API bool PeepholeOptimizeAliasSensitive(
    const std::shared_ptr<Graph>& graph,
    bool shape_peepholes);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// updates the types of tuples according to the type of their current inputs.
TORCH_API void RefineTupleTypes(std::shared_ptr<Graph>& graph);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// Removes expand nodes from `graph` (implementation in remove_expands.cpp).
TORCH_API void RemoveExpands(const std::shared_ptr<Graph>& graph);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_mutation.h ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <c10/util/Exception.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/alias_analysis.h>
#include <torch/csrc/jit/ir/ir.h>

#include <utility>

namespace torch::jit {

// Rewrites mutating (in-place) ops in a graph into functional equivalents
// where this is provably safe. The AliasDb is constructed lazily on first
// use (alias analysis is expensive).
struct TORCH_API MutationRemover {
  MutationRemover(
      std::shared_ptr<Graph> graph,
      std::optional<std::function<bool(Node*)>> mutation_filter = std::nullopt)
      : mutation_filter_(std::move(mutation_filter)),
        aliasDb_(nullptr),
        graph_(std::move(graph)) {}

  // return true if graph is modified
  bool removeListMutation();

  // return true if graph is modified
  bool removeTensorMutation();

  // True for the small set of in-place ops that get a dedicated functional
  // mapping via createSpecialMappedOp (zero_, fill_.Scalar, normal_).
  bool isSpecialMappedOp(Node* n) {
    return n->matches("aten::zero_(Tensor(a!) self) -> Tensor(a!)") ||
        n->matches(
            "aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)") ||
        n->matches(
            "aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)");
  }

  bool inplaceOpVariant(Node* n);

  static bool hasSideEffectOrAlias(Value* v, AliasDb* aliasDb);

 private:
  Node* createSpecialMappedOp(Node* n);
  bool listMutationFollowingListConstruct(Node* n);
  bool tryMakeCreationAndMutationAtomic(
      Value* mutated_value,
      Node* mutating_op);
  bool tryMakeUnaliasedIfOutputAndMutationAtomic(
      Value* mutated_value,
      Node* mutating_op);
  // return true if graph is modified
  bool RemoveListMutation(Block* block);
  // return true if graph is modified
  bool RemoveTensorMutation(Block* block);

  // Lazily builds and caches the AliasDb for graph_.
  AliasDb* getOrCreateAliasDb() {
    if (!aliasDb_) {
      aliasDb_ = std::make_unique<AliasDb>(graph_);
    }
    return aliasDb_.get();
  }

  std::optional<std::function<bool(Node*)>> mutation_filter_;
  std::unique_ptr<AliasDb> aliasDb_ = nullptr;
  std::shared_ptr<Graph> graph_;
};

// Removes list mutation with functional equivalents
// return true if graph is modified
TORCH_API bool RemoveListMutation(const std::shared_ptr<Graph>& graph);

// Replaces in-place aten ops with their functional equivalents
// when it can be proven that this does not change graph semantics
// if `mutation_filter` is present, the pass will only attempt to
// remove mutation on nodes which return true for the filter
// return true if graph is modified
TORCH_API bool RemoveTensorMutation(
    const std::shared_ptr<Graph>& graph,
    std::optional<std::function<bool(Node*)>> mutation_filter = std::nullopt);

// Replaces in-place aten activation ops with their functional equivalence
TORCH_API bool InplaceToFunctionalActivation(
    const std::shared_ptr<Graph>& graph);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_redundant_profiles.h ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// Removes profile nodes made redundant by other profiles in the graph.
TORCH_API void RemoveRedundantProfiles(std::shared_ptr<Graph>& graph);
// Block-level variant; `db` supplies alias information for the analysis.
TORCH_API void RemoveRedundantProfiles(Block* block, AliasDb& db);
} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// Find the valid upgrader graph for the upgrader and cache the result
// for later lookups. Will error out if there is no valid upgrader graph
// provided for the upgrader name.
std::shared_ptr<Graph> getUpgraderGraph(const std::string& upgrader_name);

// Replaces calls to old operator versions with their registered upgraders.
TORCH_API void ReplaceOldOperatorsWithUpgraders(std::shared_ptr<Graph> graph);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/requires_grad_analysis.h ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/Export.h>

#include <memory>

namespace torch::jit {

// Forward declarations keep this header light (no ir.h include needed).
struct Graph;
struct ArgumentSpec;

// Propagates requires_grad information through the graph's values.
TORCH_API void PropagateRequiresGrad(std::shared_ptr<Graph>& graph);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_analysis.h ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/ir.h>
#include <unordered_map>
#include <utility>
#include <variant>

namespace torch::jit {

// CAUTION NOT TO BE USED, STILL A WIP, NOT STABLE

TORCH_API void PropagateShapesOnGraph(std::shared_ptr<Graph>& graph);

// CAUTION NOT TO BE USED, STILL A WIP, NOT STABLE
// From [beg, end) attempt to propagate shapes and
// build up a graph that will compute all remaining symbolic
// shapes in [beg, end) that can be executed before beg

// Result bundle: the partially-evaluated shape graph plus the mappings that
// tie its inputs/outputs back to the enclosing graph.
struct ShapeComputeGraphMapping {
  ShapeComputeGraphMapping(
      std::shared_ptr<Graph> partial_eval_shape_graph,
      std::unordered_map<Value*, Value*>
          enclosing_graph_value_to_shape_graph_input,
      std::unordered_map<Value*, int64_t> graph_output_to_symbolic_shape_dim)
      : partial_eval_shape_graph(std::move(partial_eval_shape_graph)),
        enclosing_graph_value_to_shape_graph_input_(
            std::move(enclosing_graph_value_to_shape_graph_input)),
        graph_output_to_symbolic_shape_dim_(
            std::move(graph_output_to_symbolic_shape_dim)){};

  std::shared_ptr<Graph> partial_eval_shape_graph;
  std::unordered_map<Value*, Value*>
      enclosing_graph_value_to_shape_graph_input_;
  std::unordered_map<Value*, int64_t> graph_output_to_symbolic_shape_dim_;
};

TORCH_API std::optional<ShapeComputeGraphMapping>
PropagateShapesAndBuildLargeShapeComputeGraph(
    std::shared_ptr<Graph>& graph,
    Node* beg,
    Node* end);

// don't insert complete tensor shapes in shape compute graphs and instead
// rely on our partial evaluation pipeline to propagate information.
// this is a good proxy for our ability to propagate non-complete shape
// information.
TORCH_API bool setSymbolicShapeAnalysisTestMode(bool value);
TORCH_API bool symbolicShapeAnalysisTestModeEnabled();

using SSAInput = std::variant<IValue, c10::SymbolicShape>;
// Computes output symbolic shapes for a single op given schema and inputs;
// nullopt when the shapes cannot be determined.
TORCH_API std::optional<std::vector<c10::SymbolicShape>>
calculateSymbolicShapesOnOp(
    const FunctionSchema* schema,
    const std::vector<SSAInput>& inputs);
} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/variadic_ops.h ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// Try to replace an op that takes a list input with another op that takes a
// variadic number of arguments.
TORCH_API bool UseVariadicOp(
    const std::shared_ptr<Graph>& graph,
    NodeKind op,
    NodeKind variadic_op);

// As above, but first removes list mutation so more sites qualify.
TORCH_API bool RemoveListMutationAndUseVariadicOp(
    const std::shared_ptr<Graph>& graph,
    NodeKind op,
    NodeKind variadic_op);

// Convenient functions for replacing aten::stack/aten::cat with their
// variadic versions.
TORCH_API bool UseVariadicCat(const std::shared_ptr<Graph>& graph);
TORCH_API bool RemoveListMutationAndUseVariadicCat(
    const std::shared_ptr<Graph>& graph);

TORCH_API bool UseVariadicStack(const std::shared_ptr<Graph>& graph);
TORCH_API bool RemoveListMutationAndUseVariadicStack(
    const std::shared_ptr<Graph>& graph);

} // namespace torch::jit
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/resource_guard.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once
#include <functional>

namespace torch::jit {

/// RAII guard that invokes a caller-provided cleanup action when it goes
/// out of scope. Call release() beforehand to disarm the guard so the
/// action is skipped.
class ResourceGuard {
  std::function<void()> cleanup_;
  bool disarmed_ = false;

 public:
  ResourceGuard(std::function<void()> destructor)
      : cleanup_(std::move(destructor)) {}

  // NOLINTNEXTLINE(bugprone-exception-escape)
  ~ResourceGuard() {
    if (!disarmed_) {
      cleanup_();
    }
  }

  /// Disarm the guard: the cleanup action will not run on destruction.
  void release() {
    disarmed_ = true;
  }
};

} // namespace torch::jit