ZTWHHH committed on
Commit
3336d0d
·
verified ·
1 Parent(s): e5f9e2f

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. videollama2/lib/python3.10/site-packages/setuptools/cli-64.exe +0 -0
  2. videollama2/lib/python3.10/site-packages/setuptools/gui-32.exe +0 -0
  3. videollama2/lib/python3.10/site-packages/setuptools/gui-64.exe +0 -0
  4. videollama2/lib/python3.10/site-packages/setuptools/gui-arm64.exe +0 -0
  5. videollama2/lib/python3.10/site-packages/setuptools/monkey.py +126 -0
  6. videollama2/lib/python3.10/site-packages/setuptools/msvc.py +1527 -0
  7. videollama2/lib/python3.10/site-packages/setuptools/namespaces.py +106 -0
  8. videollama2/lib/python3.10/site-packages/setuptools/script (dev).tmpl +6 -0
  9. videollama2/lib/python3.10/site-packages/websockets/extensions/__pycache__/permessage_deflate.cpython-310.pyc +0 -0
  10. videollama2/lib/python3.10/site-packages/websockets/legacy/__pycache__/protocol.cpython-310.pyc +0 -0
  11. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Float_ops.h +28 -0
  12. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_copy_compositeexplicitautograd_dispatch.h +24 -0
  13. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_flatten_weight_native.h +22 -0
  14. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_native.h +23 -0
  15. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_copy_native.h +24 -0
  16. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cosh_ops.h +50 -0
  17. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_native.h +25 -0
  18. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sigmoid_compositeexplicitautograd_dispatch.h +26 -0
  19. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl.h +44 -0
  20. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_offsets.h +30 -0
  21. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_enum_compositeimplicitautograd_dispatch.h +24 -0
  22. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_forward_ops.h +39 -0
  23. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_tensor_unsafe_ops.h +28 -0
  24. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_compositeimplicitautograd_dispatch.h +25 -0
  25. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_stack_ops.h +39 -0
  26. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd_cpu_dispatch.h +23 -0
  27. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_unique_cpu_dispatch.h +23 -0
  28. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_native.h +26 -0
  29. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_meta.h +27 -0
  30. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_bsr_tensor_args.h +30 -0
  31. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/acos_compositeexplicitautogradnonfunctional_dispatch.h +24 -0
  32. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/add_meta_dispatch.h +26 -0
  33. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/all_meta.h +37 -0
  34. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/atan_cpu_dispatch.h +26 -0
  35. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_backward_cuda_dispatch.h +25 -0
  36. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/cartesian_prod_native.h +21 -0
  37. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/celu_ops.h +50 -0
  38. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/chunk_ops.h +28 -0
  39. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/diagflat_native.h +21 -0
  40. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_compositeexplicitautogradnonfunctional_dispatch.h +24 -0
  41. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/dot_native.h +23 -0
  42. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask.h +39 -0
  43. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_native.h +21 -0
  44. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/feature_alpha_dropout.h +35 -0
  45. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fft_ops.h +39 -0
  46. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifft_ops.h +39 -0
  47. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_meta_dispatch.h +26 -0
  48. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/glu_cuda_dispatch.h +25 -0
  49. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/greater_equal_compositeimplicitautograd_dispatch.h +30 -0
  50. vllm/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d_ops.h +39 -0
videollama2/lib/python3.10/site-packages/setuptools/cli-64.exe ADDED
Binary file (14.3 kB). View file
 
videollama2/lib/python3.10/site-packages/setuptools/gui-32.exe ADDED
Binary file (11.8 kB). View file
 
videollama2/lib/python3.10/site-packages/setuptools/gui-64.exe ADDED
Binary file (14.3 kB). View file
 
videollama2/lib/python3.10/site-packages/setuptools/gui-arm64.exe ADDED
Binary file (13.8 kB). View file
 
videollama2/lib/python3.10/site-packages/setuptools/monkey.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Monkey patching of distutils.
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ import inspect
8
+ import platform
9
+ import sys
10
+ import types
11
+ from typing import TypeVar, cast, overload
12
+
13
+ import distutils.filelist
14
+
15
+ _T = TypeVar("_T")
16
+ _UnpatchT = TypeVar("_UnpatchT", type, types.FunctionType)
17
+
18
+
19
+ __all__: list[str] = []
20
+ """
21
+ Everything is private. Contact the project team
22
+ if you think you need this functionality.
23
+ """
24
+
25
+
26
+ def _get_mro(cls):
27
+ """
28
+ Returns the bases classes for cls sorted by the MRO.
29
+
30
+ Works around an issue on Jython where inspect.getmro will not return all
31
+ base classes if multiple classes share the same name. Instead, this
32
+ function will return a tuple containing the class itself, and the contents
33
+ of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024.
34
+ """
35
+ if platform.python_implementation() == "Jython":
36
+ return (cls,) + cls.__bases__
37
+ return inspect.getmro(cls)
38
+
39
+
40
@overload
def get_unpatched(item: _UnpatchT) -> _UnpatchT: ...
@overload
def get_unpatched(item: object) -> None: ...
def get_unpatched(
    item: type | types.FunctionType | object,
) -> type | types.FunctionType | None:
    """
    Resolve the pre-patched version of ``item``.

    Classes and plain functions are dispatched to their dedicated
    resolvers; anything else yields ``None``.
    """
    if not isinstance(item, (type, types.FunctionType)):
        return None
    resolver = get_unpatched_class if isinstance(item, type) else get_unpatched_function
    return resolver(item)
52
+
53
+
54
def get_unpatched_class(cls: type[_T]) -> type[_T]:
    """
    Return the first class in ``cls``'s MRO that is external to setuptools.

    Protects against re-patching the distutils if reloaded, and ensures no
    other distutils extension monkeypatched the distutils first: the first
    external base must come from ``distutils``, otherwise we refuse to
    proceed.
    """
    external = next(
        candidate
        for candidate in _get_mro(cls)
        if not candidate.__module__.startswith('setuptools')
    )
    if not external.__module__.startswith('distutils'):
        raise AssertionError(f"distutils has already been patched by {cls!r}")
    return external
70
+
71
+
72
def patch_all():
    """Install setuptools' enhanced classes throughout the distutils."""
    import setuptools

    # distutils.cmd itself cannot be patched, alas.
    distutils.core.Command = setuptools.Command  # type: ignore[misc,assignment] # monkeypatching

    _patch_distribution_metadata()

    # Make setuptools' Distribution the one distutils hands out everywhere.
    for module in (distutils.dist, distutils.core, distutils.cmd):
        module.Distribution = setuptools.dist.Distribution

    # Swap in the patched Extension class.
    distutils.core.Extension = setuptools.extension.Extension  # type: ignore[misc,assignment] # monkeypatching
    distutils.extension.Extension = setuptools.extension.Extension  # type: ignore[misc,assignment] # monkeypatching
    # Only patch build_ext if it has already been imported.
    build_ext = sys.modules.get('distutils.command.build_ext')
    if build_ext is not None:
        build_ext.Extension = setuptools.extension.Extension
91
+
92
+
93
def _patch_distribution_metadata():
    """Patch write_pkg_file and read_pkg_file for higher metadata standards"""
    from . import _core_metadata

    target = distutils.dist.DistributionMetadata
    attrs = (
        'write_pkg_info',
        'write_pkg_file',
        'read_pkg_file',
        'get_metadata_version',
        'get_fullname',
    )
    for attr in attrs:
        # Replace each legacy method with the _core_metadata implementation.
        setattr(target, attr, getattr(_core_metadata, attr))
106
+
107
+
108
def patch_func(replacement, target_mod, func_name):
    """
    Install ``replacement`` as ``func_name`` on ``target_mod``.

    Important - the current function is resolved by name (rather than
    passed in) to avoid patching an already patched function. The original
    is stashed on the replacement's ``unpatched`` attribute so it can be
    recovered later.
    """
    current = getattr(target_mod, func_name)

    # Remember the original on the replacement (first patch wins).
    replacement.__dict__.setdefault('unpatched', current)

    # Swap the function in the target module.
    setattr(target_mod, func_name, replacement)
123
+
124
+
125
def get_unpatched_function(candidate):
    """Return the original function stashed on a patched replacement."""
    return getattr(candidate, 'unpatched')
videollama2/lib/python3.10/site-packages/setuptools/msvc.py ADDED
@@ -0,0 +1,1527 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Environment info about Microsoft Compilers.
3
+
4
+ >>> getfixture('windows_only')
5
+ >>> ei = EnvironmentInfo('amd64')
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import contextlib
11
+ import itertools
12
+ import json
13
+ import os
14
+ import os.path
15
+ import platform
16
+ from typing import TYPE_CHECKING, TypedDict
17
+
18
+ from more_itertools import unique_everseen
19
+
20
+ import distutils.errors
21
+
22
+ if TYPE_CHECKING:
23
+ from typing_extensions import LiteralString, NotRequired
24
+
25
+ # https://github.com/python/mypy/issues/8166
26
+ if not TYPE_CHECKING and platform.system() == 'Windows':
27
+ import winreg
28
+ from os import environ
29
+ else:
30
+ # Mock winreg and environ so the module can be imported on this platform.
31
+
32
+ class winreg:
33
+ HKEY_USERS = None
34
+ HKEY_CURRENT_USER = None
35
+ HKEY_LOCAL_MACHINE = None
36
+ HKEY_CLASSES_ROOT = None
37
+
38
+ environ: dict[str, str] = dict()
39
+
40
+
41
class PlatformInfo:
    """
    Current and Target Architectures information.

    Parameters
    ----------
    arch: str
        Target architecture.
    """

    # Architecture the interpreter is running on, e.g. 'x86' or 'amd64'.
    current_cpu = environ.get('processor_architecture', '').lower()

    def __init__(self, arch) -> None:
        # Normalize 'x64' spellings to the canonical 'amd64'.
        self.arch = arch.lower().replace('x64', 'amd64')

    @property
    def target_cpu(self):
        r"""
        Return Target CPU architecture.

        Return
        ------
        str
            Target CPU, e.g. 'amd64' from a cross arch like 'x86_amd64'.
        """
        # Everything after the first '_', or the whole arch if there is none.
        return self.arch.split('_', 1)[-1]

    def target_is_x86(self):
        """
        Return True if target CPU is x86 32 bits.

        Return
        ------
        bool
            CPU is x86 32 bits
        """
        return self.target_cpu == 'x86'

    def current_is_x86(self):
        """
        Return True if current CPU is x86 32 bits.

        Return
        ------
        bool
            CPU is x86 32 bits
        """
        return self.current_cpu == 'x86'

    def current_dir(self, hidex86=False, x64=False) -> str:
        r"""
        Current platform specific subfolder.

        Parameters
        ----------
        hidex86: bool
            return '' and not '\x86' if architecture is x86.
        x64: bool
            return '\x64' and not '\amd64' if architecture is amd64.

        Return
        ------
        str
            subfolder: '\target', or '' (see hidex86 parameter)
        """
        if hidex86 and self.current_cpu == 'x86':
            return ''
        if x64 and self.current_cpu == 'amd64':
            return r'\x64'
        return rf'\{self.current_cpu}'

    def target_dir(self, hidex86=False, x64=False) -> str:
        r"""
        Target platform specific subfolder.

        Parameters
        ----------
        hidex86: bool
            return '' and not '\x86' if architecture is x86.
        x64: bool
            return '\x64' and not '\amd64' if architecture is amd64.

        Return
        ------
        str
            subfolder: '\current', or '' (see hidex86 parameter)
        """
        if hidex86 and self.target_cpu == 'x86':
            return ''
        if x64 and self.target_cpu == 'amd64':
            return r'\x64'
        return rf'\{self.target_cpu}'

    def cross_dir(self, forcex86=False):
        r"""
        Cross platform specific subfolder.

        Parameters
        ----------
        forcex86: bool
            Use 'x86' as current architecture even if current architecture is
            not x86.

        Return
        ------
        str
            subfolder: '' if target architecture is current architecture,
            '\current_target' if not.
        """
        current = 'x86' if forcex86 else self.current_cpu
        if self.target_cpu == current:
            return ''
        # Turn '\target' into '\current_target'.
        return self.target_dir().replace('\\', f'\\{current}_')
160
+
161
+
162
class RegistryInfo:
    """
    Microsoft Visual Studio related registry information.

    Parameters
    ----------
    platform_info: PlatformInfo
        "PlatformInfo" instance.
    """

    # Registry hives scanned by lookup(), in search order.
    HKEYS = (
        winreg.HKEY_USERS,
        winreg.HKEY_CURRENT_USER,
        winreg.HKEY_LOCAL_MACHINE,
        winreg.HKEY_CLASSES_ROOT,
    )

    def __init__(self, platform_info) -> None:
        self.pi = platform_info

    @property
    def visualstudio(self) -> str:
        """
        Microsoft Visual Studio root registry key.

        Return
        ------
        str
            Registry key
        """
        return 'VisualStudio'

    @property
    def sxs(self):
        """
        Microsoft Visual Studio SxS registry key.

        Return
        ------
        str
            Registry key
        """
        return os.path.join(self.visualstudio, 'SxS')

    @property
    def vc(self):
        """
        Microsoft Visual C++ VC7 registry key.

        Return
        ------
        str
            Registry key
        """
        return os.path.join(self.sxs, 'VC7')

    @property
    def vs(self):
        """
        Microsoft Visual Studio VS7 registry key.

        Return
        ------
        str
            Registry key
        """
        return os.path.join(self.sxs, 'VS7')

    @property
    def vc_for_python(self) -> str:
        """
        Microsoft Visual C++ for Python registry key.

        Return
        ------
        str
            Registry key
        """
        return r'DevDiv\VCForPython'

    @property
    def microsoft_sdk(self) -> str:
        """
        Microsoft SDK registry key.

        Return
        ------
        str
            Registry key
        """
        return 'Microsoft SDKs'

    @property
    def windows_sdk(self):
        """
        Microsoft Windows/Platform SDK registry key.

        Return
        ------
        str
            Registry key
        """
        return os.path.join(self.microsoft_sdk, 'Windows')

    @property
    def netfx_sdk(self):
        """
        Microsoft .NET Framework SDK registry key.

        Return
        ------
        str
            Registry key
        """
        return os.path.join(self.microsoft_sdk, 'NETFXSDK')

    @property
    def windows_kits_roots(self) -> str:
        """
        Microsoft Windows Kits Roots registry key.

        Return
        ------
        str
            Registry key
        """
        return r'Windows Kits\Installed Roots'

    def microsoft(self, key, x86=False):
        """
        Return key in Microsoft software registry.

        Parameters
        ----------
        key: str
            Registry key path where look.
        x86: str
            Force x86 software registry.

        Return
        ------
        str
            Registry key
        """
        # On a 64-bit host, 32-bit software lives under Wow6432Node unless
        # the x86 view is explicitly requested.
        node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node'
        return os.path.join('Software', node64, 'Microsoft', key)

    def _open_key(self, hkey, key):
        """
        Open `key` read-only under `hkey`, falling back to the x86 registry
        view on 64-bit hosts. Return the opened key, or None if absent.
        """
        try:
            return winreg.OpenKey(hkey, self.microsoft(key), 0, winreg.KEY_READ)
        except OSError:
            if self.pi.current_is_x86():
                return None
        try:
            return winreg.OpenKey(hkey, self.microsoft(key, True), 0, winreg.KEY_READ)
        except OSError:
            return None

    def lookup(self, key, name):
        """
        Look for values in registry in Microsoft software registry.

        Parameters
        ----------
        key: str
            Registry key path where look.
        name: str
            Value name to find.

        Return
        ------
        str
            value, or None when not found in any hive.
        """
        for hkey in self.HKEYS:
            bkey = self._open_key(hkey, key)
            if bkey is None:
                continue
            try:
                return winreg.QueryValueEx(bkey, name)[0]
            except OSError:
                # Value missing in this hive: keep searching the others.
                pass
            finally:
                winreg.CloseKey(bkey)
        return None
349
+
350
+
351
+ class SystemInfo:
352
+ """
353
+ Microsoft Windows and Visual Studio related system information.
354
+
355
+ Parameters
356
+ ----------
357
+ registry_info: RegistryInfo
358
+ "RegistryInfo" instance.
359
+ vc_ver: float
360
+ Required Microsoft Visual C++ version.
361
+ """
362
+
363
+ # Variables and properties in this class use originals CamelCase variables
364
+ # names from Microsoft source files for more easy comparison.
365
+ WinDir = environ.get('WinDir', '')
366
+ ProgramFiles = environ.get('ProgramFiles', '')
367
+ ProgramFilesx86 = environ.get('ProgramFiles(x86)', ProgramFiles)
368
+
369
+ def __init__(self, registry_info, vc_ver=None) -> None:
370
+ self.ri = registry_info
371
+ self.pi = self.ri.pi
372
+
373
+ self.known_vs_paths = self.find_programdata_vs_vers()
374
+
375
+ # Except for VS15+, VC version is aligned with VS version
376
+ self.vs_ver = self.vc_ver = vc_ver or self._find_latest_available_vs_ver()
377
+
378
+ def _find_latest_available_vs_ver(self):
379
+ """
380
+ Find the latest VC version
381
+
382
+ Return
383
+ ------
384
+ float
385
+ version
386
+ """
387
+ reg_vc_vers = self.find_reg_vs_vers()
388
+
389
+ if not (reg_vc_vers or self.known_vs_paths):
390
+ raise distutils.errors.DistutilsPlatformError(
391
+ 'No Microsoft Visual C++ version found'
392
+ )
393
+
394
+ vc_vers = set(reg_vc_vers)
395
+ vc_vers.update(self.known_vs_paths)
396
+ return sorted(vc_vers)[-1]
397
+
398
+ def find_reg_vs_vers(self):
399
+ """
400
+ Find Microsoft Visual Studio versions available in registry.
401
+
402
+ Return
403
+ ------
404
+ list of float
405
+ Versions
406
+ """
407
+ ms = self.ri.microsoft
408
+ vckeys = (self.ri.vc, self.ri.vc_for_python, self.ri.vs)
409
+ vs_vers = []
410
+ for hkey, key in itertools.product(self.ri.HKEYS, vckeys):
411
+ try:
412
+ bkey = winreg.OpenKey(hkey, ms(key), 0, winreg.KEY_READ)
413
+ except OSError:
414
+ continue
415
+ with bkey:
416
+ subkeys, values, _ = winreg.QueryInfoKey(bkey)
417
+ for i in range(values):
418
+ with contextlib.suppress(ValueError):
419
+ ver = float(winreg.EnumValue(bkey, i)[0])
420
+ if ver not in vs_vers:
421
+ vs_vers.append(ver)
422
+ for i in range(subkeys):
423
+ with contextlib.suppress(ValueError):
424
+ ver = float(winreg.EnumKey(bkey, i))
425
+ if ver not in vs_vers:
426
+ vs_vers.append(ver)
427
+ return sorted(vs_vers)
428
+
429
+ def find_programdata_vs_vers(self) -> dict[float, str]:
430
+ r"""
431
+ Find Visual studio 2017+ versions from information in
432
+ "C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances".
433
+
434
+ Return
435
+ ------
436
+ dict
437
+ float version as key, path as value.
438
+ """
439
+ vs_versions: dict[float, str] = {}
440
+ instances_dir = r'C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances'
441
+
442
+ try:
443
+ hashed_names = os.listdir(instances_dir)
444
+
445
+ except OSError:
446
+ # Directory not exists with all Visual Studio versions
447
+ return vs_versions
448
+
449
+ for name in hashed_names:
450
+ try:
451
+ # Get VS installation path from "state.json" file
452
+ state_path = os.path.join(instances_dir, name, 'state.json')
453
+ with open(state_path, 'rt', encoding='utf-8') as state_file:
454
+ state = json.load(state_file)
455
+ vs_path = state['installationPath']
456
+
457
+ # Raises OSError if this VS installation does not contain VC
458
+ os.listdir(os.path.join(vs_path, r'VC\Tools\MSVC'))
459
+
460
+ # Store version and path
461
+ vs_versions[self._as_float_version(state['installationVersion'])] = (
462
+ vs_path
463
+ )
464
+
465
+ except (OSError, KeyError):
466
+ # Skip if "state.json" file is missing or bad format
467
+ continue
468
+
469
+ return vs_versions
470
+
471
+ @staticmethod
472
+ def _as_float_version(version):
473
+ """
474
+ Return a string version as a simplified float version (major.minor)
475
+
476
+ Parameters
477
+ ----------
478
+ version: str
479
+ Version.
480
+
481
+ Return
482
+ ------
483
+ float
484
+ version
485
+ """
486
+ return float('.'.join(version.split('.')[:2]))
487
+
488
+ @property
489
+ def VSInstallDir(self):
490
+ """
491
+ Microsoft Visual Studio directory.
492
+
493
+ Return
494
+ ------
495
+ str
496
+ path
497
+ """
498
+ # Default path
499
+ default = os.path.join(
500
+ self.ProgramFilesx86, f'Microsoft Visual Studio {self.vs_ver:0.1f}'
501
+ )
502
+
503
+ # Try to get path from registry, if fail use default path
504
+ return self.ri.lookup(self.ri.vs, f'{self.vs_ver:0.1f}') or default
505
+
506
+ @property
507
+ def VCInstallDir(self):
508
+ """
509
+ Microsoft Visual C++ directory.
510
+
511
+ Return
512
+ ------
513
+ str
514
+ path
515
+ """
516
+ path = self._guess_vc() or self._guess_vc_legacy()
517
+
518
+ if not os.path.isdir(path):
519
+ msg = 'Microsoft Visual C++ directory not found'
520
+ raise distutils.errors.DistutilsPlatformError(msg)
521
+
522
+ return path
523
+
524
+ def _guess_vc(self):
525
+ """
526
+ Locate Visual C++ for VS2017+.
527
+
528
+ Return
529
+ ------
530
+ str
531
+ path
532
+ """
533
+ if self.vs_ver <= 14.0:
534
+ return ''
535
+
536
+ try:
537
+ # First search in known VS paths
538
+ vs_dir = self.known_vs_paths[self.vs_ver]
539
+ except KeyError:
540
+ # Else, search with path from registry
541
+ vs_dir = self.VSInstallDir
542
+
543
+ guess_vc = os.path.join(vs_dir, r'VC\Tools\MSVC')
544
+
545
+ # Subdir with VC exact version as name
546
+ try:
547
+ # Update the VC version with real one instead of VS version
548
+ vc_ver = os.listdir(guess_vc)[-1]
549
+ self.vc_ver = self._as_float_version(vc_ver)
550
+ return os.path.join(guess_vc, vc_ver)
551
+ except (OSError, IndexError):
552
+ return ''
553
+
554
+ def _guess_vc_legacy(self):
555
+ """
556
+ Locate Visual C++ for versions prior to 2017.
557
+
558
+ Return
559
+ ------
560
+ str
561
+ path
562
+ """
563
+ default = os.path.join(
564
+ self.ProgramFilesx86,
565
+ rf'Microsoft Visual Studio {self.vs_ver:0.1f}\VC',
566
+ )
567
+
568
+ # Try to get "VC++ for Python" path from registry as default path
569
+ reg_path = os.path.join(self.ri.vc_for_python, f'{self.vs_ver:0.1f}')
570
+ python_vc = self.ri.lookup(reg_path, 'installdir')
571
+ default_vc = os.path.join(python_vc, 'VC') if python_vc else default
572
+
573
+ # Try to get path from registry, if fail use default path
574
+ return self.ri.lookup(self.ri.vc, f'{self.vs_ver:0.1f}') or default_vc
575
+
576
+ @property
577
+ def WindowsSdkVersion(self) -> tuple[LiteralString, ...]:
578
+ """
579
+ Microsoft Windows SDK versions for specified MSVC++ version.
580
+
581
+ Return
582
+ ------
583
+ tuple of str
584
+ versions
585
+ """
586
+ if self.vs_ver <= 9.0:
587
+ return '7.0', '6.1', '6.0a'
588
+ elif self.vs_ver == 10.0:
589
+ return '7.1', '7.0a'
590
+ elif self.vs_ver == 11.0:
591
+ return '8.0', '8.0a'
592
+ elif self.vs_ver == 12.0:
593
+ return '8.1', '8.1a'
594
+ elif self.vs_ver >= 14.0:
595
+ return '10.0', '8.1'
596
+ return ()
597
+
598
+ @property
599
+ def WindowsSdkLastVersion(self):
600
+ """
601
+ Microsoft Windows SDK last version.
602
+
603
+ Return
604
+ ------
605
+ str
606
+ version
607
+ """
608
+ return self._use_last_dir_name(os.path.join(self.WindowsSdkDir, 'lib'))
609
+
610
+ @property
611
+ def WindowsSdkDir(self) -> str | None: # noqa: C901 # is too complex (12) # FIXME
612
+ """
613
+ Microsoft Windows SDK directory.
614
+
615
+ Return
616
+ ------
617
+ str
618
+ path
619
+ """
620
+ sdkdir: str | None = ''
621
+ for ver in self.WindowsSdkVersion:
622
+ # Try to get it from registry
623
+ loc = os.path.join(self.ri.windows_sdk, f'v{ver}')
624
+ sdkdir = self.ri.lookup(loc, 'installationfolder')
625
+ if sdkdir:
626
+ break
627
+ if not sdkdir or not os.path.isdir(sdkdir):
628
+ # Try to get "VC++ for Python" version from registry
629
+ path = os.path.join(self.ri.vc_for_python, f'{self.vc_ver:0.1f}')
630
+ install_base = self.ri.lookup(path, 'installdir')
631
+ if install_base:
632
+ sdkdir = os.path.join(install_base, 'WinSDK')
633
+ if not sdkdir or not os.path.isdir(sdkdir):
634
+ # If fail, use default new path
635
+ for ver in self.WindowsSdkVersion:
636
+ intver = ver[: ver.rfind('.')]
637
+ path = rf'Microsoft SDKs\Windows Kits\{intver}'
638
+ d = os.path.join(self.ProgramFiles, path)
639
+ if os.path.isdir(d):
640
+ sdkdir = d
641
+ if not sdkdir or not os.path.isdir(sdkdir):
642
+ # If fail, use default old path
643
+ for ver in self.WindowsSdkVersion:
644
+ path = rf'Microsoft SDKs\Windows\v{ver}'
645
+ d = os.path.join(self.ProgramFiles, path)
646
+ if os.path.isdir(d):
647
+ sdkdir = d
648
+ if not sdkdir:
649
+ # If fail, use Platform SDK
650
+ sdkdir = os.path.join(self.VCInstallDir, 'PlatformSDK')
651
+ return sdkdir
652
+
653
+ @property
654
+ def WindowsSDKExecutablePath(self):
655
+ """
656
+ Microsoft Windows SDK executable directory.
657
+
658
+ Return
659
+ ------
660
+ str
661
+ path
662
+ """
663
+ # Find WinSDK NetFx Tools registry dir name
664
+ if self.vs_ver <= 11.0:
665
+ netfxver = 35
666
+ arch = ''
667
+ else:
668
+ netfxver = 40
669
+ hidex86 = True if self.vs_ver <= 12.0 else False
670
+ arch = self.pi.current_dir(x64=True, hidex86=hidex86).replace('\\', '-')
671
+ fx = f'WinSDK-NetFx{netfxver}Tools{arch}'
672
+
673
+ # list all possibles registry paths
674
+ regpaths = []
675
+ if self.vs_ver >= 14.0:
676
+ for ver in self.NetFxSdkVersion:
677
+ regpaths += [os.path.join(self.ri.netfx_sdk, ver, fx)]
678
+
679
+ for ver in self.WindowsSdkVersion:
680
+ regpaths += [os.path.join(self.ri.windows_sdk, f'v{ver}A', fx)]
681
+
682
+ # Return installation folder from the more recent path
683
+ for path in regpaths:
684
+ execpath = self.ri.lookup(path, 'installationfolder')
685
+ if execpath:
686
+ return execpath
687
+
688
+ return None
689
+
690
+ @property
691
+ def FSharpInstallDir(self):
692
+ """
693
+ Microsoft Visual F# directory.
694
+
695
+ Return
696
+ ------
697
+ str
698
+ path
699
+ """
700
+ path = os.path.join(self.ri.visualstudio, rf'{self.vs_ver:0.1f}\Setup\F#')
701
+ return self.ri.lookup(path, 'productdir') or ''
702
+
703
+ @property
704
+ def UniversalCRTSdkDir(self):
705
+ """
706
+ Microsoft Universal CRT SDK directory.
707
+
708
+ Return
709
+ ------
710
+ str
711
+ path
712
+ """
713
+ # Set Kit Roots versions for specified MSVC++ version
714
+ vers = ('10', '81') if self.vs_ver >= 14.0 else ()
715
+
716
+ # Find path of the more recent Kit
717
+ for ver in vers:
718
+ sdkdir = self.ri.lookup(self.ri.windows_kits_roots, f'kitsroot{ver}')
719
+ if sdkdir:
720
+ return sdkdir or ''
721
+
722
+ return None
723
+
724
+ @property
725
+ def UniversalCRTSdkLastVersion(self):
726
+ """
727
+ Microsoft Universal C Runtime SDK last version.
728
+
729
+ Return
730
+ ------
731
+ str
732
+ version
733
+ """
734
+ return self._use_last_dir_name(os.path.join(self.UniversalCRTSdkDir, 'lib'))
735
+
736
+ @property
737
+ def NetFxSdkVersion(self):
738
+ """
739
+ Microsoft .NET Framework SDK versions.
740
+
741
+ Return
742
+ ------
743
+ tuple of str
744
+ versions
745
+ """
746
+ # Set FxSdk versions for specified VS version
747
+ return (
748
+ ('4.7.2', '4.7.1', '4.7', '4.6.2', '4.6.1', '4.6', '4.5.2', '4.5.1', '4.5')
749
+ if self.vs_ver >= 14.0
750
+ else ()
751
+ )
752
+
753
+ @property
754
+ def NetFxSdkDir(self):
755
+ """
756
+ Microsoft .NET Framework SDK directory.
757
+
758
+ Return
759
+ ------
760
+ str
761
+ path
762
+ """
763
+ sdkdir = ''
764
+ for ver in self.NetFxSdkVersion:
765
+ loc = os.path.join(self.ri.netfx_sdk, ver)
766
+ sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder')
767
+ if sdkdir:
768
+ break
769
+ return sdkdir
770
+
771
+ @property
772
+ def FrameworkDir32(self):
773
+ """
774
+ Microsoft .NET Framework 32bit directory.
775
+
776
+ Return
777
+ ------
778
+ str
779
+ path
780
+ """
781
+ # Default path
782
+ guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework')
783
+
784
+ # Try to get path from registry, if fail use default path
785
+ return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw
786
+
787
+ @property
788
+ def FrameworkDir64(self):
789
+ """
790
+ Microsoft .NET Framework 64bit directory.
791
+
792
+ Return
793
+ ------
794
+ str
795
+ path
796
+ """
797
+ # Default path
798
+ guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework64')
799
+
800
+ # Try to get path from registry, if fail use default path
801
+ return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw
802
+
803
+ @property
804
+ def FrameworkVersion32(self) -> tuple[str, ...]:
805
+ """
806
+ Microsoft .NET Framework 32bit versions.
807
+
808
+ Return
809
+ ------
810
+ tuple of str
811
+ versions
812
+ """
813
+ return self._find_dot_net_versions(32)
814
+
815
+ @property
816
+ def FrameworkVersion64(self) -> tuple[str, ...]:
817
+ """
818
+ Microsoft .NET Framework 64bit versions.
819
+
820
+ Return
821
+ ------
822
+ tuple of str
823
+ versions
824
+ """
825
+ return self._find_dot_net_versions(64)
826
+
827
+ def _find_dot_net_versions(self, bits) -> tuple[str, ...]:
828
+ """
829
+ Find Microsoft .NET Framework versions.
830
+
831
+ Parameters
832
+ ----------
833
+ bits: int
834
+ Platform number of bits: 32 or 64.
835
+
836
+ Return
837
+ ------
838
+ tuple of str
839
+ versions
840
+ """
841
+ # Find actual .NET version in registry
842
+ reg_ver = self.ri.lookup(self.ri.vc, f'frameworkver{bits}')
843
+ dot_net_dir = getattr(self, f'FrameworkDir{bits}')
844
+ ver = reg_ver or self._use_last_dir_name(dot_net_dir, 'v') or ''
845
+
846
+ # Set .NET versions for specified MSVC++ version
847
+ if self.vs_ver >= 12.0:
848
+ return ver, 'v4.0'
849
+ elif self.vs_ver >= 10.0:
850
+ return 'v4.0.30319' if ver.lower()[:2] != 'v4' else ver, 'v3.5'
851
+ elif self.vs_ver == 9.0:
852
+ return 'v3.5', 'v2.0.50727'
853
+ elif self.vs_ver == 8.0:
854
+ return 'v3.0', 'v2.0.50727'
855
+ return ()
856
+
857
+ @staticmethod
858
+ def _use_last_dir_name(path, prefix=''):
859
+ """
860
+ Return name of the last dir in path or '' if no dir found.
861
+
862
+ Parameters
863
+ ----------
864
+ path: str
865
+ Use dirs in this path
866
+ prefix: str
867
+ Use only dirs starting by this prefix
868
+
869
+ Return
870
+ ------
871
+ str
872
+ name
873
+ """
874
+ matching_dirs = (
875
+ dir_name
876
+ for dir_name in reversed(os.listdir(path))
877
+ if os.path.isdir(os.path.join(path, dir_name))
878
+ and dir_name.startswith(prefix)
879
+ )
880
+ return next(matching_dirs, None) or ''
881
+
882
+
883
+ class _EnvironmentDict(TypedDict):
884
+ include: str
885
+ lib: str
886
+ libpath: str
887
+ path: str
888
+ py_vcruntime_redist: NotRequired[str | None]
889
+
890
+
891
+ class EnvironmentInfo:
892
+ """
893
+ Return environment variables for specified Microsoft Visual C++ version
894
+ and platform : Lib, Include, Path and libpath.
895
+
896
+ This function is compatible with Microsoft Visual C++ 9.0 to 14.X.
897
+
898
+ Script created by analysing Microsoft environment configuration files like
899
+ "vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ...
900
+
901
+ Parameters
902
+ ----------
903
+ arch: str
904
+ Target architecture.
905
+ vc_ver: float
906
+ Required Microsoft Visual C++ version. If not set, autodetect the last
907
+ version.
908
+ vc_min_ver: float
909
+ Minimum Microsoft Visual C++ version.
910
+ """
911
+
912
+ # Variables and properties in this class use originals CamelCase variables
913
+ # names from Microsoft source files for more easy comparison.
914
+
915
+ def __init__(self, arch, vc_ver=None, vc_min_ver=0) -> None:
916
+ self.pi = PlatformInfo(arch)
917
+ self.ri = RegistryInfo(self.pi)
918
+ self.si = SystemInfo(self.ri, vc_ver)
919
+
920
+ if self.vc_ver < vc_min_ver:
921
+ err = 'No suitable Microsoft Visual C++ version found'
922
+ raise distutils.errors.DistutilsPlatformError(err)
923
+
924
+ @property
925
+ def vs_ver(self):
926
+ """
927
+ Microsoft Visual Studio.
928
+
929
+ Return
930
+ ------
931
+ float
932
+ version
933
+ """
934
+ return self.si.vs_ver
935
+
936
+ @property
937
+ def vc_ver(self):
938
+ """
939
+ Microsoft Visual C++ version.
940
+
941
+ Return
942
+ ------
943
+ float
944
+ version
945
+ """
946
+ return self.si.vc_ver
947
+
948
+ @property
949
+ def VSTools(self):
950
+ """
951
+ Microsoft Visual Studio Tools.
952
+
953
+ Return
954
+ ------
955
+ list of str
956
+ paths
957
+ """
958
+ paths = [r'Common7\IDE', r'Common7\Tools']
959
+
960
+ if self.vs_ver >= 14.0:
961
+ arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
962
+ paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow']
963
+ paths += [r'Team Tools\Performance Tools']
964
+ paths += [rf'Team Tools\Performance Tools{arch_subdir}']
965
+
966
+ return [os.path.join(self.si.VSInstallDir, path) for path in paths]
967
+
968
+ @property
969
+ def VCIncludes(self):
970
+ """
971
+ Microsoft Visual C++ & Microsoft Foundation Class Includes.
972
+
973
+ Return
974
+ ------
975
+ list of str
976
+ paths
977
+ """
978
+ return [
979
+ os.path.join(self.si.VCInstallDir, 'Include'),
980
+ os.path.join(self.si.VCInstallDir, r'ATLMFC\Include'),
981
+ ]
982
+
983
+ @property
984
+ def VCLibraries(self):
985
+ """
986
+ Microsoft Visual C++ & Microsoft Foundation Class Libraries.
987
+
988
+ Return
989
+ ------
990
+ list of str
991
+ paths
992
+ """
993
+ if self.vs_ver >= 15.0:
994
+ arch_subdir = self.pi.target_dir(x64=True)
995
+ else:
996
+ arch_subdir = self.pi.target_dir(hidex86=True)
997
+ paths = [f'Lib{arch_subdir}', rf'ATLMFC\Lib{arch_subdir}']
998
+
999
+ if self.vs_ver >= 14.0:
1000
+ paths += [rf'Lib\store{arch_subdir}']
1001
+
1002
+ return [os.path.join(self.si.VCInstallDir, path) for path in paths]
1003
+
1004
+ @property
1005
+ def VCStoreRefs(self):
1006
+ """
1007
+ Microsoft Visual C++ store references Libraries.
1008
+
1009
+ Return
1010
+ ------
1011
+ list of str
1012
+ paths
1013
+ """
1014
+ if self.vs_ver < 14.0:
1015
+ return []
1016
+ return [os.path.join(self.si.VCInstallDir, r'Lib\store\references')]
1017
+
1018
+ @property
1019
+ def VCTools(self):
1020
+ """
1021
+ Microsoft Visual C++ Tools.
1022
+
1023
+ Return
1024
+ ------
1025
+ list of str
1026
+ paths
1027
+ """
1028
+ si = self.si
1029
+ tools = [os.path.join(si.VCInstallDir, 'VCPackages')]
1030
+
1031
+ forcex86 = True if self.vs_ver <= 10.0 else False
1032
+ arch_subdir = self.pi.cross_dir(forcex86)
1033
+ if arch_subdir:
1034
+ tools += [os.path.join(si.VCInstallDir, f'Bin{arch_subdir}')]
1035
+
1036
+ if self.vs_ver == 14.0:
1037
+ path = f'Bin{self.pi.current_dir(hidex86=True)}'
1038
+ tools += [os.path.join(si.VCInstallDir, path)]
1039
+
1040
+ elif self.vs_ver >= 15.0:
1041
+ host_dir = (
1042
+ r'bin\HostX86%s' if self.pi.current_is_x86() else r'bin\HostX64%s'
1043
+ )
1044
+ tools += [
1045
+ os.path.join(si.VCInstallDir, host_dir % self.pi.target_dir(x64=True))
1046
+ ]
1047
+
1048
+ if self.pi.current_cpu != self.pi.target_cpu:
1049
+ tools += [
1050
+ os.path.join(
1051
+ si.VCInstallDir, host_dir % self.pi.current_dir(x64=True)
1052
+ )
1053
+ ]
1054
+
1055
+ else:
1056
+ tools += [os.path.join(si.VCInstallDir, 'Bin')]
1057
+
1058
+ return tools
1059
+
1060
+ @property
1061
+ def OSLibraries(self):
1062
+ """
1063
+ Microsoft Windows SDK Libraries.
1064
+
1065
+ Return
1066
+ ------
1067
+ list of str
1068
+ paths
1069
+ """
1070
+ if self.vs_ver <= 10.0:
1071
+ arch_subdir = self.pi.target_dir(hidex86=True, x64=True)
1072
+ return [os.path.join(self.si.WindowsSdkDir, f'Lib{arch_subdir}')]
1073
+
1074
+ else:
1075
+ arch_subdir = self.pi.target_dir(x64=True)
1076
+ lib = os.path.join(self.si.WindowsSdkDir, 'lib')
1077
+ libver = self._sdk_subdir
1078
+ return [os.path.join(lib, f'{libver}um{arch_subdir}')]
1079
+
1080
+ @property
1081
+ def OSIncludes(self):
1082
+ """
1083
+ Microsoft Windows SDK Include.
1084
+
1085
+ Return
1086
+ ------
1087
+ list of str
1088
+ paths
1089
+ """
1090
+ include = os.path.join(self.si.WindowsSdkDir, 'include')
1091
+
1092
+ if self.vs_ver <= 10.0:
1093
+ return [include, os.path.join(include, 'gl')]
1094
+
1095
+ else:
1096
+ if self.vs_ver >= 14.0:
1097
+ sdkver = self._sdk_subdir
1098
+ else:
1099
+ sdkver = ''
1100
+ return [
1101
+ os.path.join(include, f'{sdkver}shared'),
1102
+ os.path.join(include, f'{sdkver}um'),
1103
+ os.path.join(include, f'{sdkver}winrt'),
1104
+ ]
1105
+
1106
+ @property
1107
+ def OSLibpath(self):
1108
+ """
1109
+ Microsoft Windows SDK Libraries Paths.
1110
+
1111
+ Return
1112
+ ------
1113
+ list of str
1114
+ paths
1115
+ """
1116
+ ref = os.path.join(self.si.WindowsSdkDir, 'References')
1117
+ libpath = []
1118
+
1119
+ if self.vs_ver <= 9.0:
1120
+ libpath += self.OSLibraries
1121
+
1122
+ if self.vs_ver >= 11.0:
1123
+ libpath += [os.path.join(ref, r'CommonConfiguration\Neutral')]
1124
+
1125
+ if self.vs_ver >= 14.0:
1126
+ libpath += [
1127
+ ref,
1128
+ os.path.join(self.si.WindowsSdkDir, 'UnionMetadata'),
1129
+ os.path.join(ref, 'Windows.Foundation.UniversalApiContract', '1.0.0.0'),
1130
+ os.path.join(ref, 'Windows.Foundation.FoundationContract', '1.0.0.0'),
1131
+ os.path.join(
1132
+ ref, 'Windows.Networking.Connectivity.WwanContract', '1.0.0.0'
1133
+ ),
1134
+ os.path.join(
1135
+ self.si.WindowsSdkDir,
1136
+ 'ExtensionSDKs',
1137
+ 'Microsoft.VCLibs',
1138
+ f'{self.vs_ver:0.1f}',
1139
+ 'References',
1140
+ 'CommonConfiguration',
1141
+ 'neutral',
1142
+ ),
1143
+ ]
1144
+ return libpath
1145
+
1146
+ @property
1147
+ def SdkTools(self):
1148
+ """
1149
+ Microsoft Windows SDK Tools.
1150
+
1151
+ Return
1152
+ ------
1153
+ list of str
1154
+ paths
1155
+ """
1156
+ return list(self._sdk_tools())
1157
+
1158
+ def _sdk_tools(self):
1159
+ """
1160
+ Microsoft Windows SDK Tools paths generator.
1161
+
1162
+ Return
1163
+ ------
1164
+ generator of str
1165
+ paths
1166
+ """
1167
+ if self.vs_ver < 15.0:
1168
+ bin_dir = 'Bin' if self.vs_ver <= 11.0 else r'Bin\x86'
1169
+ yield os.path.join(self.si.WindowsSdkDir, bin_dir)
1170
+
1171
+ if not self.pi.current_is_x86():
1172
+ arch_subdir = self.pi.current_dir(x64=True)
1173
+ path = f'Bin{arch_subdir}'
1174
+ yield os.path.join(self.si.WindowsSdkDir, path)
1175
+
1176
+ if self.vs_ver in (10.0, 11.0):
1177
+ if self.pi.target_is_x86():
1178
+ arch_subdir = ''
1179
+ else:
1180
+ arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
1181
+ path = rf'Bin\NETFX 4.0 Tools{arch_subdir}'
1182
+ yield os.path.join(self.si.WindowsSdkDir, path)
1183
+
1184
+ elif self.vs_ver >= 15.0:
1185
+ path = os.path.join(self.si.WindowsSdkDir, 'Bin')
1186
+ arch_subdir = self.pi.current_dir(x64=True)
1187
+ sdkver = self.si.WindowsSdkLastVersion
1188
+ yield os.path.join(path, f'{sdkver}{arch_subdir}')
1189
+
1190
+ if self.si.WindowsSDKExecutablePath:
1191
+ yield self.si.WindowsSDKExecutablePath
1192
+
1193
+ @property
1194
+ def _sdk_subdir(self):
1195
+ """
1196
+ Microsoft Windows SDK version subdir.
1197
+
1198
+ Return
1199
+ ------
1200
+ str
1201
+ subdir
1202
+ """
1203
+ ucrtver = self.si.WindowsSdkLastVersion
1204
+ return (f'{ucrtver}\\') if ucrtver else ''
1205
+
1206
+ @property
1207
+ def SdkSetup(self):
1208
+ """
1209
+ Microsoft Windows SDK Setup.
1210
+
1211
+ Return
1212
+ ------
1213
+ list of str
1214
+ paths
1215
+ """
1216
+ if self.vs_ver > 9.0:
1217
+ return []
1218
+
1219
+ return [os.path.join(self.si.WindowsSdkDir, 'Setup')]
1220
+
1221
+ @property
1222
+ def FxTools(self):
1223
+ """
1224
+ Microsoft .NET Framework Tools.
1225
+
1226
+ Return
1227
+ ------
1228
+ list of str
1229
+ paths
1230
+ """
1231
+ pi = self.pi
1232
+ si = self.si
1233
+
1234
+ if self.vs_ver <= 10.0:
1235
+ include32 = True
1236
+ include64 = not pi.target_is_x86() and not pi.current_is_x86()
1237
+ else:
1238
+ include32 = pi.target_is_x86() or pi.current_is_x86()
1239
+ include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64'
1240
+
1241
+ tools = []
1242
+ if include32:
1243
+ tools += [
1244
+ os.path.join(si.FrameworkDir32, ver) for ver in si.FrameworkVersion32
1245
+ ]
1246
+ if include64:
1247
+ tools += [
1248
+ os.path.join(si.FrameworkDir64, ver) for ver in si.FrameworkVersion64
1249
+ ]
1250
+ return tools
1251
+
1252
+ @property
1253
+ def NetFxSDKLibraries(self):
1254
+ """
1255
+ Microsoft .Net Framework SDK Libraries.
1256
+
1257
+ Return
1258
+ ------
1259
+ list of str
1260
+ paths
1261
+ """
1262
+ if self.vs_ver < 14.0 or not self.si.NetFxSdkDir:
1263
+ return []
1264
+
1265
+ arch_subdir = self.pi.target_dir(x64=True)
1266
+ return [os.path.join(self.si.NetFxSdkDir, rf'lib\um{arch_subdir}')]
1267
+
1268
+ @property
1269
+ def NetFxSDKIncludes(self):
1270
+ """
1271
+ Microsoft .Net Framework SDK Includes.
1272
+
1273
+ Return
1274
+ ------
1275
+ list of str
1276
+ paths
1277
+ """
1278
+ if self.vs_ver < 14.0 or not self.si.NetFxSdkDir:
1279
+ return []
1280
+
1281
+ return [os.path.join(self.si.NetFxSdkDir, r'include\um')]
1282
+
1283
+ @property
1284
+ def VsTDb(self):
1285
+ """
1286
+ Microsoft Visual Studio Team System Database.
1287
+
1288
+ Return
1289
+ ------
1290
+ list of str
1291
+ paths
1292
+ """
1293
+ return [os.path.join(self.si.VSInstallDir, r'VSTSDB\Deploy')]
1294
+
1295
+ @property
1296
+ def MSBuild(self):
1297
+ """
1298
+ Microsoft Build Engine.
1299
+
1300
+ Return
1301
+ ------
1302
+ list of str
1303
+ paths
1304
+ """
1305
+ if self.vs_ver < 12.0:
1306
+ return []
1307
+ elif self.vs_ver < 15.0:
1308
+ base_path = self.si.ProgramFilesx86
1309
+ arch_subdir = self.pi.current_dir(hidex86=True)
1310
+ else:
1311
+ base_path = self.si.VSInstallDir
1312
+ arch_subdir = ''
1313
+
1314
+ path = rf'MSBuild\{self.vs_ver:0.1f}\bin{arch_subdir}'
1315
+ build = [os.path.join(base_path, path)]
1316
+
1317
+ if self.vs_ver >= 15.0:
1318
+ # Add Roslyn C# & Visual Basic Compiler
1319
+ build += [os.path.join(base_path, path, 'Roslyn')]
1320
+
1321
+ return build
1322
+
1323
+ @property
1324
+ def HTMLHelpWorkshop(self):
1325
+ """
1326
+ Microsoft HTML Help Workshop.
1327
+
1328
+ Return
1329
+ ------
1330
+ list of str
1331
+ paths
1332
+ """
1333
+ if self.vs_ver < 11.0:
1334
+ return []
1335
+
1336
+ return [os.path.join(self.si.ProgramFilesx86, 'HTML Help Workshop')]
1337
+
1338
+ @property
1339
+ def UCRTLibraries(self):
1340
+ """
1341
+ Microsoft Universal C Runtime SDK Libraries.
1342
+
1343
+ Return
1344
+ ------
1345
+ list of str
1346
+ paths
1347
+ """
1348
+ if self.vs_ver < 14.0:
1349
+ return []
1350
+
1351
+ arch_subdir = self.pi.target_dir(x64=True)
1352
+ lib = os.path.join(self.si.UniversalCRTSdkDir, 'lib')
1353
+ ucrtver = self._ucrt_subdir
1354
+ return [os.path.join(lib, f'{ucrtver}ucrt{arch_subdir}')]
1355
+
1356
+ @property
1357
+ def UCRTIncludes(self):
1358
+ """
1359
+ Microsoft Universal C Runtime SDK Include.
1360
+
1361
+ Return
1362
+ ------
1363
+ list of str
1364
+ paths
1365
+ """
1366
+ if self.vs_ver < 14.0:
1367
+ return []
1368
+
1369
+ include = os.path.join(self.si.UniversalCRTSdkDir, 'include')
1370
+ return [os.path.join(include, f'{self._ucrt_subdir}ucrt')]
1371
+
1372
+ @property
1373
+ def _ucrt_subdir(self):
1374
+ """
1375
+ Microsoft Universal C Runtime SDK version subdir.
1376
+
1377
+ Return
1378
+ ------
1379
+ str
1380
+ subdir
1381
+ """
1382
+ ucrtver = self.si.UniversalCRTSdkLastVersion
1383
+ return (f'{ucrtver}\\') if ucrtver else ''
1384
+
1385
+ @property
1386
+ def FSharp(self):
1387
+ """
1388
+ Microsoft Visual F#.
1389
+
1390
+ Return
1391
+ ------
1392
+ list of str
1393
+ paths
1394
+ """
1395
+ if 11.0 > self.vs_ver > 12.0:
1396
+ return []
1397
+
1398
+ return [self.si.FSharpInstallDir]
1399
+
1400
+ @property
1401
+ def VCRuntimeRedist(self) -> str | None:
1402
+ """
1403
+ Microsoft Visual C++ runtime redistributable dll.
1404
+
1405
+ Returns the first suitable path found or None.
1406
+ """
1407
+ vcruntime = f'vcruntime{self.vc_ver}0.dll'
1408
+ arch_subdir = self.pi.target_dir(x64=True).strip('\\')
1409
+
1410
+ # Installation prefixes candidates
1411
+ prefixes = []
1412
+ tools_path = self.si.VCInstallDir
1413
+ redist_path = os.path.dirname(tools_path.replace(r'\Tools', r'\Redist'))
1414
+ if os.path.isdir(redist_path):
1415
+ # Redist version may not be exactly the same as tools
1416
+ redist_path = os.path.join(redist_path, os.listdir(redist_path)[-1])
1417
+ prefixes += [redist_path, os.path.join(redist_path, 'onecore')]
1418
+
1419
+ prefixes += [os.path.join(tools_path, 'redist')] # VS14 legacy path
1420
+
1421
+ # CRT directory
1422
+ crt_dirs = (
1423
+ f'Microsoft.VC{self.vc_ver * 10}.CRT',
1424
+ # Sometime store in directory with VS version instead of VC
1425
+ f'Microsoft.VC{int(self.vs_ver) * 10}.CRT',
1426
+ )
1427
+
1428
+ # vcruntime path
1429
+ candidate_paths = (
1430
+ os.path.join(prefix, arch_subdir, crt_dir, vcruntime)
1431
+ for (prefix, crt_dir) in itertools.product(prefixes, crt_dirs)
1432
+ )
1433
+ return next(filter(os.path.isfile, candidate_paths), None) # type: ignore[arg-type] #python/mypy#12682
1434
+
1435
+ def return_env(self, exists: bool = True) -> _EnvironmentDict:
1436
+ """
1437
+ Return environment dict.
1438
+
1439
+ Parameters
1440
+ ----------
1441
+ exists: bool
1442
+ It True, only return existing paths.
1443
+
1444
+ Return
1445
+ ------
1446
+ dict
1447
+ environment
1448
+ """
1449
+ env = _EnvironmentDict(
1450
+ include=self._build_paths(
1451
+ 'include',
1452
+ [
1453
+ self.VCIncludes,
1454
+ self.OSIncludes,
1455
+ self.UCRTIncludes,
1456
+ self.NetFxSDKIncludes,
1457
+ ],
1458
+ exists,
1459
+ ),
1460
+ lib=self._build_paths(
1461
+ 'lib',
1462
+ [
1463
+ self.VCLibraries,
1464
+ self.OSLibraries,
1465
+ self.FxTools,
1466
+ self.UCRTLibraries,
1467
+ self.NetFxSDKLibraries,
1468
+ ],
1469
+ exists,
1470
+ ),
1471
+ libpath=self._build_paths(
1472
+ 'libpath',
1473
+ [self.VCLibraries, self.FxTools, self.VCStoreRefs, self.OSLibpath],
1474
+ exists,
1475
+ ),
1476
+ path=self._build_paths(
1477
+ 'path',
1478
+ [
1479
+ self.VCTools,
1480
+ self.VSTools,
1481
+ self.VsTDb,
1482
+ self.SdkTools,
1483
+ self.SdkSetup,
1484
+ self.FxTools,
1485
+ self.MSBuild,
1486
+ self.HTMLHelpWorkshop,
1487
+ self.FSharp,
1488
+ ],
1489
+ exists,
1490
+ ),
1491
+ )
1492
+ if self.vs_ver >= 14 and self.VCRuntimeRedist:
1493
+ env['py_vcruntime_redist'] = self.VCRuntimeRedist
1494
+ return env
1495
+
1496
+ def _build_paths(self, name, spec_path_lists, exists):
1497
+ """
1498
+ Given an environment variable name and specified paths,
1499
+ return a pathsep-separated string of paths containing
1500
+ unique, extant, directories from those paths and from
1501
+ the environment variable. Raise an error if no paths
1502
+ are resolved.
1503
+
1504
+ Parameters
1505
+ ----------
1506
+ name: str
1507
+ Environment variable name
1508
+ spec_path_lists: list of str
1509
+ Paths
1510
+ exists: bool
1511
+ It True, only return existing paths.
1512
+
1513
+ Return
1514
+ ------
1515
+ str
1516
+ Pathsep-separated paths
1517
+ """
1518
+ # flatten spec_path_lists
1519
+ spec_paths = itertools.chain.from_iterable(spec_path_lists)
1520
+ env_paths = environ.get(name, '').split(os.pathsep)
1521
+ paths = itertools.chain(spec_paths, env_paths)
1522
+ extant_paths = list(filter(os.path.isdir, paths)) if exists else paths
1523
+ if not extant_paths:
1524
+ msg = f"{name.upper()} environment variable is empty"
1525
+ raise distutils.errors.DistutilsPlatformError(msg)
1526
+ unique_paths = unique_everseen(extant_paths)
1527
+ return os.pathsep.join(unique_paths)
videollama2/lib/python3.10/site-packages/setuptools/namespaces.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+ import os
3
+
4
+ from .compat import py312
5
+
6
+ from distutils import log
7
+
8
+ flatten = itertools.chain.from_iterable
9
+
10
+
11
+ class Installer:
12
+ nspkg_ext = '-nspkg.pth'
13
+
14
+ def install_namespaces(self) -> None:
15
+ nsp = self._get_all_ns_packages()
16
+ if not nsp:
17
+ return
18
+ filename = self._get_nspkg_file()
19
+ self.outputs.append(filename)
20
+ log.info("Installing %s", filename)
21
+ lines = map(self._gen_nspkg_line, nsp)
22
+
23
+ if self.dry_run:
24
+ # always generate the lines, even in dry run
25
+ list(lines)
26
+ return
27
+
28
+ with open(filename, 'wt', encoding=py312.PTH_ENCODING) as f:
29
+ # Python<3.13 requires encoding="locale" instead of "utf-8"
30
+ # See: python/cpython#77102
31
+ f.writelines(lines)
32
+
33
+ def uninstall_namespaces(self) -> None:
34
+ filename = self._get_nspkg_file()
35
+ if not os.path.exists(filename):
36
+ return
37
+ log.info("Removing %s", filename)
38
+ os.remove(filename)
39
+
40
+ def _get_nspkg_file(self):
41
+ filename, _ = os.path.splitext(self._get_target())
42
+ return filename + self.nspkg_ext
43
+
44
+ def _get_target(self):
45
+ return self.target
46
+
47
+ _nspkg_tmpl = (
48
+ "import sys, types, os",
49
+ "p = os.path.join(%(root)s, *%(pth)r)",
50
+ "importlib = __import__('importlib.util')",
51
+ "__import__('importlib.machinery')",
52
+ (
53
+ "m = "
54
+ "sys.modules.setdefault(%(pkg)r, "
55
+ "importlib.util.module_from_spec("
56
+ "importlib.machinery.PathFinder.find_spec(%(pkg)r, "
57
+ "[os.path.dirname(p)])))"
58
+ ),
59
+ ("m = m or sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))"),
60
+ "mp = (m or []) and m.__dict__.setdefault('__path__',[])",
61
+ "(p not in mp) and mp.append(p)",
62
+ )
63
+ "lines for the namespace installer"
64
+
65
+ _nspkg_tmpl_multi = ('m and setattr(sys.modules[%(parent)r], %(child)r, m)',)
66
+ "additional line(s) when a parent package is indicated"
67
+
68
+ def _get_root(self):
69
+ return "sys._getframe(1).f_locals['sitedir']"
70
+
71
+ def _gen_nspkg_line(self, pkg):
72
+ pth = tuple(pkg.split('.'))
73
+ root = self._get_root()
74
+ tmpl_lines = self._nspkg_tmpl
75
+ parent, sep, child = pkg.rpartition('.')
76
+ if parent:
77
+ tmpl_lines += self._nspkg_tmpl_multi
78
+ return ';'.join(tmpl_lines) % locals() + '\n'
79
+
80
+ def _get_all_ns_packages(self):
81
+ """Return sorted list of all package namespaces"""
82
+ pkgs = self.distribution.namespace_packages or []
83
+ return sorted(set(flatten(map(self._pkg_names, pkgs))))
84
+
85
+ @staticmethod
86
+ def _pkg_names(pkg):
87
+ """
88
+ Given a namespace package, yield the components of that
89
+ package.
90
+
91
+ >>> names = Installer._pkg_names('a.b.c')
92
+ >>> set(names) == set(['a', 'a.b', 'a.b.c'])
93
+ True
94
+ """
95
+ parts = pkg.split('.')
96
+ while parts:
97
+ yield '.'.join(parts)
98
+ parts.pop()
99
+
100
+
101
+ class DevelopInstaller(Installer):
102
+ def _get_root(self):
103
+ return repr(str(self.egg_path))
104
+
105
+ def _get_target(self):
106
+ return self.egg_link
videollama2/lib/python3.10/site-packages/setuptools/script (dev).tmpl ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # EASY-INSTALL-DEV-SCRIPT: %(spec)r,%(script_name)r
2
+ __requires__ = %(spec)r
3
+ __import__('pkg_resources').require(%(spec)r)
4
+ __file__ = %(dev_path)r
5
+ with open(__file__) as f:
6
+ exec(compile(f.read(), __file__, 'exec'))
videollama2/lib/python3.10/site-packages/websockets/extensions/__pycache__/permessage_deflate.cpython-310.pyc ADDED
Binary file (13.4 kB). View file
 
videollama2/lib/python3.10/site-packages/websockets/legacy/__pycache__/protocol.cpython-310.pyc ADDED
Binary file (41.6 kB). View file
 
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Float_ops.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _cast_Float {
18
+ using schema = at::Tensor (const at::Tensor &, bool);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cast_Float")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cast_Float(Tensor self, bool non_blocking=False) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, bool non_blocking);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking);
26
+ };
27
+
28
+ }} // namespace at::_ops
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_copy_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & _conj_copy_out(at::Tensor & out, const at::Tensor & self);
21
+ TORCH_API at::Tensor & _conj_copy_outf(const at::Tensor & self, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_rnn_flatten_weight_native.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor & _cudnn_rnn_flatten_weight_out_symint(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out);
20
+ TORCH_API at::Tensor _cudnn_rnn_flatten_weight(at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional);
21
+ } // namespace native
22
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_native.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_out(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const ::std::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3);
20
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_cpu(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const ::std::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1);
21
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_cuda(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const ::std::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1);
22
+ } // namespace native
23
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_copy_native.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API ::std::vector<at::Tensor> _foreach_copy(at::TensorList self, at::TensorList src, bool non_blocking=false);
20
+ TORCH_API void _foreach_copy_out(at::TensorList self, at::TensorList src, bool non_blocking, at::TensorList out);
21
+ TORCH_API void foreach_tensor_copy_list_kernel_slow_(at::TensorList self, at::TensorList src, bool non_blocking=false);
22
+ TORCH_API void foreach_tensor_copy_list_kernel_cuda_(at::TensorList self, at::TensorList src, bool non_blocking=false);
23
+ } // namespace native
24
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cosh_ops.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _foreach_cosh {
18
+ using schema = ::std::vector<at::Tensor> (at::TensorList);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_cosh")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_cosh(Tensor[] self) -> Tensor[]")
24
+ static ::std::vector<at::Tensor> call(at::TensorList self);
25
+ static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
26
+ };
27
+
28
+ struct TORCH_API _foreach_cosh_ {
29
+ using schema = void (at::TensorList);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_cosh_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_cosh_(Tensor(a!)[] self) -> ()")
35
+ static void call(at::TensorList self);
36
+ static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
37
+ };
38
+
39
+ struct TORCH_API _foreach_cosh_out {
40
+ using schema = void (at::TensorList, at::TensorList);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_cosh")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
46
+ static void call(at::TensorList self, at::TensorList out);
47
+ static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
48
+ };
49
+
50
+ }} // namespace at::_ops
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_native.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API ::std::vector<at::Tensor> foreach_tensor_reciprocal_slow(at::TensorList self);
20
+ TORCH_API void _foreach_reciprocal_out(at::TensorList self, at::TensorList out);
21
+ TORCH_API void foreach_tensor_reciprocal_slow_(at::TensorList self);
22
+ TORCH_API ::std::vector<at::Tensor> foreach_tensor_reciprocal_cuda(at::TensorList self);
23
+ TORCH_API void foreach_tensor_reciprocal_cuda_(at::TensorList self);
24
+ } // namespace native
25
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sigmoid_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API ::std::vector<at::Tensor> _foreach_sigmoid(at::TensorList self);
21
+ TORCH_API void _foreach_sigmoid_out(at::TensorList out, at::TensorList self);
22
+ TORCH_API void _foreach_sigmoid_outf(at::TensorList self, at::TensorList out);
23
+ TORCH_API void _foreach_sigmoid_(at::TensorList self);
24
+
25
+ } // namespace compositeexplicitautograd
26
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl.h ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+
18
+
19
+
20
+ #include <ATen/ops/_index_put_impl_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)
26
+ inline at::Tensor & _index_put_impl_(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false) {
27
+ return at::_ops::_index_put_impl_::call(self, indices, values, accumulate, unsafe);
28
+ }
29
+
30
+ // aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & _index_put_impl_out(at::Tensor & out, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false) {
32
+ return at::_ops::_index_put_impl_out::call(self, indices, values, accumulate, unsafe, out);
33
+ }
34
+ // aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & _index_put_impl_outf(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out) {
36
+ return at::_ops::_index_put_impl_out::call(self, indices, values, accumulate, unsafe, out);
37
+ }
38
+
39
+ // aten::_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor
40
+ inline at::Tensor _index_put_impl(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false) {
41
+ return at::_ops::_index_put_impl::call(self, indices, values, accumulate, unsafe);
42
+ }
43
+
44
+ }
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_offsets.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+
18
+
19
+
20
+ #include <ATen/ops/_nested_get_offsets_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_nested_get_offsets(Tensor self) -> Tensor
26
+ inline at::Tensor _nested_get_offsets(const at::Tensor & self) {
27
+ return at::_ops::_nested_get_offsets::call(self);
28
+ }
29
+
30
+ }
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_enum_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor _pad_enum(const at::Tensor & self, at::IntArrayRef pad, int64_t mode, ::std::optional<double> value=::std::nullopt);
21
+ TORCH_API at::Tensor _pad_enum_symint(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, ::std::optional<double> value=::std::nullopt);
22
+
23
+ } // namespace compositeimplicitautograd
24
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_forward_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _pdist_forward {
18
+ using schema = at::Tensor (const at::Tensor &, double);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_pdist_forward")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_pdist_forward(Tensor self, float p=2) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, double p);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p);
26
+ };
27
+
28
+ struct TORCH_API _pdist_forward_out {
29
+ using schema = at::Tensor & (const at::Tensor &, double, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_pdist_forward")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, double p, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_tensor_unsafe_ops.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _sparse_csr_tensor_unsafe {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_csr_tensor_unsafe")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
26
+ };
27
+
28
+ }} // namespace at::_ops
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor _sparse_sum(const at::Tensor & self);
21
+ TORCH_API at::Tensor _sparse_sum(const at::Tensor & self, at::ScalarType dtype);
22
+ TORCH_API at::Tensor _sparse_sum(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype);
23
+
24
+ } // namespace compositeimplicitautograd
25
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_stack_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _stack {
18
+ using schema = at::Tensor (at::TensorList, int64_t);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_stack")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_stack(Tensor[] tensors, int dim=0) -> Tensor")
24
+ static at::Tensor call(at::TensorList tensors, int64_t dim);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim);
26
+ };
27
+
28
+ struct TORCH_API _stack_out {
29
+ using schema = at::Tensor & (at::TensorList, int64_t, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_stack")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(at::TensorList tensors, int64_t dim, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd_cpu_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor _transformer_encoder_layer_fwd(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const ::std::optional<at::Tensor> & mask={}, ::std::optional<int64_t> mask_type=::std::nullopt);
21
+
22
+ } // namespace cpu
23
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_unique_cpu_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> _unique(const at::Tensor & self, bool sorted=true, bool return_inverse=false);
21
+
22
+ } // namespace cpu
23
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_native.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+ #include <ATen/ops/_upsample_bilinear2d_aa_backward_meta.h>
16
+
17
+ namespace at {
18
+ namespace native {
19
+ struct TORCH_API structured__upsample_bilinear2d_aa_backward_out_cpu : public at::meta::structured__upsample_bilinear2d_aa_backward {
20
+ void impl(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, const at::Tensor & grad_input);
21
+ };
22
+ struct TORCH_API structured__upsample_bilinear2d_aa_backward_out_cuda : public at::meta::structured__upsample_bilinear2d_aa_backward {
23
+ void impl(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, const at::Tensor & grad_input);
24
+ };
25
+ } // namespace native
26
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_meta.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/TensorIterator.h>
13
+ #include <ATen/TensorMeta.h>
14
+ #include <tuple>
15
+ #include <vector>
16
+
17
+ namespace at {
18
+ namespace meta {
19
+
20
+ struct TORCH_API structured__upsample_nearest_exact3d : public at::impl::MetaBase {
21
+
22
+
23
+ void meta(const at::Tensor & self, at::ArrayRef<int64_t> output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w);
24
+ };
25
+
26
+ } // namespace native
27
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_bsr_tensor_args.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+
18
+
19
+
20
+ #include <ATen/ops/_validate_sparse_bsr_tensor_args_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()
26
+ inline void _validate_sparse_bsr_tensor_args(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
27
+ return at::_ops::_validate_sparse_bsr_tensor_args::call(crow_indices, col_indices, values, size);
28
+ }
29
+
30
+ }
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/acos_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautogradnonfunctional {
19
+
20
+ TORCH_API at::Tensor acos(const at::Tensor & self);
21
+ TORCH_API at::Tensor & acos_(at::Tensor & self);
22
+
23
+ } // namespace compositeexplicitautogradnonfunctional
24
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/add_meta_dispatch.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
21
+ TORCH_API at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
22
+ TORCH_API at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out);
23
+ TORCH_API at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
24
+
25
+ } // namespace meta
26
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/all_meta.h ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/TensorIterator.h>
13
+ #include <ATen/TensorMeta.h>
14
+ #include <tuple>
15
+ #include <vector>
16
+
17
+ namespace at {
18
+ namespace meta {
19
+
20
+ struct TORCH_API structured_all_dim : public at::impl::MetaBase {
21
+
22
+
23
+ void meta(const at::Tensor & self, int64_t dim, bool keepdim);
24
+ };
25
+ struct TORCH_API structured_all_dims : public at::impl::MetaBase {
26
+
27
+
28
+ void meta(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim);
29
+ };
30
+ struct TORCH_API structured_all : public at::impl::MetaBase {
31
+
32
+
33
+ void meta(const at::Tensor & self);
34
+ };
35
+
36
+ } // namespace native
37
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/atan_cpu_dispatch.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor atan(const at::Tensor & self);
21
+ TORCH_API at::Tensor & atan_out(at::Tensor & out, const at::Tensor & self);
22
+ TORCH_API at::Tensor & atan_outf(const at::Tensor & self, at::Tensor & out);
23
+ TORCH_API at::Tensor & atan_(at::Tensor & self);
24
+
25
+ } // namespace cpu
26
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_backward_cuda_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
21
+ TORCH_API at::Tensor & binary_cross_entropy_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
22
+ TORCH_API at::Tensor & binary_cross_entropy_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input);
23
+
24
+ } // namespace cuda
25
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/cartesian_prod_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor cartesian_prod(at::TensorList tensors);
20
+ } // namespace native
21
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/celu_ops.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API celu {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::celu")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "celu(Tensor self, Scalar alpha=1.0) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, const at::Scalar & alpha);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha);
26
+ };
27
+
28
+ struct TORCH_API celu_ {
29
+ using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::celu_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)")
35
+ static at::Tensor & call(at::Tensor & self, const at::Scalar & alpha);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & alpha);
37
+ };
38
+
39
+ struct TORCH_API celu_out {
40
+ using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::celu")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!)")
46
+ static at::Tensor & call(const at::Tensor & self, const at::Scalar & alpha, at::Tensor & out);
47
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha, at::Tensor & out);
48
+ };
49
+
50
+ }} // namespace at::_ops
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/chunk_ops.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API chunk {
18
+ using schema = ::std::vector<at::Tensor> (const at::Tensor &, int64_t, int64_t);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::chunk")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]")
24
+ static ::std::vector<at::Tensor> call(const at::Tensor & self, int64_t chunks, int64_t dim);
25
+ static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t chunks, int64_t dim);
26
+ };
27
+
28
+ }} // namespace at::_ops
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/diagflat_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor diagflat(const at::Tensor & self, int64_t offset=0);
20
+ } // namespace native
21
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautogradnonfunctional {
19
+
20
+ TORCH_API at::Tensor digamma(const at::Tensor & self);
21
+ TORCH_API at::Tensor & digamma_(at::Tensor & self);
22
+
23
+ } // namespace compositeexplicitautogradnonfunctional
24
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/dot_native.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor & dot_out(const at::Tensor & self, const at::Tensor & tensor, at::Tensor & out);
20
+ TORCH_API at::Tensor dot(const at::Tensor & self, const at::Tensor & tensor);
21
+ TORCH_API at::Tensor dot_cuda(const at::Tensor & self, const at::Tensor & tensor);
22
+ } // namespace native
23
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+
18
+
19
+
20
+ #include <ATen/ops/fake_quantize_per_tensor_affine_cachemask_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
26
+ inline ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
27
+ return at::_ops::fake_quantize_per_tensor_affine_cachemask::call(self, scale, zero_point, quant_min, quant_max);
28
+ }
29
+
30
+ // aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
31
+ inline ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
32
+ return at::_ops::fake_quantize_per_tensor_affine_cachemask_out::call(self, scale, zero_point, quant_min, quant_max, out0, out1);
33
+ }
34
+ // aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
35
+ inline ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_outf(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
36
+ return at::_ops::fake_quantize_per_tensor_affine_cachemask_out::call(self, scale, zero_point, quant_min, quant_max, out0, out1);
37
+ }
38
+
39
+ }
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor fbgemm_linear_fp16_weight(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias);
20
+ } // namespace native
21
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/feature_alpha_dropout.h ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+
18
+
19
+
20
+ #include <ATen/ops/feature_alpha_dropout_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor
26
+ inline at::Tensor feature_alpha_dropout(const at::Tensor & input, double p, bool train) {
27
+ return at::_ops::feature_alpha_dropout::call(input, p, train);
28
+ }
29
+
30
+ // aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
31
+ inline at::Tensor & feature_alpha_dropout_(at::Tensor & self, double p, bool train) {
32
+ return at::_ops::feature_alpha_dropout_::call(self, p, train);
33
+ }
34
+
35
+ }
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fft_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API fft_fft {
18
+ using schema = at::Tensor (const at::Tensor &, ::std::optional<c10::SymInt>, int64_t, ::std::optional<c10::string_view>);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_fft")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm);
26
+ };
27
+
28
+ struct TORCH_API fft_fft_out {
29
+ using schema = at::Tensor & (const at::Tensor &, ::std::optional<c10::SymInt>, int64_t, ::std::optional<c10::string_view>, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_fft")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifft_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API fft_ifft {
18
+ using schema = at::Tensor (const at::Tensor &, ::std::optional<c10::SymInt>, int64_t, ::std::optional<c10::string_view>);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_ifft")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm);
26
+ };
27
+
28
+ struct TORCH_API fft_ifft_out {
29
+ using schema = at::Tensor & (const at::Tensor &, ::std::optional<c10::SymInt>, int64_t, ::std::optional<c10::string_view>, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_ifft")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_meta_dispatch.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API at::Tensor gelu(const at::Tensor & self, c10::string_view approximate="none");
21
+ TORCH_API at::Tensor & gelu_out(at::Tensor & out, const at::Tensor & self, c10::string_view approximate="none");
22
+ TORCH_API at::Tensor & gelu_outf(const at::Tensor & self, c10::string_view approximate, at::Tensor & out);
23
+ TORCH_API at::Tensor & gelu_(at::Tensor & self, c10::string_view approximate="none");
24
+
25
+ } // namespace meta
26
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/glu_cuda_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor glu(const at::Tensor & self, int64_t dim=-1);
21
+ TORCH_API at::Tensor & glu_out(at::Tensor & out, const at::Tensor & self, int64_t dim=-1);
22
+ TORCH_API at::Tensor & glu_outf(const at::Tensor & self, int64_t dim, at::Tensor & out);
23
+
24
+ } // namespace cuda
25
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/greater_equal_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor greater_equal(const at::Tensor & self, const at::Scalar & other);
21
+ TORCH_API at::Tensor & greater_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
22
+ TORCH_API at::Tensor & greater_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
23
+ TORCH_API at::Tensor & greater_equal_(at::Tensor & self, const at::Scalar & other);
24
+ TORCH_API at::Tensor greater_equal(const at::Tensor & self, const at::Tensor & other);
25
+ TORCH_API at::Tensor & greater_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
26
+ TORCH_API at::Tensor & greater_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
27
+ TORCH_API at::Tensor & greater_equal_(at::Tensor & self, const at::Tensor & other);
28
+
29
+ } // namespace compositeimplicitautograd
30
+ } // namespace at
vllm/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_2d_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API grid_sampler_2d {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::grid_sampler_2d")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
26
+ };
27
+
28
+ struct TORCH_API grid_sampler_2d_out {
29
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::grid_sampler_2d")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops