diff --git a/vllm/lib/python3.10/site-packages/cupyx/__init__.py b/vllm/lib/python3.10/site-packages/cupyx/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0fde844a2df11fc4ecdc1ec2c5e4177bd0040ab7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/__init__.py @@ -0,0 +1,33 @@ +# "NOQA" to suppress flake8 warning +from cupyx._rsqrt import rsqrt # NOQA +from cupyx._runtime import get_runtime_info # NOQA +from cupyx._scatter import scatter_add # NOQA +from cupyx._scatter import scatter_max # NOQA +from cupyx._scatter import scatter_min # NOQA + +from cupyx import linalg # NOQA +from cupyx import time # NOQA +from cupyx import scipy # NOQA +from cupyx import optimizing # NOQA + +from cupyx._ufunc_config import errstate # NOQA +from cupyx._ufunc_config import geterr # NOQA +from cupyx._ufunc_config import seterr # NOQA +from cupy._core.syncdetect import allow_synchronize # NOQA +from cupy._core.syncdetect import DeviceSynchronized # NOQA + +from cupyx._pinned_array import empty_pinned # NOQA +from cupyx._pinned_array import empty_like_pinned # NOQA +from cupyx._pinned_array import zeros_pinned # NOQA +from cupyx._pinned_array import zeros_like_pinned # NOQA + +from cupyx._gufunc import GeneralizedUFunc # NOQA + + +def __getattr__(key): + if key == 'lapack': + import cupyx.lapack + return cupyx.lapack + + raise AttributeError( + "module '{}' has no attribute '{}'".format(__name__, key)) diff --git a/vllm/lib/python3.10/site-packages/cupyx/_gufunc.py b/vllm/lib/python3.10/site-packages/cupyx/_gufunc.py new file mode 100644 index 0000000000000000000000000000000000000000..a38b361d9d72cfa8ce6320f0eb06e430843755bb --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/_gufunc.py @@ -0,0 +1,5 @@ +import cupy + + +class GeneralizedUFunc(cupy._core._gufuncs._GUFunc): + __doc__ = cupy._core._gufuncs._GUFunc.__doc__ diff --git a/vllm/lib/python3.10/site-packages/cupyx/_pinned_array.py 
b/vllm/lib/python3.10/site-packages/cupyx/_pinned_array.py new file mode 100644 index 0000000000000000000000000000000000000000..f9b8c9c813b4b513e26bf4199b59947316784838 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/_pinned_array.py @@ -0,0 +1,141 @@ +import numpy + +from cupy import cuda +from cupy._creation.basic import _new_like_order_and_strides +from cupy._core import internal + + +def _update_shape(a, shape): + if shape is None and a is not None: + shape = a.shape + elif isinstance(shape, int): + shape = (shape,) + else: + shape = tuple(shape) + return shape + + +def empty_pinned(shape, dtype=float, order='C'): + """Returns a new, uninitialized NumPy array with the given shape + and dtype. + + This is a convenience function which is just :func:`numpy.empty`, + except that the underlying memory is pinned/pagelocked. + + Args: + shape (int or tuple of ints): Dimensionalities of the array. + dtype: Data type specifier. + order ({'C', 'F'}): Row-major (C-style) or column-major + (Fortran-style) order. + + Returns: + numpy.ndarray: A new array with elements not initialized. + + .. seealso:: :func:`numpy.empty` + + """ + shape = _update_shape(None, shape) + nbytes = internal.prod(shape) * numpy.dtype(dtype).itemsize + mem = cuda.alloc_pinned_memory(nbytes) + out = numpy.ndarray(shape, dtype=dtype, buffer=mem, order=order) + return out + + +def empty_like_pinned(a, dtype=None, order='K', subok=None, shape=None): + """Returns a new, uninitialized NumPy array with the same shape and dtype + as those of the given array. + + This is a convenience function which is just :func:`numpy.empty_like`, + except that the underlying memory is pinned/pagelocked. + + This function currently does not support ``subok`` option. + + Args: + a (numpy.ndarray or cupy.ndarray): Base array. + dtype: Data type specifier. The data type of ``a`` is used by default. + order ({'C', 'F', 'A', or 'K'}): Overrides the memory layout of the + result. 
``'C'`` means C-order, ``'F'`` means F-order, ``'A'`` means + ``'F'`` if ``a`` is Fortran contiguous, ``'C'`` otherwise. + ``'K'`` means match the layout of ``a`` as closely as possible. + subok: Not supported yet, must be None. + shape (int or tuple of ints): Overrides the shape of the result. If + ``order='K'`` and the number of dimensions is unchanged, will try + to keep order, otherwise, ``order='C'`` is implied. + + Returns: + numpy.ndarray: A new array with same shape and dtype of ``a`` with + elements not initialized. + + .. seealso:: :func:`numpy.empty_like` + + """ + # We're kinda duplicating the code here because order='K' needs special + # treatment: strides need to be computed + if subok is not None: + raise TypeError('subok is not supported yet') + if dtype is None: + dtype = a.dtype + shape = _update_shape(a, shape) + order, strides, _ = _new_like_order_and_strides( + a, dtype, order, shape, get_memptr=False) + nbytes = internal.prod(shape) * numpy.dtype(dtype).itemsize + mem = cuda.alloc_pinned_memory(nbytes) + out = numpy.ndarray(shape, dtype=dtype, buffer=mem, + strides=strides, order=order) + return out + + +def zeros_pinned(shape, dtype=float, order='C'): + """Returns a new, zero-initialized NumPy array with the given shape + and dtype. + + This is a convenience function which is just :func:`numpy.zeros`, + except that the underlying memory is pinned/pagelocked. + + Args: + shape (int or tuple of ints): Dimensionalities of the array. + dtype: Data type specifier. + order ({'C', 'F'}): Row-major (C-style) or column-major + (Fortran-style) order. + + Returns: + numpy.ndarray: An array filled with zeros. + + .. seealso:: :func:`numpy.zeros` + + """ + out = empty_pinned(shape, dtype, order) + numpy.copyto(out, 0, casting='unsafe') + return out + + +def zeros_like_pinned(a, dtype=None, order='K', subok=None, shape=None): + """Returns a new, zero-initialized NumPy array with the same shape and dtype + as those of the given array. 
+ + This is a convenience function which is just :func:`numpy.zeros_like`, + except that the underlying memory is pinned/pagelocked. + + This function currently does not support ``subok`` option. + + Args: + a (numpy.ndarray or cupy.ndarray): Base array. + dtype: Data type specifier. The dtype of ``a`` is used by default. + order ({'C', 'F', 'A', or 'K'}): Overrides the memory layout of the + result. ``'C'`` means C-order, ``'F'`` means F-order, ``'A'`` means + ``'F'`` if ``a`` is Fortran contiguous, ``'C'`` otherwise. + ``'K'`` means match the layout of ``a`` as closely as possible. + subok: Not supported yet, must be None. + shape (int or tuple of ints): Overrides the shape of the result. If + ``order='K'`` and the number of dimensions is unchanged, will try + to keep order, otherwise, ``order='C'`` is implied. + + Returns: + numpy.ndarray: An array filled with zeros. + + .. seealso:: :func:`numpy.zeros_like` + + """ # NOQA + out = empty_like_pinned(a, dtype, order, subok, shape) + numpy.copyto(out, 0, casting='unsafe') + return out diff --git a/vllm/lib/python3.10/site-packages/cupyx/_rsqrt.py b/vllm/lib/python3.10/site-packages/cupyx/_rsqrt.py new file mode 100644 index 0000000000000000000000000000000000000000..b5d958aade908d19939c1999c330d64a891747b9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/_rsqrt.py @@ -0,0 +1,7 @@ +from cupy._core.core import create_ufunc + +rsqrt = create_ufunc( + 'cupy_rsqrt', + ('e->e', 'f->f', 'd->d', 'F->F', 'D->D'), + 'out0 = rsqrt(in0)', + doc='''Returns the reciprocal square root.''') diff --git a/vllm/lib/python3.10/site-packages/cupyx/_runtime.py b/vllm/lib/python3.10/site-packages/cupyx/_runtime.py new file mode 100644 index 0000000000000000000000000000000000000000..57c8a11056bcc00961320f9a938769dcc34b4739 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/_runtime.py @@ -0,0 +1,357 @@ +import inspect +import io +import os +import platform +import warnings + +import numpy + +import cupy +import 
cupy_backends + + +is_hip = cupy_backends.cuda.api.runtime.is_hip + + +def _eval_or_error(func, errors): + # Evaluates `func` and return the result. + # If an error specified by `errors` occurred, it returns a string + # representing the error. + try: + return func() + except errors as e: + return repr(e) + + +class _InstallInfo(object): + + # TODO(niboshi): Add is_binary_distribution + + def __init__(self): + cupy_package_root = self._get_cupy_package_root() + if cupy_package_root is not None: + data_root = os.path.join(cupy_package_root, '.data') + data_paths = { + 'lib': _dir_or_none(os.path.join(data_root, 'lib')), + 'include': _dir_or_none(os.path.join(data_root, 'include')), + } + else: + data_paths = { + 'lib': None, + 'include': None, + } + + self.cupy_package_root = cupy_package_root + self.data_paths = data_paths + + def get_data_path(self, data_type): + if data_type not in self.data_paths: + raise ValueError('Invalid data type: {}'.format(data_type)) + return self.data_paths[data_type] + + def _get_cupy_package_root(self): + try: + cupy_path = inspect.getfile(cupy) + except TypeError: + return None + return os.path.dirname(cupy_path) + + +class _RuntimeInfo: + + cupy_version = None + cuda_path = None + + # CUDA Driver + cuda_build_version = None + cuda_driver_version = None + + # CUDA Runtime + cuda_runtime_version = None + cuda_local_runtime_version = None + + # CUDA Toolkit + cublas_version = None + cufft_version = None + curand_version = None + cusolver_version = None + cusparse_version = None + nvrtc_version = None + thrust_version = None + cuda_extra_include_dirs = None + + # Optional Libraries + cudnn_build_version = None + cudnn_version = None + nccl_build_version = None + nccl_runtime_version = None + cub_build_version = None + jitify_build_version = None + cutensor_version = None + cusparselt_version = None + cython_build_version = None + cython_version = None + + numpy_version = None + scipy_version = None + + def __init__(self, *, full=True): 
+ self.cupy_version = cupy.__version__ + + if not is_hip: + self.cuda_path = cupy.cuda.get_cuda_path() + else: + self.cuda_path = cupy._environment.get_rocm_path() + + if not is_hip: + self.nvcc_path = cupy._environment.get_nvcc_path() + else: + self.nvcc_path = cupy._environment.get_hipcc_path() + + # CUDA Driver + self.cuda_build_version = str(cupy.cuda.driver.get_build_version()) + if cupy.cuda.driver._is_cuda_python(): + try: + import cuda.bindings + cuda_version = cuda.bindings.__version__ + except ImportError: + import cuda + cuda_version = cuda.__version__ + self.cuda_build_version += f' (CUDA Python: {cuda_version})' + self.cuda_driver_version = _eval_or_error( + cupy.cuda.runtime.driverGetVersion, + cupy.cuda.runtime.CUDARuntimeError) + + # CUDA Runtime + self.cuda_runtime_version = _eval_or_error( + cupy.cuda.runtime.runtimeGetVersion, + cupy.cuda.runtime.CUDARuntimeError) + self.cuda_local_runtime_version = _eval_or_error( + cupy.cuda.get_local_runtime_version, + Exception) + + # cuBLAS + self.cublas_version = '(available)' + if full: + self.cublas_version = _eval_or_error( + lambda: cupy.cuda.cublas.getVersion( + cupy.cuda.device.get_cublas_handle()), + Exception) + + # cuFFT + try: + from cupy.cuda import cufft + self.cufft_version = _eval_or_error( + lambda: cufft.getVersion(), Exception) + except ImportError: + pass + + # cuRAND + self.curand_version = _eval_or_error( + lambda: cupy.cuda.curand.getVersion(), + Exception) + + # cuSOLVER + self.cusolver_version = _eval_or_error( + lambda: cupy.cuda.cusolver._getVersion(), + Exception) + + # cuSPARSE + self.cusparse_version = '(available)' + if full: + self.cusparse_version = _eval_or_error( + lambda: cupy.cuda.cusparse.getVersion( + cupy.cuda.device.get_cusparse_handle()), + Exception) + + # NVRTC + self.nvrtc_version = _eval_or_error( + lambda: cupy.cuda.nvrtc.getVersion(), + Exception) + + # Thrust + try: + import cupy.cuda.thrust as thrust + self.thrust_version = thrust.get_build_version() + except 
ImportError: + pass + + # CUDA Extra Include Dirs + if not is_hip: + try: + nvrtc_version = cupy.cuda.nvrtc.getVersion() + except Exception: + nvrtc_version = None + if nvrtc_version is None: + self.cuda_extra_include_dirs = '(NVRTC unavailable)' + else: + self.cuda_extra_include_dirs = str( + cupy._environment._get_include_dir_from_conda_or_wheel( + *nvrtc_version)) + + # cuDNN + if cupy._environment._can_attempt_preload('cudnn'): + if full: + cupy._environment._preload_library('cudnn') + else: + self.cudnn_build_version = ( + '(not loaded; try `import cupy.cuda.cudnn` first)') + self.cudnn_version = self.cudnn_build_version + try: + import cupy_backends.cuda.libs.cudnn as cudnn + self.cudnn_build_version = cudnn.get_build_version() + self.cudnn_version = _eval_or_error( + cudnn.getVersion, cudnn.CuDNNError) + except ImportError: + pass + + # NCCL + if cupy._environment._can_attempt_preload('nccl'): + if full: + cupy._environment._preload_library('nccl') + else: + self.nccl_build_version = ( + '(not loaded; try `import cupy.cuda.nccl` first)') + self.nccl_runtime_version = self.nccl_build_version + try: + import cupy_backends.cuda.libs.nccl as nccl + self.nccl_build_version = nccl.get_build_version() + nccl_runtime_version = nccl.get_version() + if nccl_runtime_version == 0: + nccl_runtime_version = '(unknown)' + self.nccl_runtime_version = nccl_runtime_version + except ImportError: + pass + + # CUB + self.cub_build_version = cupy.cuda.cub.get_build_version() + + try: + import cupy.cuda.jitify as jitify + self.jitify_build_version = jitify.get_build_version() + except ImportError: + pass + + # cuTENSOR + try: + import cupy_backends.cuda.libs.cutensor as cutensor + self.cutensor_version = cutensor.get_version() + except ImportError: + pass + + # cuSparseLT + try: + import cupy_backends.cuda.libs.cusparselt as cusparselt + self.cusparselt_version = cusparselt.get_build_version() + except ImportError: + pass + + # Cython + self.cython_build_version = 
cupy._util.cython_build_ver + try: + import Cython + self.cython_version = Cython.__version__ + except ImportError: + pass + + # NumPy + self.numpy_version = numpy.version.full_version + + # SciPy + try: + import scipy + self.scipy_version = scipy.version.full_version + except ImportError: + pass + + def __str__(self): + records = [ + ('OS', platform.platform()), + ('Python Version', platform.python_version()), + ('CuPy Version', self.cupy_version), + ('CuPy Platform', 'NVIDIA CUDA' if not is_hip else 'AMD ROCm'), + ('NumPy Version', self.numpy_version), + ('SciPy Version', self.scipy_version), + ('Cython Build Version', self.cython_build_version), + ('Cython Runtime Version', self.cython_version), + ('CUDA Root', self.cuda_path), + ('hipcc PATH' if is_hip else 'nvcc PATH', self.nvcc_path), + + ('CUDA Build Version', self.cuda_build_version), + ('CUDA Driver Version', self.cuda_driver_version), + + ('CUDA Runtime Version', ( + f'{self.cuda_runtime_version} (linked to CuPy) / ' + f'{self.cuda_local_runtime_version} (locally installed)' + )), + ('CUDA Extra Include Dirs', self.cuda_extra_include_dirs), + ] + + records += [ + ('cuBLAS Version', self.cublas_version), + ('cuFFT Version', self.cufft_version), + ('cuRAND Version', self.curand_version), + ('cuSOLVER Version', self.cusolver_version), + ('cuSPARSE Version', self.cusparse_version), + ('NVRTC Version', self.nvrtc_version), + ('Thrust Version', self.thrust_version), + ('CUB Build Version', self.cub_build_version), + ('Jitify Build Version', self.jitify_build_version), + ] + + records += [ + ('cuDNN Build Version', self.cudnn_build_version), + ('cuDNN Version', self.cudnn_version), + ('NCCL Build Version', self.nccl_build_version), + ('NCCL Runtime Version', self.nccl_runtime_version), + ('cuTENSOR Version', self.cutensor_version), + ('cuSPARSELt Build Version', self.cusparselt_version), + ] + + device_count = 0 + try: + device_count = cupy.cuda.runtime.getDeviceCount() + except 
cupy.cuda.runtime.CUDARuntimeError as e: + if 'ErrorNoDevice' not in e.args[0]: + warnings.warn(f'Failed to detect number of GPUs: {e}') + # No GPU devices available. + for device_id in range(device_count): + with cupy.cuda.Device(device_id) as device: + props = cupy.cuda.runtime.getDeviceProperties(device_id) + name = ('Device {} Name'.format(device_id), + props['name'].decode()) + pci_bus = ('Device {} PCI Bus ID'.format(device_id), + device.pci_bus_id) + if is_hip: + try: + arch = props['gcnArchName'].decode() + except KeyError: # ROCm < 3.6.0 + arch = 'gfx'+str(props['gcnArch']) + arch = ('Device {} Arch'.format(device_id), arch) + else: + arch = ('Device {} Compute Capability'.format(device_id), + device.compute_capability) + records += [name, arch, pci_bus] + + width = max([len(r[0]) for r in records]) + 2 + fmt = '{:' + str(width) + '}: {}\n' + s = io.StringIO() + for k, v in records: + s.write(fmt.format(k, v)) + + return s.getvalue() + + +def get_runtime_info(*, full=True): + return _RuntimeInfo(full=full) + + +def get_install_info(): + return _InstallInfo() + + +def _dir_or_none(path): + """Returns None if path does not exist.""" + if os.path.isdir(path): + return path + return None diff --git a/vllm/lib/python3.10/site-packages/cupyx/_scatter.py b/vllm/lib/python3.10/site-packages/cupyx/_scatter.py new file mode 100644 index 0000000000000000000000000000000000000000..182d4cb19212812cd806b86dc274386678bc6493 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/_scatter.py @@ -0,0 +1,131 @@ +def scatter_add(a, slices, value): + """Adds given values to specified elements of an array. + + It adds ``value`` to the specified elements of ``a``. + If all of the indices target different locations, the operation of + :func:`scatter_add` is equivalent to ``a[slices] = a[slices] + value``. + If there are multiple elements targeting the same location, + :func:`scatter_add` uses all of these values for addition. 
On the other + hand, ``a[slices] = a[slices] + value`` only adds the contribution from one + of the indices targeting the same location. + + Note that just like an array indexing, negative indices are interpreted as + counting from the end of an array. + + Also note that :func:`scatter_add` behaves identically + to :func:`numpy.add.at`. + + Example + ------- + >>> import cupy + >>> import cupyx + >>> a = cupy.zeros((6,), dtype=cupy.float32) + >>> i = cupy.array([1, 0, 1]) + >>> v = cupy.array([1., 1., 1.]) + >>> cupyx.scatter_add(a, i, v); + >>> a + array([1., 2., 0., 0., 0., 0.], dtype=float32) + + Args: + a (ndarray): An array that gets added. + slices: It is integer, slices, ellipsis, numpy.newaxis, + integer array-like, boolean array-like or tuple of them. + It works for slices used for + :func:`cupy.ndarray.__getitem__` and + :func:`cupy.ndarray.__setitem__`. + v (array-like): Values to increment ``a`` at referenced locations. + + .. note:: + It only supports types that are supported by CUDA's atomicAdd when + an integer array is included in ``slices``. + The supported types are ``numpy.float32``, ``numpy.int32``, + ``numpy.uint32``, ``numpy.uint64`` and ``numpy.ulonglong``. + + .. note:: + :func:`scatter_add` does not raise an error when indices exceed size of + axes. Instead, it wraps indices. + + .. seealso:: :meth:`numpy.ufunc.at`. + + """ + a.scatter_add(slices, value) + + +def scatter_max(a, slices, value): + """Stores a maximum value of elements specified by indices to an array. + + It stores the maximum value of elements in ``value`` array indexed by + ``slices`` to ``a``. If all of the indices target different locations, + the operation of :func:`scatter_max` is equivalent to + ``a[slices] = cupy.maximum(a[slices], value)``. + If there are multiple elements targeting the same location, + :func:`scatter_max` stores the maximum of all of these values to the given + index of ``a``, the initial element of ``a`` is also taken in account. 
+ + Note that just like an array indexing, negative indices are interpreted as + counting from the end of an array. + + Also note that :func:`scatter_max` behaves identically + to :func:`numpy.maximum.at`. + + Example + ------- + >>> import numpy + >>> import cupy + >>> a = cupy.zeros((6,), dtype=numpy.float32) + >>> i = cupy.array([1, 0, 1, 2]) + >>> v = cupy.array([1., 2., 3., -1.]) + >>> cupyx.scatter_max(a, i, v); + >>> a + array([2., 3., 0., 0., 0., 0.], dtype=float32) + + Args: + a (ndarray): An array to store the results. + slices: It is integer, slices, ellipsis, numpy.newaxis, + integer array-like, boolean array-like or tuple of them. + It works for slices used for + :func:`cupy.ndarray.__getitem__` and + :func:`cupy.ndarray.__setitem__`. + v (array-like): An array used for reference. + """ + a.scatter_max(slices, value) + + +def scatter_min(a, slices, value): + """Stores a minimum value of elements specified by indices to an array. + + It stores the minimum value of elements in ``value`` array indexed by + ``slices`` to ``a``. If all of the indices target different locations, + the operation of :func:`scatter_min` is equivalent to + ``a[slices] = cupy.minimum(a[slices], value)``. + If there are multiple elements targeting the same location, + :func:`scatter_min` stores the minimum of all of these values to the given + index of ``a``, the initial element of ``a`` is also taken in account. + + Note that just like an array indexing, negative indices are interpreted as + counting from the end of an array. + + Also note that :func:`scatter_min` behaves identically + to :func:`numpy.minimum.at`. + + Example + ------- + >>> import numpy + >>> import cupy + >>> a = cupy.zeros((6,), dtype=numpy.float32) + >>> i = cupy.array([1, 0, 1, 2]) + >>> v = cupy.array([1., 2., 3., -1.]) + >>> cupyx.scatter_min(a, i, v); + >>> a + array([ 0., 0., -1., 0., 0., 0.], dtype=float32) + + Args: + a (ndarray): An array to store the results. 
+ slices: It is integer, slices, ellipsis, numpy.newaxis, + integer array-like, boolean array-like or tuple of them. + It works for slices used for + :func:`cupy.ndarray.__getitem__` and + :func:`cupy.ndarray.__setitem__`. + v (array-like): An array used for reference. + """ + a.scatter_min(slices, value) diff --git a/vllm/lib/python3.10/site-packages/cupyx/_texture.py b/vllm/lib/python3.10/site-packages/cupyx/_texture.py new file mode 100644 index 0000000000000000000000000000000000000000..179b22fdb03c53d91230315734183404f36890e8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/_texture.py @@ -0,0 +1,197 @@ +import cupy + +from cupy import _core +from cupy.cuda import texture +from cupy.cuda import runtime + + +_affine_transform_2d_array_kernel = _core.ElementwiseKernel( + 'U texObj, raw float32 m, uint64 width', 'T transformed_image', + ''' + float3 pixel = make_float3( + (float)(i / width), + (float)(i % width), + 1.0f + ); + float x = dot(pixel, make_float3(m[0], m[1], m[2])) + .5f; + float y = dot(pixel, make_float3(m[3], m[4], m[5])) + .5f; + transformed_image = tex2D(texObj, y, x); + ''', + 'cupyx_texture_affine_transformation_2d_array', + preamble=''' + inline __host__ __device__ float dot(float3 a, float3 b) + { + return a.x * b.x + a.y * b.y + a.z * b.z; + } + ''') + + +_affine_transform_3d_array_kernel = _core.ElementwiseKernel( + 'U texObj, raw float32 m, uint64 height, uint64 width', + 'T transformed_volume', + ''' + float4 voxel = make_float4( + (float)(i / (width * height)), + (float)((i % (width * height)) / width), + (float)((i % (width * height)) % width), + 1.0f + ); + float x = dot(voxel, make_float4(m[0], m[1], m[2], m[3])) + .5f; + float y = dot(voxel, make_float4(m[4], m[5], m[6], m[7])) + .5f; + float z = dot(voxel, make_float4(m[8], m[9], m[10], m[11])) + .5f; + transformed_volume = tex3D(texObj, z, y, x); + ''', + 'cupyx_texture_affine_transformation_3d_array', + preamble=''' + inline __host__ __device__ float dot(float4 a, 
float4 b) + { + return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w; + } + ''') + + +def _create_texture_object(data, + address_mode: str, + filter_mode: str, + read_mode: str, + border_color=0): + + if cupy.issubdtype(data.dtype, cupy.unsignedinteger): + fmt_kind = runtime.cudaChannelFormatKindUnsigned + elif cupy.issubdtype(data.dtype, cupy.integer): + fmt_kind = runtime.cudaChannelFormatKindSigned + elif cupy.issubdtype(data.dtype, cupy.floating): + fmt_kind = runtime.cudaChannelFormatKindFloat + else: + raise ValueError(f'Unsupported data type {data.dtype}') + + if address_mode == 'nearest': + address_mode = runtime.cudaAddressModeClamp + elif address_mode == 'constant': + address_mode = runtime.cudaAddressModeBorder + else: + raise ValueError( + f'Unsupported address mode {address_mode} ' + '(supported: constant, nearest)') + + if filter_mode == 'nearest': + filter_mode = runtime.cudaFilterModePoint + elif filter_mode == 'linear': + filter_mode = runtime.cudaFilterModeLinear + else: + raise ValueError( + f'Unsupported filter mode {filter_mode} ' + f'(supported: nearest, linear)') + + if read_mode == 'element_type': + read_mode = runtime.cudaReadModeElementType + elif read_mode == 'normalized_float': + read_mode = runtime.cudaReadModeNormalizedFloat + else: + raise ValueError( + f'Unsupported read mode {read_mode} ' + '(supported: element_type, normalized_float)') + + texture_fmt = texture.ChannelFormatDescriptor( + data.itemsize * 8, 0, 0, 0, fmt_kind) + # CUDAArray: last dimension is the fastest changing dimension + array = texture.CUDAarray(texture_fmt, *data.shape[::-1]) + res_desc = texture.ResourceDescriptor( + runtime.cudaResourceTypeArray, cuArr=array) + # TODO(the-lay): each dimension can have a different addressing mode + # TODO(the-lay): border color/value can be defined for up to 4 channels + tex_desc = texture.TextureDescriptor( + (address_mode, ) * data.ndim, filter_mode, read_mode, + borderColors=(border_color, )) + tex_obj = 
texture.TextureObject(res_desc, tex_desc) + array.copy_from(data) + + return tex_obj + + +def affine_transformation(data, + transformation_matrix, + output_shape=None, + output=None, + interpolation: str = 'linear', + mode: str = 'constant', + border_value=0): + """ + Apply an affine transformation. + + The method uses texture memory and supports only 2D and 3D float32 arrays + without channel dimension. + + Args: + data (cupy.ndarray): The input array or texture object. + transformation_matrix (cupy.ndarray): Affine transformation matrix. + Must be a homogeneous and have shape ``(ndim + 1, ndim + 1)``. + output_shape (tuple of ints): Shape of output. If not specified, + the input array shape is used. Default is None. + output (cupy.ndarray or ~cupy.dtype): The array in which to place the + output, or the dtype of the returned array. If not specified, + creates the output array with shape of ``output_shape``. Default is + None. + interpolation (str): Specifies interpolation mode: ``'linear'`` or + ``'nearest'``. Default is ``'linear'``. + mode (str): Specifies addressing mode for points outside of the array: + (`'constant'``, ``'nearest'``). Default is ``'constant'``. + border_value: Specifies value to be used for coordinates outside + of the array for ``'constant'`` mode. Default is 0. + + Returns: + cupy.ndarray: + The transformed input. + + .. 
seealso:: :func:`cupyx.scipy.ndimage.affine_transform` + """ + + ndim = data.ndim + if (ndim < 2) or (ndim > 3): + raise ValueError( + 'Texture memory affine transformation is defined only for ' + '2D and 3D arrays without channel dimension.') + + dtype = data.dtype + if dtype != cupy.float32: + raise ValueError(f'Texture memory affine transformation is available ' + f'only for float32 data type (not {dtype})') + + if interpolation not in ['linear', 'nearest']: + raise ValueError( + f'Unsupported interpolation {interpolation} ' + f'(supported: linear, nearest)') + + if transformation_matrix.shape != (ndim + 1, ndim + 1): + raise ValueError('Matrix must be have shape (ndim + 1, ndim + 1)') + + texture_object = _create_texture_object(data, + address_mode=mode, + filter_mode=interpolation, + read_mode='element_type', + border_color=border_value) + + if ndim == 2: + kernel = _affine_transform_2d_array_kernel + else: + kernel = _affine_transform_3d_array_kernel + + if output_shape is None: + output_shape = data.shape + + if output is None: + output = cupy.zeros(output_shape, dtype=dtype) + elif isinstance(output, (type, cupy.dtype)): + if output != cupy.float32: + raise ValueError(f'Texture memory affine transformation is ' + f'available only for float32 data type (not ' + f'{output})') + output = cupy.zeros(output_shape, dtype=output) + elif isinstance(output, cupy.ndarray): + if output.shape != output_shape: + raise ValueError('Output shapes do not match') + else: + raise ValueError('Output must be None, cupy.ndarray or cupy.dtype') + + kernel(texture_object, transformation_matrix, *output_shape[1:], output) + return output diff --git a/vllm/lib/python3.10/site-packages/cupyx/_ufunc_config.py b/vllm/lib/python3.10/site-packages/cupyx/_ufunc_config.py new file mode 100644 index 0000000000000000000000000000000000000000..33973a16b94dc95adb11fee278a25e206a790737 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/_ufunc_config.py @@ -0,0 +1,123 @@ +import 
contextlib +import threading + + +_config = threading.local() + + +def get_config_divide(): + try: + value = _config.divide + except AttributeError: + value = _config.divide = None + return value + + +def get_config_over(): + try: + value = _config.over + except AttributeError: + value = _config.over = None + return value + + +def get_config_under(): + try: + value = _config.under + except AttributeError: + value = _config.under = None + return value + + +def get_config_invalid(): + try: + value = _config.invalid + except AttributeError: + value = _config.invalid = None + return value + + +def get_config_linalg(): + # In favor of performance, the `devInfo` input/output from cuSOLVER routine + # calls that is necessary to check the validity of the other outputs, are + # ignored, as D2H copy incurring device synchronizations would otherwise be + # required. + try: + value = _config.linalg + except AttributeError: + value = _config.linalg = 'ignore' + return value + + +def get_config_fallback_mode(): + try: + value = _config.fallback_mode + except AttributeError: + value = _config.fallback_mode = 'ignore' + return value + + +@contextlib.contextmanager +def errstate(*, divide=None, over=None, under=None, + invalid=None, linalg=None, fallback_mode=None): + """ + TODO(hvy): Write docs. + """ + old_state = seterr( + divide=divide, over=over, under=under, + invalid=invalid, linalg=linalg, fallback_mode=fallback_mode) + try: + yield # Return `None` similar to `numpy.errstate`. + finally: + seterr(**old_state) + + +def seterr(*, divide=None, over=None, under=None, + invalid=None, linalg=None, fallback_mode=None): + """ + TODO(hvy): Write docs. 
+ """ + old_state = geterr() + + if divide is not None: + raise NotImplementedError() + if over is not None: + raise NotImplementedError() + if under is not None: + raise NotImplementedError() + if invalid is not None: + raise NotImplementedError() + if linalg is not None: + if linalg in ('ignore', 'raise'): + _config.linalg = linalg + else: + raise NotImplementedError() + if fallback_mode is not None: + if fallback_mode in ['print', 'warn', 'ignore', 'raise']: + _config.fallback_mode = fallback_mode + elif fallback_mode in ['log', 'call']: + raise NotImplementedError + else: + raise ValueError( + '{} is not a valid dispatch type'.format(fallback_mode)) + + _config.divide = divide + _config.under = under + _config.over = over + _config.invalid = invalid + + return old_state + + +def geterr(): + """ + TODO(hvy): Write docs. + """ + return dict( + divide=get_config_divide(), + over=get_config_over(), + under=get_config_under(), + invalid=get_config_invalid(), + linalg=get_config_linalg(), + fallback_mode=get_config_fallback_mode(), + ) diff --git a/vllm/lib/python3.10/site-packages/cupyx/cusparse.py b/vllm/lib/python3.10/site-packages/cupyx/cusparse.py new file mode 100644 index 0000000000000000000000000000000000000000..10319d43a6bebf30be7734f9ae6ea3b432163e79 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/cusparse.py @@ -0,0 +1,2093 @@ +import functools as _functools + +import numpy as _numpy +import platform as _platform + +import cupy as _cupy +from cupy_backends.cuda.api import driver as _driver +from cupy_backends.cuda.api import runtime as _runtime +from cupy_backends.cuda.libs import cusparse as _cusparse +from cupy._core import _dtype +from cupy.cuda import device as _device +from cupy.cuda import stream as _stream +from cupy import _util +import cupyx.scipy.sparse + + +class MatDescriptor(object): + + def __init__(self, descriptor): + self.descriptor = descriptor + + @classmethod + def create(cls): + descr = _cusparse.createMatDescr() + return 
MatDescriptor(descr) + + def __reduce__(self): + return self.create, () + + def __del__(self, is_shutting_down=_util.is_shutting_down): + if is_shutting_down(): + return + if self.descriptor: + _cusparse.destroyMatDescr(self.descriptor) + self.descriptor = None + + def set_mat_type(self, typ): + _cusparse.setMatType(self.descriptor, typ) + + def set_mat_index_base(self, base): + _cusparse.setMatIndexBase(self.descriptor, base) + + def set_mat_fill_mode(self, fill_mode): + _cusparse.setMatFillMode(self.descriptor, fill_mode) + + def set_mat_diag_type(self, diag_type): + _cusparse.setMatDiagType(self.descriptor, diag_type) + + +def _cast_common_type(*xs): + dtypes = [x.dtype for x in xs if x is not None] + dtype = _functools.reduce(_numpy.promote_types, dtypes) + return [x.astype(dtype) if x is not None and x.dtype != dtype else x + for x in xs] + + +def _transpose_flag(trans): + if trans: + return _cusparse.CUSPARSE_OPERATION_TRANSPOSE + else: + return _cusparse.CUSPARSE_OPERATION_NON_TRANSPOSE + + +def _call_cusparse(name, dtype, *args): + if dtype == 'f': + prefix = 's' + elif dtype == 'd': + prefix = 'd' + elif dtype == 'F': + prefix = 'c' + elif dtype == 'D': + prefix = 'z' + else: + raise TypeError + f = getattr(_cusparse, prefix + name) + return f(*args) + + +_available_cusparse_version = { + 'csrmv': (8000, 11000), + 'csrmvEx': (8000, 11000), # TODO(anaruse): failure in cuSparse 11.0 + 'csrmm': (8000, 11000), + 'csrmm2': (8000, 11000), + 'csrgeam': (8000, 11000), + 'csrgeam2': (9020, None), + 'csrgemm': (8000, 11000), + 'csrgemm2': (8000, 12000), + 'gthr': (8000, 12000), + + # Generic APIs are not available on CUDA 10.2 on Windows. 
+ 'spmv': ({'Linux': 10200, 'Windows': 11000}, None), + # accuracy bugs in cuSparse 10.3.0 + 'spmm': ({'Linux': 10301, 'Windows': 11000}, None), + + 'csr2dense': (8000, 12000), + 'csc2dense': (8000, 12000), + 'csrsort': (8000, None), + 'cscsort': (8000, None), + 'coosort': (8000, None), + 'coo2csr': (8000, None), + 'csr2coo': (8000, None), + 'csr2csc': (8000, 11000), + 'csc2csr': (8000, 11000), # the entity is csr2csc + 'csr2cscEx2': (10200, None), + 'csc2csrEx2': (10200, None), # the entity is csr2cscEx2 + 'dense2csc': (8000, None), + 'dense2csr': (8000, None), + 'csr2csr_compress': (8000, None), + 'csrsm2': (9020, 12000), + 'csrilu02': (8000, None), + 'denseToSparse': (11300, None), + 'sparseToDense': (11300, None), + 'spgemm': (11100, None), + 'spsm': (11600, None), # CUDA 11.3.1 +} + + +_available_hipsparse_version = { + # For APIs supported by CUDA but not yet by HIP, we still need them here + # so that our test suite can cover both platforms. + 'csrmv': (305, None), + 'csrmvEx': (_numpy.inf, None), + 'csrmm': (305, None), + 'csrmm2': (305, None), + 'csrgeam': (305, None), + 'csrgeam2': (305, None), + 'csrgemm': (305, None), + 'csrgemm2': (305, None), + 'gthr': (305, None), + 'spmv': (402, None), + 'spmm': (402, None), + 'csr2dense': (305, None), + 'csc2dense': (305, None), + 'csrsort': (305, None), + 'cscsort': (305, None), + 'coosort': (305, None), + 'coo2csr': (305, None), + 'csr2coo': (305, None), + 'csr2csc': (305, None), + 'csc2csr': (305, None), # the entity is csr2csc + 'csr2cscEx2': (_numpy.inf, None), + 'csc2csrEx2': (_numpy.inf, None), # the entity is csr2cscEx2 + 'dense2csc': (305, None), + 'dense2csr': (305, None), + 'csr2csr_compress': (305, None), + 'csrsm2': (305, None), # available since 305 but seems buggy + 'csrilu02': (305, None), + 'denseToSparse': (402, None), + 'sparseToDense': (402, None), + 'spgemm': (_numpy.inf, None), + 'spsm': (50000000, None), +} + + +def _get_avail_version_from_spec(x): + if isinstance(x, dict): + os_name = 
_platform.system() + if os_name not in x: + msg = 'No version information specified for the OS: {}'.format( + os_name) + raise ValueError(msg) + return x[os_name] + return x + + +@_util.memoize() +def check_availability(name): + if not _runtime.is_hip: + available_version = _available_cusparse_version + version = _cusparse.get_build_version() + else: + available_version = _available_hipsparse_version + version = _driver.get_build_version() # = HIP_VERSION + if name not in available_version: + msg = 'No available version information specified for {}'.format(name) + raise ValueError(msg) + version_added, version_removed = available_version[name] + version_added = _get_avail_version_from_spec(version_added) + version_removed = _get_avail_version_from_spec(version_removed) + if version_added is not None and version < version_added: + return False + if version_removed is not None and version >= version_removed: + return False + return True + + +def getVersion() -> int: + return _cusparse.getVersion(_device.get_cusparse_handle()) + + +def csrmv(a, x, y=None, alpha=1, beta=0, transa=False): + """Matrix-vector product for a CSR-matrix and a dense vector. + + .. math:: + + y = \\alpha * o_a(A) x + \\beta y, + + where :math:`o_a` is a transpose function when ``transa`` is ``True`` and + is an identity function otherwise. + + Args: + a (cupyx.cusparse.csr_matrix): Matrix A. + x (cupy.ndarray): Vector x. + y (cupy.ndarray or None): Vector y. It must be F-contiguous. + alpha (float): Coefficient for x. + beta (float): Coefficient for y. + transa (bool): If ``True``, transpose of ``A`` is used. + + Returns: + cupy.ndarray: Calculated ``y``. 
+ + """ + if not check_availability('csrmv'): + raise RuntimeError('csrmv is not available.') + + assert y is None or y.flags.f_contiguous + + a_shape = a.shape if not transa else a.shape[::-1] + if a_shape[1] != len(x): + raise ValueError('dimension mismatch') + + handle = _device.get_cusparse_handle() + m, n = a_shape + a, x, y = _cast_common_type(a, x, y) + dtype = a.dtype + if y is None: + y = _cupy.zeros(m, dtype) + alpha = _numpy.array(alpha, dtype).ctypes + beta = _numpy.array(beta, dtype).ctypes + + _call_cusparse( + 'csrmv', dtype, + handle, _transpose_flag(transa), + a.shape[0], a.shape[1], a.nnz, alpha.data, a._descr.descriptor, + a.data.data.ptr, a.indptr.data.ptr, a.indices.data.ptr, + x.data.ptr, beta.data, y.data.ptr) + + return y + + +def csrmvExIsAligned(a, x, y=None): + """Check if the pointers of arguments for csrmvEx are aligned or not + + Args: + a (cupyx.cusparse.csr_matrix): Matrix A. + x (cupy.ndarray): Vector x. + y (cupy.ndarray or None): Vector y. + + Check if a, x, y pointers are aligned by 128 bytes as + required by csrmvEx. + + Returns: + bool: + ``True`` if all pointers are aligned. + ``False`` if otherwise. + + """ + + if a.data.data.ptr % 128 != 0: + return False + if a.indptr.data.ptr % 128 != 0: + return False + if a.indices.data.ptr % 128 != 0: + return False + if x.data.ptr % 128 != 0: + return False + if y is not None and y.data.ptr % 128 != 0: + return False + return True + + +def csrmvEx(a, x, y=None, alpha=1, beta=0, merge_path=True): + """Matrix-vector product for a CSR-matrix and a dense vector. + + .. math:: + + y = \\alpha * A x + \\beta y, + + Args: + a (cupyx.cusparse.csr_matrix): Matrix A. + x (cupy.ndarray): Vector x. + y (cupy.ndarray or None): Vector y. It must be F-contiguous. + alpha (float): Coefficient for x. + beta (float): Coefficient for y. + merge_path (bool): If ``True``, merge path algorithm is used. + + All pointers must be aligned with 128 bytes. + + Returns: + cupy.ndarray: Calculated ``y``. 
+ + """ + if not check_availability('csrmvEx'): + raise RuntimeError('csrmvEx is not available.') + + assert y is None or y.flags.f_contiguous + + if a.shape[1] != len(x): + raise ValueError('dimension mismatch') + + handle = _device.get_cusparse_handle() + m, n = a.shape + + a, x, y = _cast_common_type(a, x, y) + dtype = a.dtype + if y is None: + y = _cupy.zeros(m, dtype) + + datatype = _dtype.to_cuda_dtype(dtype) + algmode = _cusparse.CUSPARSE_ALG_MERGE_PATH if \ + merge_path else _cusparse.CUSPARSE_ALG_NAIVE + transa_flag = _cusparse.CUSPARSE_OPERATION_NON_TRANSPOSE + + alpha = _numpy.array(alpha, dtype).ctypes + beta = _numpy.array(beta, dtype).ctypes + + assert csrmvExIsAligned(a, x, y) + + bufferSize = _cusparse.csrmvEx_bufferSize( + handle, algmode, transa_flag, + a.shape[0], a.shape[1], a.nnz, alpha.data, datatype, + a._descr.descriptor, a.data.data.ptr, datatype, + a.indptr.data.ptr, a.indices.data.ptr, + x.data.ptr, datatype, beta.data, datatype, + y.data.ptr, datatype, datatype) + + buf = _cupy.empty(bufferSize, 'b') + assert buf.data.ptr % 128 == 0 + + _cusparse.csrmvEx( + handle, algmode, transa_flag, + a.shape[0], a.shape[1], a.nnz, alpha.data, datatype, + a._descr.descriptor, a.data.data.ptr, datatype, + a.indptr.data.ptr, a.indices.data.ptr, + x.data.ptr, datatype, beta.data, datatype, + y.data.ptr, datatype, datatype, buf.data.ptr) + return y + + +def csrmm(a, b, c=None, alpha=1, beta=0, transa=False): + """Matrix-matrix product for a CSR-matrix and a dense matrix. + + .. math:: + + C = \\alpha o_a(A) B + \\beta C, + + where :math:`o_a` is a transpose function when ``transa`` is ``True`` and + is an identity function otherwise. + + Args: + a (cupyx.scipy.sparse.csr): Sparse matrix A. + b (cupy.ndarray): Dense matrix B. It must be F-contiguous. + c (cupy.ndarray or None): Dense matrix C. It must be F-contiguous. + alpha (float): Coefficient for AB. + beta (float): Coefficient for C. + transa (bool): If ``True``, transpose of A is used. 
+ + Returns: + cupy.ndarray: Calculated C. + + """ + if not check_availability('csrmm'): + raise RuntimeError('csrmm is not available.') + + assert a.ndim == b.ndim == 2 + assert b.flags.f_contiguous + assert c is None or c.flags.f_contiguous + + a_shape = a.shape if not transa else a.shape[::-1] + if a_shape[1] != b.shape[0]: + raise ValueError('dimension mismatch') + + handle = _device.get_cusparse_handle() + m, k = a_shape + n = b.shape[1] + + a, b, c = _cast_common_type(a, b, c) + if c is None: + c = _cupy.zeros((m, n), a.dtype, 'F') + + ldb = k + ldc = m + + alpha = _numpy.array(alpha, a.dtype).ctypes + beta = _numpy.array(beta, a.dtype).ctypes + _call_cusparse( + 'csrmm', a.dtype, + handle, _transpose_flag(transa), + a.shape[0], n, a.shape[1], a.nnz, + alpha.data, a._descr.descriptor, a.data.data.ptr, + a.indptr.data.ptr, a.indices.data.ptr, + b.data.ptr, ldb, beta.data, c.data.ptr, ldc) + return c + + +def csrmm2(a, b, c=None, alpha=1.0, beta=0.0, transa=False, transb=False): + """Matrix-matrix product for a CSR-matrix and a dense matrix. + + .. math:: + + C = \\alpha o_a(A) o_b(B) + \\beta C, + + where :math:`o_a` and :math:`o_b` are transpose functions when ``transa`` + and ``tranb`` are ``True`` respectively. And they are identity functions + otherwise. + It is forbidden that both ``transa`` and ``transb`` are ``True`` in + cuSPARSE specification. + + Args: + a (cupyx.scipy.sparse.csr): Sparse matrix A. + b (cupy.ndarray): Dense matrix B. It must be F-contiguous. + c (cupy.ndarray or None): Dense matrix C. It must be F-contiguous. + alpha (float): Coefficient for AB. + beta (float): Coefficient for C. + transa (bool): If ``True``, transpose of A is used. + transb (bool): If ``True``, transpose of B is used. + + Returns: + cupy.ndarray: Calculated C. 
+ + """ + if not check_availability('csrmm2'): + raise RuntimeError('csrmm2 is not available.') + + assert a.ndim == b.ndim == 2 + assert a.has_canonical_format + assert b.flags.f_contiguous + assert c is None or c.flags.f_contiguous + assert not (transa and transb) + + a_shape = a.shape if not transa else a.shape[::-1] + b_shape = b.shape if not transb else b.shape[::-1] + if a_shape[1] != b_shape[0]: + raise ValueError('dimension mismatch') + + handle = _device.get_cusparse_handle() + m, k = a_shape + n = b_shape[1] + + a, b, c = _cast_common_type(a, b, c) + if c is None: + c = _cupy.zeros((m, n), a.dtype, 'F') + + ldb = b.shape[0] + ldc = c.shape[0] + op_a = _transpose_flag(transa) + op_b = _transpose_flag(transb) + alpha = _numpy.array(alpha, a.dtype).ctypes + beta = _numpy.array(beta, a.dtype).ctypes + _call_cusparse( + 'csrmm2', a.dtype, + handle, op_a, op_b, a.shape[0], n, a.shape[1], a.nnz, + alpha.data, a._descr.descriptor, a.data.data.ptr, + a.indptr.data.ptr, a.indices.data.ptr, + b.data.ptr, ldb, beta.data, c.data.ptr, ldc) + return c + + +def csrgeam(a, b, alpha=1, beta=1): + """Matrix-matrix addition. + + .. math:: + C = \\alpha A + \\beta B + + Args: + a (cupyx.scipy.sparse.csr_matrix): Sparse matrix A. + b (cupyx.scipy.sparse.csr_matrix): Sparse matrix B. + alpha (float): Coefficient for A. + beta (float): Coefficient for B. + + Returns: + cupyx.scipy.sparse.csr_matrix: Result matrix. 
+ + """ + if not check_availability('csrgeam'): + raise RuntimeError('csrgeam is not available.') + + if not isinstance(a, cupyx.scipy.sparse.csr_matrix): + raise TypeError('unsupported type (actual: {})'.format(type(a))) + if not isinstance(b, cupyx.scipy.sparse.csr_matrix): + raise TypeError('unsupported type (actual: {})'.format(type(b))) + assert a.has_canonical_format + assert b.has_canonical_format + if a.shape != b.shape: + raise ValueError('inconsistent shapes') + + handle = _device.get_cusparse_handle() + m, n = a.shape + a, b = _cast_common_type(a, b) + nnz = _numpy.empty((), 'i') + _cusparse.setPointerMode( + handle, _cusparse.CUSPARSE_POINTER_MODE_HOST) + + c_descr = MatDescriptor.create() + c_indptr = _cupy.empty(m + 1, 'i') + + _cusparse.xcsrgeamNnz( + handle, m, n, + a._descr.descriptor, a.nnz, a.indptr.data.ptr, a.indices.data.ptr, + b._descr.descriptor, b.nnz, b.indptr.data.ptr, b.indices.data.ptr, + c_descr.descriptor, c_indptr.data.ptr, nnz.ctypes.data) + + c_indices = _cupy.empty(int(nnz), 'i') + c_data = _cupy.empty(int(nnz), a.dtype) + alpha = _numpy.array(alpha, a.dtype).ctypes + beta = _numpy.array(beta, a.dtype).ctypes + _call_cusparse( + 'csrgeam', a.dtype, + handle, m, n, alpha.data, + a._descr.descriptor, a.nnz, a.data.data.ptr, + a.indptr.data.ptr, a.indices.data.ptr, beta.data, + b._descr.descriptor, b.nnz, b.data.data.ptr, + b.indptr.data.ptr, b.indices.data.ptr, + c_descr.descriptor, c_data.data.ptr, c_indptr.data.ptr, + c_indices.data.ptr) + + c = cupyx.scipy.sparse.csr_matrix( + (c_data, c_indices, c_indptr), shape=a.shape) + c._has_canonical_format = True + return c + + +def csrgeam2(a, b, alpha=1, beta=1): + """Matrix-matrix addition. + + .. math:: + C = \\alpha A + \\beta B + + Args: + a (cupyx.scipy.sparse.csr_matrix): Sparse matrix A. + b (cupyx.scipy.sparse.csr_matrix): Sparse matrix B. + alpha (float): Coefficient for A. + beta (float): Coefficient for B. + + Returns: + cupyx.scipy.sparse.csr_matrix: Result matrix. 
+ + """ + if not check_availability('csrgeam2'): + raise RuntimeError('csrgeam2 is not available.') + + if not isinstance(a, cupyx.scipy.sparse.csr_matrix): + raise TypeError('unsupported type (actual: {})'.format(type(a))) + if not isinstance(b, cupyx.scipy.sparse.csr_matrix): + raise TypeError('unsupported type (actual: {})'.format(type(b))) + assert a.has_canonical_format + assert b.has_canonical_format + if a.shape != b.shape: + raise ValueError('inconsistent shapes') + + handle = _device.get_cusparse_handle() + m, n = a.shape + a, b = _cast_common_type(a, b) + nnz = _numpy.empty((), 'i') + _cusparse.setPointerMode( + handle, _cusparse.CUSPARSE_POINTER_MODE_HOST) + + alpha = _numpy.array(alpha, a.dtype).ctypes + beta = _numpy.array(beta, a.dtype).ctypes + c_descr = MatDescriptor.create() + c_indptr = _cupy.empty(m + 1, 'i') + + null_ptr = 0 + buff_size = _call_cusparse( + 'csrgeam2_bufferSizeExt', a.dtype, + handle, m, n, alpha.data, a._descr.descriptor, a.nnz, a.data.data.ptr, + a.indptr.data.ptr, a.indices.data.ptr, beta.data, b._descr.descriptor, + b.nnz, b.data.data.ptr, b.indptr.data.ptr, b.indices.data.ptr, + c_descr.descriptor, null_ptr, c_indptr.data.ptr, null_ptr) + buff = _cupy.empty(buff_size, _numpy.int8) + _cusparse.xcsrgeam2Nnz( + handle, m, n, a._descr.descriptor, a.nnz, a.indptr.data.ptr, + a.indices.data.ptr, b._descr.descriptor, b.nnz, b.indptr.data.ptr, + b.indices.data.ptr, c_descr.descriptor, c_indptr.data.ptr, + nnz.ctypes.data, buff.data.ptr) + c_indices = _cupy.empty(int(nnz), 'i') + c_data = _cupy.empty(int(nnz), a.dtype) + _call_cusparse( + 'csrgeam2', a.dtype, + handle, m, n, alpha.data, a._descr.descriptor, a.nnz, a.data.data.ptr, + a.indptr.data.ptr, a.indices.data.ptr, beta.data, b._descr.descriptor, + b.nnz, b.data.data.ptr, b.indptr.data.ptr, b.indices.data.ptr, + c_descr.descriptor, c_data.data.ptr, c_indptr.data.ptr, + c_indices.data.ptr, buff.data.ptr) + + c = cupyx.scipy.sparse.csr_matrix( + (c_data, c_indices, c_indptr), 
shape=a.shape) + c._has_canonical_format = True + return c + + +def csrgemm(a, b, transa=False, transb=False): + """Matrix-matrix product for CSR-matrix. + + math:: + C = op(A) op(B), + + Args: + a (cupyx.scipy.sparse.csr_matrix): Sparse matrix A. + b (cupyx.scipy.sparse.csr_matrix): Sparse matrix B. + transa (bool): If ``True``, transpose of A is used. + transb (bool): If ``True``, transpose of B is used. + + Returns: + cupyx.scipy.sparse.csr_matrix: Calculated C. + + """ + if not check_availability('csrgemm'): + raise RuntimeError('csrgemm is not available.') + + assert a.ndim == b.ndim == 2 + assert a.has_canonical_format + assert b.has_canonical_format + a_shape = a.shape if not transa else a.shape[::-1] + b_shape = b.shape if not transb else b.shape[::-1] + if a_shape[1] != b_shape[0]: + raise ValueError('dimension mismatch') + + handle = _device.get_cusparse_handle() + m, k = a_shape + n = b_shape[1] + + a, b = _cast_common_type(a, b) + + if a.nnz == 0 or b.nnz == 0: + return cupyx.scipy.sparse.csr_matrix((m, n), dtype=a.dtype) + + op_a = _transpose_flag(transa) + op_b = _transpose_flag(transb) + + nnz = _numpy.empty((), 'i') + _cusparse.setPointerMode( + handle, _cusparse.CUSPARSE_POINTER_MODE_HOST) + + c_descr = MatDescriptor.create() + c_indptr = _cupy.empty(m + 1, 'i') + + _cusparse.xcsrgemmNnz( + handle, op_a, op_b, m, n, k, a._descr.descriptor, a.nnz, + a.indptr.data.ptr, a.indices.data.ptr, b._descr.descriptor, b.nnz, + b.indptr.data.ptr, b.indices.data.ptr, c_descr.descriptor, + c_indptr.data.ptr, nnz.ctypes.data) + + c_indices = _cupy.empty(int(nnz), 'i') + c_data = _cupy.empty(int(nnz), a.dtype) + _call_cusparse( + 'csrgemm', a.dtype, + handle, op_a, op_b, m, n, k, a._descr.descriptor, a.nnz, + a.data.data.ptr, a.indptr.data.ptr, a.indices.data.ptr, + b._descr.descriptor, b.nnz, b.data.data.ptr, b.indptr.data.ptr, + b.indices.data.ptr, + c_descr.descriptor, c_data.data.ptr, c_indptr.data.ptr, + c_indices.data.ptr) + + c = 
cupyx.scipy.sparse.csr_matrix( + (c_data, c_indices, c_indptr), shape=(m, n)) + c._has_canonical_format = True + return c + + +def csrgemm2(a, b, d=None, alpha=1, beta=1): + """Matrix-matrix product for CSR-matrix. + + math:: + C = alpha * A * B + beta * D + + Args: + a (cupyx.scipy.sparse.csr_matrix): Sparse matrix A. + b (cupyx.scipy.sparse.csr_matrix): Sparse matrix B. + d (cupyx.scipy.sparse.csr_matrix or None): Sparse matrix D. + alpha (scalar): Coefficient + beta (scalar): Coefficient + + Returns: + cupyx.scipy.sparse.csr_matrix + + """ + if not check_availability('csrgemm2'): + raise RuntimeError('csrgemm2 is not available.') + + assert a.ndim == b.ndim == 2 + if not isinstance(a, cupyx.scipy.sparse.csr_matrix): + raise TypeError('unsupported type (actual: {})'.format(type(a))) + if not isinstance(b, cupyx.scipy.sparse.csr_matrix): + raise TypeError('unsupported type (actual: {})'.format(type(b))) + assert a.has_canonical_format + assert b.has_canonical_format + if a.shape[1] != b.shape[0]: + raise ValueError('mismatched shape') + if d is not None: + assert d.ndim == 2 + if not isinstance(d, cupyx.scipy.sparse.csr_matrix): + raise TypeError('unsupported type (actual: {})'.format(type(d))) + assert d.has_canonical_format + if a.shape[0] != d.shape[0] or b.shape[1] != d.shape[1]: + raise ValueError('mismatched shape') + if _runtime.is_hip and _driver.get_build_version() < 402: + raise RuntimeError('d != None is supported since ROCm 4.2.0') + + handle = _device.get_cusparse_handle() + m, k = a.shape + _, n = b.shape + + if d is None: + a, b = _cast_common_type(a, b) + else: + a, b, d = _cast_common_type(a, b, d) + + info = _cusparse.createCsrgemm2Info() + alpha = _numpy.array(alpha, a.dtype).ctypes + null_ptr = 0 + if d is None: + beta_data = null_ptr + d_descr = MatDescriptor.create() + d_nnz = 0 + d_data = null_ptr + d_indptr = null_ptr + d_indices = null_ptr + else: + beta = _numpy.array(beta, a.dtype).ctypes + beta_data = beta.data + d_descr = d._descr + 
d_nnz = d.nnz + d_data = d.data.data.ptr + d_indptr = d.indptr.data.ptr + d_indices = d.indices.data.ptr + + buff_size = _call_cusparse( + 'csrgemm2_bufferSizeExt', a.dtype, + handle, m, n, k, alpha.data, a._descr.descriptor, a.nnz, + a.indptr.data.ptr, a.indices.data.ptr, b._descr.descriptor, b.nnz, + b.indptr.data.ptr, b.indices.data.ptr, beta_data, d_descr.descriptor, + d_nnz, d_indptr, d_indices, info) + buff = _cupy.empty(buff_size, _numpy.int8) + + c_nnz = _numpy.empty((), 'i') + _cusparse.setPointerMode(handle, _cusparse.CUSPARSE_POINTER_MODE_HOST) + + c_descr = MatDescriptor.create() + c_indptr = _cupy.empty(m + 1, 'i') + _cusparse.xcsrgemm2Nnz( + handle, m, n, k, a._descr.descriptor, a.nnz, a.indptr.data.ptr, + a.indices.data.ptr, b._descr.descriptor, b.nnz, b.indptr.data.ptr, + b.indices.data.ptr, d_descr.descriptor, d_nnz, d_indptr, d_indices, + c_descr.descriptor, c_indptr.data.ptr, c_nnz.ctypes.data, info, + buff.data.ptr) + + c_indices = _cupy.empty(int(c_nnz), 'i') + c_data = _cupy.empty(int(c_nnz), a.dtype) + _call_cusparse( + 'csrgemm2', a.dtype, + handle, m, n, k, alpha.data, a._descr.descriptor, a.nnz, + a.data.data.ptr, a.indptr.data.ptr, a.indices.data.ptr, + b._descr.descriptor, b.nnz, b.data.data.ptr, b.indptr.data.ptr, + b.indices.data.ptr, beta_data, d_descr.descriptor, d_nnz, d_data, + d_indptr, d_indices, c_descr.descriptor, c_data.data.ptr, + c_indptr.data.ptr, c_indices.data.ptr, info, buff.data.ptr) + + c = cupyx.scipy.sparse.csr_matrix( + (c_data, c_indices, c_indptr), shape=(m, n)) + c._has_canonical_format = True + _cusparse.destroyCsrgemm2Info(info) + return c + + +def csr2dense(x, out=None): + """Converts CSR-matrix to a dense matrix. + + Args: + x (cupyx.scipy.sparse.csr_matrix): A sparse matrix to convert. + out (cupy.ndarray or None): A dense metrix to store the result. + It must be F-contiguous. + + Returns: + cupy.ndarray: Converted result. 
+ + """ + if not check_availability('csr2dense'): + raise RuntimeError('csr2dense is not available.') + + dtype = x.dtype + assert dtype.char in 'fdFD' + if out is None: + out = _cupy.empty(x.shape, dtype=dtype, order='F') + else: + assert out.flags.f_contiguous + + handle = _device.get_cusparse_handle() + _call_cusparse( + 'csr2dense', x.dtype, + handle, x.shape[0], x.shape[1], x._descr.descriptor, + x.data.data.ptr, x.indptr.data.ptr, x.indices.data.ptr, + out.data.ptr, x.shape[0]) + + return out + + +def csc2dense(x, out=None): + """Converts CSC-matrix to a dense matrix. + + Args: + x (cupyx.scipy.sparse.csc_matrix): A sparse matrix to convert. + out (cupy.ndarray or None): A dense metrix to store the result. + It must be F-contiguous. + + Returns: + cupy.ndarray: Converted result. + + """ + if not check_availability('csc2dense'): + raise RuntimeError('csc2dense is not available.') + + dtype = x.dtype + assert dtype.char in 'fdFD' + if out is None: + out = _cupy.empty(x.shape, dtype=dtype, order='F') + else: + assert out.flags.f_contiguous + + handle = _device.get_cusparse_handle() + _call_cusparse( + 'csc2dense', x.dtype, + handle, x.shape[0], x.shape[1], x._descr.descriptor, + x.data.data.ptr, x.indices.data.ptr, x.indptr.data.ptr, + out.data.ptr, x.shape[0]) + + return out + + +def csrsort(x): + """Sorts indices of CSR-matrix in place. + + Args: + x (cupyx.scipy.sparse.csr_matrix): A sparse matrix to sort. 
+ + """ + if not check_availability('csrsort'): + raise RuntimeError('csrsort is not available.') + + nnz = x.nnz + if nnz == 0: + return + handle = _device.get_cusparse_handle() + m, n = x.shape + + buffer_size = _cusparse.xcsrsort_bufferSizeExt( + handle, m, n, nnz, x.indptr.data.ptr, + x.indices.data.ptr) + buf = _cupy.empty(buffer_size, 'b') + P = _cupy.empty(nnz, 'i') + data_orig = x.data.copy() + _cusparse.createIdentityPermutation(handle, nnz, P.data.ptr) + _cusparse.xcsrsort( + handle, m, n, nnz, x._descr.descriptor, x.indptr.data.ptr, + x.indices.data.ptr, P.data.ptr, buf.data.ptr) + + if check_availability('gthr'): + _call_cusparse( + 'gthr', x.dtype, + handle, nnz, data_orig.data.ptr, x.data.data.ptr, + P.data.ptr, _cusparse.CUSPARSE_INDEX_BASE_ZERO) + else: + desc_x = SpVecDescriptor.create(P, x.data) + desc_y = DnVecDescriptor.create(data_orig) + _cusparse.gather(handle, desc_y.desc, desc_x.desc) + + +def cscsort(x): + """Sorts indices of CSC-matrix in place. + + Args: + x (cupyx.scipy.sparse.csc_matrix): A sparse matrix to sort. 
+ + """ + if not check_availability('cscsort'): + raise RuntimeError('cscsort is not available.') + + nnz = x.nnz + if nnz == 0: + return + handle = _device.get_cusparse_handle() + m, n = x.shape + + buffer_size = _cusparse.xcscsort_bufferSizeExt( + handle, m, n, nnz, x.indptr.data.ptr, + x.indices.data.ptr) + buf = _cupy.empty(buffer_size, 'b') + P = _cupy.empty(nnz, 'i') + data_orig = x.data.copy() + _cusparse.createIdentityPermutation(handle, nnz, P.data.ptr) + _cusparse.xcscsort( + handle, m, n, nnz, x._descr.descriptor, x.indptr.data.ptr, + x.indices.data.ptr, P.data.ptr, buf.data.ptr) + + if check_availability('gthr'): + _call_cusparse( + 'gthr', x.dtype, + handle, nnz, data_orig.data.ptr, x.data.data.ptr, + P.data.ptr, _cusparse.CUSPARSE_INDEX_BASE_ZERO) + else: + desc_x = SpVecDescriptor.create(P, x.data) + desc_y = DnVecDescriptor.create(data_orig) + _cusparse.gather(handle, desc_y.desc, desc_x.desc) + + +def coosort(x, sort_by='r'): + """Sorts indices of COO-matrix in place. + + Args: + x (cupyx.scipy.sparse.coo_matrix): A sparse matrix to sort. + sort_by (str): Sort the indices by row ('r', default) or column ('c'). 
+ + """ + if not check_availability('coosort'): + raise RuntimeError('coosort is not available.') + + nnz = x.nnz + if nnz == 0: + return + handle = _device.get_cusparse_handle() + m, n = x.shape + + buffer_size = _cusparse.xcoosort_bufferSizeExt( + handle, m, n, nnz, x.row.data.ptr, x.col.data.ptr) + buf = _cupy.empty(buffer_size, 'b') + P = _cupy.empty(nnz, 'i') + data_orig = x.data.copy() + _cusparse.createIdentityPermutation(handle, nnz, P.data.ptr) + if sort_by == 'r': + _cusparse.xcoosortByRow( + handle, m, n, nnz, x.row.data.ptr, x.col.data.ptr, + P.data.ptr, buf.data.ptr) + elif sort_by == 'c': + _cusparse.xcoosortByColumn( + handle, m, n, nnz, x.row.data.ptr, x.col.data.ptr, + P.data.ptr, buf.data.ptr) + else: + raise ValueError("sort_by must be either 'r' or 'c'") + + if x.dtype.char != '?': + if check_availability('gthr'): + _call_cusparse( + 'gthr', x.dtype, + handle, nnz, data_orig.data.ptr, x.data.data.ptr, + P.data.ptr, _cusparse.CUSPARSE_INDEX_BASE_ZERO) + else: + desc_x = SpVecDescriptor.create(P, x.data) + desc_y = DnVecDescriptor.create(data_orig) + _cusparse.gather(handle, desc_y.desc, desc_x.desc) + + if sort_by == 'c': # coo is sorted by row first + x._has_canonical_format = False + + +def coo2csr(x): + handle = _device.get_cusparse_handle() + m = x.shape[0] + nnz = x.nnz + if nnz == 0: + indptr = _cupy.zeros(m + 1, 'i') + else: + indptr = _cupy.empty(m + 1, 'i') + _cusparse.xcoo2csr( + handle, x.row.data.ptr, nnz, m, + indptr.data.ptr, _cusparse.CUSPARSE_INDEX_BASE_ZERO) + return cupyx.scipy.sparse.csr_matrix( + (x.data, x.col, indptr), shape=x.shape) + + +def coo2csc(x): + handle = _device.get_cusparse_handle() + n = x.shape[1] + nnz = x.nnz + if nnz == 0: + indptr = _cupy.zeros(n + 1, 'i') + else: + indptr = _cupy.empty(n + 1, 'i') + _cusparse.xcoo2csr( + handle, x.col.data.ptr, nnz, n, + indptr.data.ptr, _cusparse.CUSPARSE_INDEX_BASE_ZERO) + return cupyx.scipy.sparse.csc_matrix( + (x.data, x.row, indptr), shape=x.shape) + + +def csr2coo(x, 
data, indices): + """Converts a CSR-matrix to COO format. + + Args: + x (cupyx.scipy.sparse.csr_matrix): A matrix to be converted. + data (cupy.ndarray): A data array for converted data. + indices (cupy.ndarray): An index array for converted data. + + Returns: + cupyx.scipy.sparse.coo_matrix: A converted matrix. + + """ + if not check_availability('csr2coo'): + raise RuntimeError('csr2coo is not available.') + + handle = _device.get_cusparse_handle() + m = x.shape[0] + nnz = x.nnz + row = _cupy.empty(nnz, 'i') + _cusparse.xcsr2coo( + handle, x.indptr.data.ptr, nnz, m, row.data.ptr, + _cusparse.CUSPARSE_INDEX_BASE_ZERO) + # data and indices did not need to be copied already + return cupyx.scipy.sparse.coo_matrix( + (data, (row, indices)), shape=x.shape) + + +def csr2csc(x): + if not check_availability('csr2csc'): + raise RuntimeError('csr2csc is not available.') + + handle = _device.get_cusparse_handle() + m, n = x.shape + nnz = x.nnz + data = _cupy.empty(nnz, x.dtype) + indices = _cupy.empty(nnz, 'i') + if nnz == 0: + indptr = _cupy.zeros(n + 1, 'i') + else: + indptr = _cupy.empty(n + 1, 'i') + _call_cusparse( + 'csr2csc', x.dtype, + handle, m, n, nnz, x.data.data.ptr, + x.indptr.data.ptr, x.indices.data.ptr, + data.data.ptr, indices.data.ptr, indptr.data.ptr, + _cusparse.CUSPARSE_ACTION_NUMERIC, + _cusparse.CUSPARSE_INDEX_BASE_ZERO) + return cupyx.scipy.sparse.csc_matrix( + (data, indices, indptr), shape=x.shape) + + +def csr2cscEx2(x): + if not check_availability('csr2cscEx2'): + raise RuntimeError('csr2cscEx2 is not available.') + + handle = _device.get_cusparse_handle() + m, n = x.shape + nnz = x.nnz + data = _cupy.empty(nnz, x.dtype) + indices = _cupy.empty(nnz, 'i') + if nnz == 0: + indptr = _cupy.zeros(n + 1, 'i') + else: + indptr = _cupy.empty(n + 1, 'i') + x_dtype = _dtype.to_cuda_dtype(x.dtype) + action = _cusparse.CUSPARSE_ACTION_NUMERIC + ibase = _cusparse.CUSPARSE_INDEX_BASE_ZERO + algo = _cusparse.CUSPARSE_CSR2CSC_ALG1 + buffer_size = 
def csc2coo(x, data, indices):
    """Converts a CSC-matrix to COO format.

    Args:
        x (cupyx.scipy.sparse.csc_matrix): A matrix to be converted.
        data (cupy.ndarray): A data array for converted data.
        indices (cupy.ndarray): An index array for converted data.

    Returns:
        cupyx.scipy.sparse.coo_matrix: A converted matrix.

    """
    handle = _device.get_cusparse_handle()
    n = x.shape[1]
    nnz = x.nnz
    # A CSC column-pointer array has the same layout as a CSR row-pointer
    # array, so xcsr2coo can be reused to expand it into per-entry column
    # indices.
    col = _cupy.empty(nnz, 'i')
    _cusparse.xcsr2coo(
        handle, x.indptr.data.ptr, nnz, n, col.data.ptr,
        _cusparse.CUSPARSE_INDEX_BASE_ZERO)
    # data and indices do not need to be copied; they are reused as-is.
    return cupyx.scipy.sparse.coo_matrix(
        (data, (indices, col)), shape=x.shape)


def csc2csr(x):
    """Converts a CSC-matrix to CSR format (legacy cuSPARSE API).

    Args:
        x (cupyx.scipy.sparse.csc_matrix): A matrix to be converted.

    Returns:
        cupyx.scipy.sparse.csr_matrix: A converted matrix.

    """
    if not check_availability('csc2csr'):
        # Fixed: the message previously reported 'csr2csc'.
        raise RuntimeError('csc2csr is not available.')

    handle = _device.get_cusparse_handle()
    m, n = x.shape
    nnz = x.nnz
    data = _cupy.empty(nnz, x.dtype)
    indices = _cupy.empty(nnz, 'i')
    if nnz == 0:
        # Empty matrix: no conversion kernel call is needed; an all-zero
        # indptr already describes an empty CSR matrix.
        indptr = _cupy.zeros(m + 1, 'i')
    else:
        indptr = _cupy.empty(m + 1, 'i')
        # csr2csc on the transposed shape (n, m) performs CSC -> CSR.
        _call_cusparse(
            'csr2csc', x.dtype,
            handle, n, m, nnz, x.data.data.ptr,
            x.indptr.data.ptr, x.indices.data.ptr,
            data.data.ptr, indices.data.ptr, indptr.data.ptr,
            _cusparse.CUSPARSE_ACTION_NUMERIC,
            _cusparse.CUSPARSE_INDEX_BASE_ZERO)
    return cupyx.scipy.sparse.csr_matrix(
        (data, indices, indptr), shape=x.shape)


def csc2csrEx2(x):
    """Converts a CSC-matrix to CSR format (generic Ex2 cuSPARSE API).

    Args:
        x (cupyx.scipy.sparse.csc_matrix): A matrix to be converted.

    Returns:
        cupyx.scipy.sparse.csr_matrix: A converted matrix.

    """
    if not check_availability('csc2csrEx2'):
        raise RuntimeError('csc2csrEx2 is not available.')

    handle = _device.get_cusparse_handle()
    m, n = x.shape
    nnz = x.nnz
    data = _cupy.empty(nnz, x.dtype)
    indices = _cupy.empty(nnz, 'i')
    if nnz == 0:
        indptr = _cupy.zeros(m + 1, 'i')
    else:
        indptr = _cupy.empty(m + 1, 'i')
    x_dtype = _dtype.to_cuda_dtype(x.dtype)
    action = _cusparse.CUSPARSE_ACTION_NUMERIC
    ibase = _cusparse.CUSPARSE_INDEX_BASE_ZERO
    algo = _cusparse.CUSPARSE_CSR2CSC_ALG1
    # Note the swapped (n, m): the CSC input of shape (m, n) is treated as a
    # CSR matrix of shape (n, m), so csr2cscEx2 yields the CSR of x.
    buffer_size = _cusparse.csr2cscEx2_bufferSize(
        handle, n, m, nnz, x.data.data.ptr, x.indptr.data.ptr,
        x.indices.data.ptr, data.data.ptr, indptr.data.ptr,
        indices.data.ptr, x_dtype, action, ibase, algo)
    buffer = _cupy.empty(buffer_size, _numpy.int8)
    _cusparse.csr2cscEx2(
        handle, n, m, nnz, x.data.data.ptr, x.indptr.data.ptr,
        x.indices.data.ptr, data.data.ptr, indptr.data.ptr,
        indices.data.ptr, x_dtype, action, ibase, algo, buffer.data.ptr)
    return cupyx.scipy.sparse.csr_matrix(
        (data, indices, indptr), shape=x.shape)
def dense2csc(x):
    """Converts a dense matrix to CSC format.

    Args:
        x (cupy.ndarray): A matrix to be converted.

    Returns:
        cupyx.scipy.sparse.csc_matrix: A converted matrix.

    """
    if not check_availability('dense2csc'):
        raise RuntimeError('dense2csc is not available.')

    assert x.ndim == 2
    x = _cupy.asfortranarray(x)
    nnz = _numpy.empty((), dtype='i')
    handle = _device.get_cusparse_handle()
    m, n = x.shape

    descr = MatDescriptor.create()
    # Fixed: with CUSPARSE_DIRECTION_COLUMN, cusparse<t>nnz writes one count
    # per *column*, i.e. n entries; the previous allocation of m entries
    # under-allocated whenever m < n.
    nnz_per_col = _cupy.empty(n, 'i')
    _call_cusparse(
        'nnz', x.dtype,
        handle, _cusparse.CUSPARSE_DIRECTION_COLUMN, m, n, descr.descriptor,
        x.data.ptr, m, nnz_per_col.data.ptr, nnz.ctypes.data)

    nnz = int(nnz)
    data = _cupy.empty(nnz, x.dtype)
    indptr = _cupy.empty(n + 1, 'i')
    indices = _cupy.empty(nnz, 'i')

    _call_cusparse(
        'dense2csc', x.dtype,
        handle, m, n, descr.descriptor,
        x.data.ptr, m, nnz_per_col.data.ptr,
        data.data.ptr, indices.data.ptr, indptr.data.ptr)
    # Note that a descriptor is recreated
    csc = cupyx.scipy.sparse.csc_matrix((data, indices, indptr), shape=x.shape)
    csc._has_canonical_format = True
    return csc


def dense2csr(x):
    """Converts a dense matrix to CSR format.

    Args:
        x (cupy.ndarray): A matrix to be converted.

    Returns:
        cupyx.scipy.sparse.csr_matrix: A converted matrix.

    """
    if not check_availability('dense2csr'):
        raise RuntimeError('dense2csr is not available.')

    assert x.ndim == 2
    x = _cupy.asfortranarray(x)
    nnz = _numpy.empty((), dtype='i')
    handle = _device.get_cusparse_handle()
    m, n = x.shape

    descr = MatDescriptor.create()
    # DIRECTION_ROW writes one count per row: m entries.
    nnz_per_row = _cupy.empty(m, 'i')
    _call_cusparse(
        'nnz', x.dtype,
        handle, _cusparse.CUSPARSE_DIRECTION_ROW, m, n, descr.descriptor,
        x.data.ptr, m, nnz_per_row.data.ptr, nnz.ctypes.data)

    nnz = int(nnz)
    if _runtime.is_hip:
        if nnz == 0:
            raise ValueError('hipSPARSE currently cannot handle '
                             'sparse matrices with null ptrs')
    data = _cupy.empty(nnz, x.dtype)
    indptr = _cupy.empty(m + 1, 'i')
    indices = _cupy.empty(nnz, 'i')

    _call_cusparse(
        'dense2csr', x.dtype,
        handle, m, n, descr.descriptor,
        x.data.ptr, m, nnz_per_row.data.ptr,
        data.data.ptr, indptr.data.ptr, indices.data.ptr)
    # Note that a descriptor is recreated
    csr = cupyx.scipy.sparse.csr_matrix((data, indices, indptr), shape=x.shape)
    csr._has_canonical_format = True
    return csr


def csr2csr_compress(x, tol):
    """Removes near-zero entries (|v| <= tol) from a CSR matrix.

    Args:
        x (cupyx.scipy.sparse.csr_matrix): A matrix to be compressed.
        tol (scalar): Magnitude threshold; entries not exceeding it are
            dropped.

    Returns:
        cupyx.scipy.sparse.csr_matrix: The compressed matrix.

    """
    if not check_availability('csr2csr_compress'):
        raise RuntimeError('csr2csr_compress is not available.')

    assert x.dtype.char in 'fdFD'

    handle = _device.get_cusparse_handle()
    m, n = x.shape

    # First pass counts the surviving entries per row and in total.
    nnz_per_row = _cupy.empty(m, 'i')
    nnz = _call_cusparse(
        'nnz_compress', x.dtype,
        handle, m, x._descr.descriptor,
        x.data.data.ptr, x.indptr.data.ptr, nnz_per_row.data.ptr, tol)
    data = _cupy.zeros(nnz, x.dtype)
    indptr = _cupy.empty(m + 1, 'i')
    indices = _cupy.zeros(nnz, 'i')
    _call_cusparse(
        'csr2csr_compress', x.dtype,
        handle, m, n, x._descr.descriptor,
        x.data.data.ptr, x.indices.data.ptr, x.indptr.data.ptr,
        x.nnz, nnz_per_row.data.ptr, data.data.ptr, indices.data.ptr,
        indptr.data.ptr, tol)

    return cupyx.scipy.sparse.csr_matrix(
        (data, indices, indptr), shape=x.shape)
def _dtype_to_IndexType(dtype):
    """Maps a NumPy index dtype to the matching cuSPARSE index-type enum."""
    if dtype == 'uint16':
        return _cusparse.CUSPARSE_INDEX_16U
    elif dtype == 'int32':
        return _cusparse.CUSPARSE_INDEX_32I
    elif dtype == 'int64':
        return _cusparse.CUSPARSE_INDEX_64I
    else:
        # Fixed: previously raised a bare TypeError with no message, which
        # made the unsupported dtype impossible to diagnose from the error.
        raise TypeError('unsupported index dtype: {}'.format(dtype))


class BaseDescriptor(object):
    """Owns a cuSPARSE generic-API descriptor and destroys it on GC.

    Attributes are delegated to the struct returned by ``get`` (when one is
    provided), so e.g. ``desc.nnz`` works for formats with a getter.
    """

    def __init__(self, descriptor, get=None, destroyer=None):
        # Raw descriptor handle returned by a cusparseCreate* call.
        self.desc = descriptor
        # Optional cusparse*Get function used by __getattr__ delegation.
        self.get = get
        # Optional cusparseDestroy* function; None means nothing to free.
        self.destroy = destroyer

    def __del__(self, is_shutting_down=_util.is_shutting_down):
        # Skip destruction during interpreter shutdown, when the cuSPARSE
        # bindings may already be torn down.
        if is_shutting_down():
            return
        if self.destroy is None:
            self.desc = None
        elif self.desc is not None:
            self.destroy(self.desc)
            self.desc = None

    def __getattr__(self, name):
        # Delegate unknown attributes to the getter struct, if available.
        if self.get is not None:
            return getattr(self.get(self.desc), name)
        raise AttributeError


class SpMatDescriptor(BaseDescriptor):
    """Descriptor for a sparse matrix in CSR, CSC or COO format."""

    @classmethod
    def create(cls, a):
        assert cupyx.scipy.sparse.issparse(a)
        rows, cols = a.shape
        idx_base = _cusparse.CUSPARSE_INDEX_BASE_ZERO
        cuda_dtype = _dtype.to_cuda_dtype(a.dtype)
        if a.format == 'csr':
            desc = _cusparse.createCsr(
                rows, cols, a.nnz, a.indptr.data.ptr, a.indices.data.ptr,
                a.data.data.ptr, _dtype_to_IndexType(a.indptr.dtype),
                _dtype_to_IndexType(a.indices.dtype), idx_base, cuda_dtype)
            get = _cusparse.csrGet
        elif a.format == 'coo':
            desc = _cusparse.createCoo(
                rows, cols, a.nnz, a.row.data.ptr, a.col.data.ptr,
                a.data.data.ptr, _dtype_to_IndexType(a.row.dtype),
                idx_base, cuda_dtype)
            get = _cusparse.cooGet
        elif a.format == 'csc':
            desc = _cusparse.createCsc(
                rows, cols, a.nnz, a.indptr.data.ptr, a.indices.data.ptr,
                a.data.data.ptr, _dtype_to_IndexType(a.indptr.dtype),
                _dtype_to_IndexType(a.indices.dtype), idx_base, cuda_dtype)
            # cuSPARSE provides no cscGet; attribute delegation is disabled.
            get = None
        else:
            raise ValueError('csr, csc and coo format are supported '
                             '(actual: {}).'.format(a.format))
        destroy = _cusparse.destroySpMat
        return SpMatDescriptor(desc, get, destroy)

    def set_attribute(self, attribute, data):
        _cusparse.spMatSetAttribute(self.desc, attribute, data)


class SpVecDescriptor(BaseDescriptor):
    """Descriptor for a sparse vector given as (indices, values)."""

    @classmethod
    def create(cls, idx, x):
        nnz = x.size
        cuda_dtype = _dtype.to_cuda_dtype(x.dtype)
        desc = _cusparse.createSpVec(nnz, nnz, idx.data.ptr, x.data.ptr,
                                     _dtype_to_IndexType(idx.dtype),
                                     _cusparse.CUSPARSE_INDEX_BASE_ZERO,
                                     cuda_dtype)
        get = _cusparse.spVecGet
        destroy = _cusparse.destroySpVec
        return SpVecDescriptor(desc, get, destroy)


class DnVecDescriptor(BaseDescriptor):
    """Descriptor for a dense vector."""

    @classmethod
    def create(cls, x):
        cuda_dtype = _dtype.to_cuda_dtype(x.dtype)
        desc = _cusparse.createDnVec(x.size, x.data.ptr, cuda_dtype)
        get = _cusparse.dnVecGet
        destroy = _cusparse.destroyDnVec
        return DnVecDescriptor(desc, get, destroy)


class DnMatDescriptor(BaseDescriptor):
    """Descriptor for a dense (column-major) matrix."""

    @classmethod
    def create(cls, a):
        assert a.ndim == 2
        assert a.flags.f_contiguous
        rows, cols = a.shape
        # Leading dimension equals the row count for F-contiguous storage.
        ld = rows
        cuda_dtype = _dtype.to_cuda_dtype(a.dtype)
        desc = _cusparse.createDnMat(rows, cols, ld, a.data.ptr, cuda_dtype,
                                     _cusparse.CUSPARSE_ORDER_COL)
        get = _cusparse.dnMatGet
        destroy = _cusparse.destroyDnMat
        return DnMatDescriptor(desc, get, destroy)
def spmv(a, x, y=None, alpha=1, beta=0, transa=False):
    """Multiplication of sparse matrix and dense vector.

    .. math::

        y = \\alpha * op(A) x + \\beta * y

    Args:
        a (cupyx.scipy.sparse.csr_matrix, csc_matrix or coo_matrix):
            Sparse matrix A
        x (cupy.ndarray): Dense vector x
        y (cupy.ndarray or None): Dense vector y
        alpha (scalar): Coefficient
        beta (scalar): Coefficient
        transa (bool): If ``True``, op(A) = transpose of A.

    Returns:
        cupy.ndarray
    """
    if not check_availability('spmv'):
        raise RuntimeError('spmv is not available.')

    # CSC input is handled by working on its transpose (a CSR matrix) with
    # the transpose flag flipped.
    if isinstance(a, cupyx.scipy.sparse.csc_matrix):
        a_t = a.T
        if not isinstance(a_t, cupyx.scipy.sparse.csr_matrix):
            raise TypeError(
                'aT must be csr_matrix (actual: {})'.format(type(a_t)))
        a = a_t
        transa = not transa
    if not isinstance(a, (cupyx.scipy.sparse.csr_matrix,
                          cupyx.scipy.sparse.coo_matrix)):
        raise TypeError('unsupported type (actual: {})'.format(type(a)))

    # Shape of op(A): swap the axes when A is transposed.
    op_shape = a.shape[::-1] if transa else a.shape
    if op_shape[1] != len(x):
        raise ValueError('dimension mismatch')
    assert a.has_canonical_format

    out_len = op_shape[0]
    a, x, y = _cast_common_type(a, x, y)
    if y is None:
        y = _cupy.zeros(out_len, a.dtype)
    elif len(y) != out_len:
        raise ValueError('dimension mismatch')
    if a.nnz == 0:
        # Empty matrix: op(A) x is zero, and beta is ignored by convention.
        y.fill(0)
        return y

    mat_a = SpMatDescriptor.create(a)
    vec_x = DnVecDescriptor.create(x)
    vec_y = DnVecDescriptor.create(y)

    handle = _device.get_cusparse_handle()
    op_a = _transpose_flag(transa)
    alpha_ct = _numpy.array(alpha, a.dtype).ctypes
    beta_ct = _numpy.array(beta, a.dtype).ctypes
    cuda_dtype = _dtype.to_cuda_dtype(a.dtype)
    alg = _cusparse.CUSPARSE_MV_ALG_DEFAULT
    # Query and allocate the external workspace, then run the product.
    buff_size = _cusparse.spMV_bufferSize(
        handle, op_a, alpha_ct.data, mat_a.desc, vec_x.desc, beta_ct.data,
        vec_y.desc, cuda_dtype, alg)
    buff = _cupy.empty(buff_size, _cupy.int8)
    _cusparse.spMV(
        handle, op_a, alpha_ct.data, mat_a.desc, vec_x.desc, beta_ct.data,
        vec_y.desc, cuda_dtype, alg, buff.data.ptr)

    return y
def spmm(a, b, c=None, alpha=1, beta=0, transa=False, transb=False):
    """Multiplication of sparse matrix and dense matrix.

    .. math::

        C = \\alpha * op(A) op(B) + \\beta * C

    Args:
        a (cupyx.scipy.sparse.csr_matrix, csc_matrix or coo_matrix):
            Sparse matrix A
        b (cupy.ndarray): Dense matrix B
        c (cupy.ndarray or None): Dense matrix C
        alpha (scalar): Coefficient
        beta (scalar): Coefficient
        transa (bool): If ``True``, op(A) = transpose of A.
        transb (bool): If ``True``, op(B) = transpose of B.

    Returns:
        cupy.ndarray
    """
    if not check_availability('spmm'):
        raise RuntimeError('spmm is not available.')

    assert a.ndim == b.ndim == 2
    assert b.flags.f_contiguous
    assert c is None or c.flags.f_contiguous

    # CSC input is handled by working on its transpose (a CSR matrix) with
    # the transpose flag flipped.
    if isinstance(a, cupyx.scipy.sparse.csc_matrix):
        aT = a.T
        if not isinstance(aT, cupyx.scipy.sparse.csr_matrix):
            msg = 'aT must be csr_matrix (actual: {})'.format(type(aT))
            raise TypeError(msg)
        a = aT
        transa = not transa
    if not isinstance(a, (cupyx.scipy.sparse.csr_matrix,
                          cupyx.scipy.sparse.coo_matrix)):
        raise TypeError('unsupported type (actual: {})'.format(type(a)))
    a_shape = a.shape if not transa else a.shape[::-1]
    b_shape = b.shape if not transb else b.shape[::-1]
    if a_shape[1] != b_shape[0]:
        raise ValueError('dimension mismatch')
    assert a.has_canonical_format

    m, k = a_shape
    _, n = b_shape
    a, b, c = _cast_common_type(a, b, c)
    if c is None:
        c = _cupy.zeros((m, n), a.dtype, 'F')
    elif c.shape[0] != m or c.shape[1] != n:
        raise ValueError('dimension mismatch')
    if a.nnz == 0:
        c.fill(0)
        return c

    desc_a = SpMatDescriptor.create(a)
    desc_b = DnMatDescriptor.create(b)
    desc_c = DnMatDescriptor.create(c)

    handle = _device.get_cusparse_handle()
    op_a = _transpose_flag(transa)
    op_b = _transpose_flag(transb)
    alpha = _numpy.array(alpha, a.dtype).ctypes
    beta = _numpy.array(beta, a.dtype).ctypes
    cuda_dtype = _dtype.to_cuda_dtype(a.dtype)
    alg = _cusparse.CUSPARSE_MM_ALG_DEFAULT
    buff_size = _cusparse.spMM_bufferSize(handle, op_a, op_b, alpha.data,
                                          desc_a.desc, desc_b.desc, beta.data,
                                          desc_c.desc, cuda_dtype, alg)
    buff = _cupy.empty(buff_size, _cupy.int8)
    # Fixed: the spMM return value was previously assigned to ``buff_size``,
    # a misleading dead store; cusparseSpMM does not return a buffer size.
    _cusparse.spMM(handle, op_a, op_b, alpha.data, desc_a.desc,
                   desc_b.desc, beta.data, desc_c.desc,
                   cuda_dtype, alg, buff.data.ptr)

    return c
def csrsm2(a, b, alpha=1.0, lower=True, unit_diag=False, transa=False,
           blocking=True, level_info=False):
    """Solves a sparse triangular linear system op(a) * x = alpha * b.

    Args:
        a (cupyx.scipy.sparse.csr_matrix or cupyx.scipy.sparse.csc_matrix):
            Sparse matrix with dimension ``(M, M)``.
        b (cupy.ndarray): Dense vector or matrix with dimension ``(M)`` or
            ``(M, K)``.
        alpha (float or complex): Coefficient.
        lower (bool):
            True: ``a`` is lower triangle matrix.
            False: ``a`` is upper triangle matrix.
        unit_diag (bool):
            True: diagonal part of ``a`` has unit elements.
            False: diagonal part of ``a`` has non-unit elements.
        transa (bool or str): True, False, 'N', 'T' or 'H'.
            'N' or False: op(a) == ``a``.
            'T' or True: op(a) == ``a.T``.
            'H': op(a) == ``a.conj().T``.
        blocking (bool):
            True: blocking algorithm is used.
            False: non-blocking algorithm is used.
        level_info (bool):
            True: solves it with level information.
            False: solves it without level information.

    Note: ``b`` will be overwritten.
    """
    if not check_availability('csrsm2'):
        raise RuntimeError('csrsm2 is not available.')

    if not (cupyx.scipy.sparse.isspmatrix_csr(a) or
            cupyx.scipy.sparse.isspmatrix_csc(a)):
        raise ValueError('a must be CSR or CSC sparse matrix')
    if not isinstance(b, _cupy.ndarray):
        raise ValueError('b must be cupy.ndarray')
    if b.ndim not in (1, 2):
        raise ValueError('b.ndim must be 1 or 2')
    if not (a.shape[0] == a.shape[1] == b.shape[0]):
        raise ValueError('invalid shape')
    if a.dtype != b.dtype:
        raise TypeError('dtype mismatch')

    if lower is True:
        fill_mode = _cusparse.CUSPARSE_FILL_MODE_LOWER
    elif lower is False:
        fill_mode = _cusparse.CUSPARSE_FILL_MODE_UPPER
    else:
        raise ValueError('Unknown lower (actual: {})'.format(lower))

    if unit_diag is False:
        diag_type = _cusparse.CUSPARSE_DIAG_TYPE_NON_UNIT
    elif unit_diag is True:
        diag_type = _cusparse.CUSPARSE_DIAG_TYPE_UNIT
    else:
        raise ValueError('Unknown unit_diag (actual: {})'.format(unit_diag))

    if blocking is False:
        algo = 0
    elif blocking is True:
        algo = 1
    else:
        raise ValueError('Unknown blocking (actual: {})'.format(blocking))

    if level_info is False:
        policy = _cusparse.CUSPARSE_SOLVE_POLICY_NO_LEVEL
    elif level_info is True:
        policy = _cusparse.CUSPARSE_SOLVE_POLICY_USE_LEVEL
    else:
        raise ValueError('Unknown level_info (actual: {})'.format(level_info))

    # Map the dtype to the cuSPARSE function-name prefix (s/d/c/z).
    dtype = a.dtype
    if dtype.char == 'f':
        t = 's'
    elif dtype.char == 'd':
        t = 'd'
    elif dtype.char == 'F':
        t = 'c'
    elif dtype.char == 'D':
        t = 'z'
    else:
        raise TypeError('Invalid dtype (actual: {})'.format(dtype))
    helper = getattr(_cusparse, t + 'csrsm2_bufferSizeExt')
    analysis = getattr(_cusparse, t + 'csrsm2_analysis')
    solve = getattr(_cusparse, t + 'csrsm2_solve')

    if transa is False or transa == 'N':
        transa = _cusparse.CUSPARSE_OPERATION_NON_TRANSPOSE
    elif transa is True or transa == 'T':
        transa = _cusparse.CUSPARSE_OPERATION_TRANSPOSE
    elif transa == 'H':
        # For real dtypes conjugate-transpose degenerates to transpose.
        if dtype.char in 'fd':
            transa = _cusparse.CUSPARSE_OPERATION_TRANSPOSE
        else:
            transa = _cusparse.CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE
    else:
        raise ValueError('Unknown transa (actual: {})'.format(transa))

    if cupyx.scipy.sparse.isspmatrix_csc(a):
        if transa == _cusparse.CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE:
            raise ValueError('If matrix is CSC format and complex dtype,'
                             'transa must not be \'H\'')
        # CSC is solved via its CSR transpose; flip the op and fill mode.
        # (relies on NON_TRANSPOSE/TRANSPOSE and LOWER/UPPER being 0/1)
        a = a.T
        assert cupyx.scipy.sparse.isspmatrix_csr(a)
        transa = 1 - transa
        fill_mode = 1 - fill_mode

    m = a.shape[0]
    nrhs = 1 if b.ndim == 1 else b.shape[1]
    if b._f_contiguous:
        transb = _cusparse.CUSPARSE_OPERATION_NON_TRANSPOSE
        ldb = m
    elif b._c_contiguous:
        transb = _cusparse.CUSPARSE_OPERATION_TRANSPOSE
        ldb = nrhs
    else:
        raise ValueError('b must be F-contiguous or C-contiguous.')

    handle = _device.get_cusparse_handle()
    alpha = _numpy.array(alpha, dtype=dtype)
    a_desc = MatDescriptor.create()
    a_desc.set_mat_type(_cusparse.CUSPARSE_MATRIX_TYPE_GENERAL)
    a_desc.set_mat_index_base(_cusparse.CUSPARSE_INDEX_BASE_ZERO)
    a_desc.set_mat_fill_mode(fill_mode)
    a_desc.set_mat_diag_type(diag_type)
    info = _cusparse.createCsrsm2Info()
    # Fixed: ensure the info object is destroyed even when one of the
    # helper/analysis/solve calls raises (it previously leaked on error).
    try:
        ws_size = helper(handle, algo, transa, transb, m, nrhs, a.nnz,
                         alpha.ctypes.data, a_desc.descriptor,
                         a.data.data.ptr, a.indptr.data.ptr,
                         a.indices.data.ptr, b.data.ptr, ldb, info, policy)
        ws = _cupy.empty((ws_size,), dtype=_numpy.int8)

        analysis(handle, algo, transa, transb, m, nrhs, a.nnz,
                 alpha.ctypes.data, a_desc.descriptor, a.data.data.ptr,
                 a.indptr.data.ptr, a.indices.data.ptr, b.data.ptr, ldb,
                 info, policy, ws.data.ptr)

        solve(handle, algo, transa, transb, m, nrhs, a.nnz,
              alpha.ctypes.data, a_desc.descriptor, a.data.data.ptr,
              a.indptr.data.ptr, a.indices.data.ptr, b.data.ptr, ldb,
              info, policy, ws.data.ptr)

        # without sync we'd get either segfault or cuda context error
        _stream.get_current_stream().synchronize()
    finally:
        _cusparse.destroyCsrsm2Info(info)
def csrilu02(a, level_info=False):
    """Computes incomplete LU decomposition for a sparse square matrix.

    Args:
        a (cupyx.scipy.sparse.csr_matrix):
            Sparse matrix with dimension ``(M, M)``.
        level_info (bool):
            True: solves it with level information.
            False: solves it without level information.

    Note: ``a`` will be overwritten. This function does not support fill-in
    (only ILU(0) is supported) nor pivoting.
    """
    if not check_availability('csrilu02'):
        raise RuntimeError('csrilu02 is not available.')

    if not cupyx.scipy.sparse.isspmatrix_csr(a):
        raise TypeError('a must be CSR sparse matrix')
    if a.shape[0] != a.shape[1]:
        raise ValueError('invalid shape (a.shape: {})'.format(a.shape))

    if level_info is False:
        policy = _cusparse.CUSPARSE_SOLVE_POLICY_NO_LEVEL
    elif level_info is True:
        policy = _cusparse.CUSPARSE_SOLVE_POLICY_USE_LEVEL
    else:
        raise ValueError('Unknown level_info (actual: {})'.format(level_info))

    # Map the dtype to the cuSPARSE function-name prefix (s/d/c/z).
    dtype = a.dtype
    if dtype.char == 'f':
        t = 's'
    elif dtype.char == 'd':
        t = 'd'
    elif dtype.char == 'F':
        t = 'c'
    elif dtype.char == 'D':
        t = 'z'
    else:
        raise TypeError('Invalid dtype (actual: {})'.format(dtype))
    helper = getattr(_cusparse, t + 'csrilu02_bufferSize')
    analysis = getattr(_cusparse, t + 'csrilu02_analysis')
    solve = getattr(_cusparse, t + 'csrilu02')
    check = getattr(_cusparse, 'xcsrilu02_zeroPivot')

    handle = _device.get_cusparse_handle()
    m = a.shape[0]
    nnz = a.nnz
    desc = MatDescriptor.create()
    desc.set_mat_type(_cusparse.CUSPARSE_MATRIX_TYPE_GENERAL)
    desc.set_mat_index_base(_cusparse.CUSPARSE_INDEX_BASE_ZERO)
    # NOTE(review): the info object created here is never destroyed
    # (no destroyCsrilu02Info call) — possible handle leak; upstream
    # behavior preserved, confirm against the cuSPARSE bindings.
    info = _cusparse.createCsrilu02Info()
    ws_size = helper(handle, m, nnz, desc.descriptor, a.data.data.ptr,
                     a.indptr.data.ptr, a.indices.data.ptr, info)
    ws = _cupy.empty((ws_size,), dtype=_numpy.int8)
    position = _numpy.empty((1,), dtype=_numpy.int32)

    analysis(handle, m, nnz, desc.descriptor, a.data.data.ptr,
             a.indptr.data.ptr, a.indices.data.ptr, info, policy,
             ws.data.ptr)
    try:
        check(handle, info, position.ctypes.data)
    except Exception as e:
        # Fixed: chain the original zero-pivot error instead of discarding
        # its context.
        raise ValueError('a({0},{0}) is missing'.format(position[0])) from e

    solve(handle, m, nnz, desc.descriptor, a.data.data.ptr,
          a.indptr.data.ptr, a.indices.data.ptr, info, policy, ws.data.ptr)
    try:
        check(handle, info, position.ctypes.data)
    except Exception as e:
        raise ValueError('u({0},{0}) is zero'.format(position[0])) from e


def denseToSparse(x, format='csr'):
    """Converts a dense matrix into a CSR, CSC or COO format.

    Args:
        x (cupy.ndarray): A matrix to be converted.
        format (str): Format of converted matrix. It must be either 'csr',
            'csc' or 'coo'.

    Returns:
        cupyx.scipy.sparse.spmatrix: A converted sparse matrix.

    """
    if not check_availability('denseToSparse'):
        raise RuntimeError('denseToSparse is not available.')

    assert x.ndim == 2
    assert x.dtype.char in 'fdFD'
    x = _cupy.asfortranarray(x)
    desc_x = DnMatDescriptor.create(x)
    # A placeholder (empty) sparse matrix supplies the output descriptor for
    # the analysis phase; the real arrays are allocated once nnz is known.
    if format == 'csr':
        y = cupyx.scipy.sparse.csr_matrix(x.shape, dtype=x.dtype)
    elif format == 'csc':
        y = cupyx.scipy.sparse.csc_matrix(x.shape, dtype=x.dtype)
    elif format == 'coo':
        y = cupyx.scipy.sparse.coo_matrix(x.shape, dtype=x.dtype)
    else:
        raise TypeError('unsupported format (actual: {})'.format(format))
    desc_y = SpMatDescriptor.create(y)
    algo = _cusparse.CUSPARSE_DENSETOSPARSE_ALG_DEFAULT
    handle = _device.get_cusparse_handle()
    buff_size = _cusparse.denseToSparse_bufferSize(handle, desc_x.desc,
                                                   desc_y.desc, algo)
    buff = _cupy.empty(buff_size, _cupy.int8)
    _cusparse.denseToSparse_analysis(handle, desc_x.desc,
                                     desc_y.desc, algo, buff.data.ptr)
    num_rows_tmp = _numpy.array(0, dtype='int64')
    num_cols_tmp = _numpy.array(0, dtype='int64')
    nnz = _numpy.array(0, dtype='int64')
    _cusparse.spMatGetSize(desc_y.desc, num_rows_tmp.ctypes.data,
                           num_cols_tmp.ctypes.data, nnz.ctypes.data)
    nnz = int(nnz)
    if _runtime.is_hip:
        if nnz == 0:
            raise ValueError('hipSPARSE currently cannot handle '
                             'sparse matrices with null ptrs')
    if format == 'csr':
        indptr = y.indptr
        indices = _cupy.empty(nnz, 'i')
        data = _cupy.empty(nnz, x.dtype)
        y = cupyx.scipy.sparse.csr_matrix((data, indices, indptr),
                                          shape=x.shape)
    elif format == 'csc':
        indptr = y.indptr
        indices = _cupy.empty(nnz, 'i')
        data = _cupy.empty(nnz, x.dtype)
        y = cupyx.scipy.sparse.csc_matrix((data, indices, indptr),
                                          shape=x.shape)
    elif format == 'coo':
        row = _cupy.zeros(nnz, 'i')
        col = _cupy.zeros(nnz, 'i')
        # Note: I would like to use empty() here, but that might cause an
        # exception in the row/col number check when creating the coo_matrix,
        # so I used zeros() instead.
        data = _cupy.empty(nnz, x.dtype)
        y = cupyx.scipy.sparse.coo_matrix((data, (row, col)), shape=x.shape)
    desc_y = SpMatDescriptor.create(y)
    _cusparse.denseToSparse_convert(handle, desc_x.desc,
                                    desc_y.desc, algo, buff.data.ptr)
    y._has_canonical_format = True
    return y
def sparseToDense(x, out=None):
    """Converts sparse matrix to a dense matrix.

    Args:
        x (cupyx.scipy.sparse.spmatrix): A sparse matrix to convert.
        out (cupy.ndarray or None): A dense matrix to store the result.
            It must be F-contiguous.

    Returns:
        cupy.ndarray: A converted dense matrix.

    """
    if not check_availability('sparseToDense'):
        raise RuntimeError('sparseToDense is not available.')

    dtype = x.dtype
    assert dtype.char in 'fdFD'
    if out is None:
        # Allocate a zeroed F-order output so entries absent from the sparse
        # matrix stay zero.
        out = _cupy.zeros(x.shape, dtype=dtype, order='F')
    else:
        assert out.flags.f_contiguous
        assert out.dtype == dtype

    desc_x = SpMatDescriptor.create(x)
    desc_out = DnMatDescriptor.create(out)
    algo = _cusparse.CUSPARSE_SPARSETODENSE_ALG_DEFAULT
    handle = _device.get_cusparse_handle()
    # Query and allocate the external workspace required by the conversion.
    buff_size = _cusparse.sparseToDense_bufferSize(handle, desc_x.desc,
                                                   desc_out.desc, algo)
    buff = _cupy.empty(buff_size, _cupy.int8)
    if _runtime.is_hip:
        # hipSPARSE rejects sparse descriptors backed by null pointers.
        if x.nnz == 0:
            raise ValueError('hipSPARSE currently cannot handle '
                             'sparse matrices with null ptrs')
    _cusparse.sparseToDense(handle, desc_x.desc,
                            desc_out.desc, algo, buff.data.ptr)

    return out
def spsm(a, b, alpha=1.0, lower=True, unit_diag=False, transa=False):
    """Solves a sparse triangular linear system op(a) * x = alpha * op(b).

    Args:
        a (cupyx.scipy.sparse.csr_matrix or cupyx.scipy.sparse.coo_matrix):
            Sparse matrix with dimension ``(M, M)``.
        b (cupy.ndarray): Dense matrix with dimension ``(M, K)``.
        alpha (float or complex): Coefficient.
        lower (bool):
            True: ``a`` is lower triangle matrix.
            False: ``a`` is upper triangle matrix.
        unit_diag (bool):
            True: diagonal part of ``a`` has unit elements.
            False: diagonal part of ``a`` has non-unit elements.
        transa (bool or str): True, False, 'N', 'T' or 'H'.
            'N' or False: op(a) == ``a``.
            'T' or True: op(a) == ``a.T``.
            'H': op(a) == ``a.conj().T``.
    """
    if not check_availability('spsm'):
        raise RuntimeError('spsm is not available.')

    # Canonicalise transa
    if transa is False:
        transa = 'N'
    elif transa is True:
        transa = 'T'
    elif transa not in 'NTH':
        raise ValueError(f'Unknown transa (actual: {transa})')

    # Check A's type and sparse format.  CSC input is rewritten as an
    # equivalent operation on its (CSR) transpose; note 'H' on CSC also
    # flips the triangle (lower <-> upper).
    if cupyx.scipy.sparse.isspmatrix_csr(a):
        pass
    elif cupyx.scipy.sparse.isspmatrix_csc(a):
        if transa == 'N':
            a = a.T
            transa = 'T'
        elif transa == 'T':
            a = a.T
            transa = 'N'
        elif transa == 'H':
            a = a.conj().T
            transa = 'N'
            lower = not lower
    elif cupyx.scipy.sparse.isspmatrix_coo(a):
        pass
    else:
        raise ValueError('a must be CSR, CSC or COO sparse matrix')
    assert a.has_canonical_format

    # Check B's ndim; a vector is treated as an (M, 1) matrix and reshaped
    # back before returning.
    if b.ndim == 1:
        is_b_vector = True
        b = b.reshape(-1, 1)
    elif b.ndim == 2:
        is_b_vector = False
    else:
        raise ValueError('b.ndim must be 1 or 2')

    # Check shapes
    if not (a.shape[0] == a.shape[1] == b.shape[0]):
        raise ValueError('mismatched shape')

    # Check dtypes
    dtype = a.dtype
    if dtype.char not in 'fdFD':
        raise TypeError('Invalid dtype (actual: {})'.format(dtype))
    if dtype != b.dtype:
        raise TypeError('dtype mismatch')

    # Prepare fill mode
    if lower is True:
        fill_mode = _cusparse.CUSPARSE_FILL_MODE_LOWER
    elif lower is False:
        fill_mode = _cusparse.CUSPARSE_FILL_MODE_UPPER
    else:
        raise ValueError('Unknown lower (actual: {})'.format(lower))

    # Prepare diag type
    if unit_diag is False:
        diag_type = _cusparse.CUSPARSE_DIAG_TYPE_NON_UNIT
    elif unit_diag is True:
        diag_type = _cusparse.CUSPARSE_DIAG_TYPE_UNIT
    else:
        raise ValueError('Unknown unit_diag (actual: {})'.format(unit_diag))

    # Prepare op_a; for real dtypes 'H' degenerates to plain transpose.
    if transa == 'N':
        op_a = _cusparse.CUSPARSE_OPERATION_NON_TRANSPOSE
    elif transa == 'T':
        op_a = _cusparse.CUSPARSE_OPERATION_TRANSPOSE
    else:  # transa == 'H'
        if dtype.char in 'fd':
            op_a = _cusparse.CUSPARSE_OPERATION_TRANSPOSE
        else:
            op_a = _cusparse.CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE

    # Prepare op_b; C-contiguous b is passed as its transpose, which
    # requires CUDA 11.6+ (cuSPARSE build >= 11701).
    if b._f_contiguous:
        op_b = _cusparse.CUSPARSE_OPERATION_NON_TRANSPOSE
    elif b._c_contiguous:
        if _cusparse.get_build_version() < 11701:  # earlier than CUDA 11.6
            raise ValueError('b must be F-contiguous.')
        b = b.T
        op_b = _cusparse.CUSPARSE_OPERATION_TRANSPOSE
    else:
        raise ValueError('b must be F-contiguous or C-contiguous.')

    # Allocate space for matrix C. Note that it is known cusparseSpSM requires
    # the output matrix zero initialized.
    m, _ = a.shape
    if op_b == _cusparse.CUSPARSE_OPERATION_NON_TRANSPOSE:
        _, n = b.shape
    else:
        n, _ = b.shape
    c_shape = m, n
    c = _cupy.zeros(c_shape, dtype=a.dtype, order='f')

    # Prepare descriptors and other parameters
    handle = _device.get_cusparse_handle()
    mat_a = SpMatDescriptor.create(a)
    mat_b = DnMatDescriptor.create(b)
    mat_c = DnMatDescriptor.create(c)
    spsm_descr = _cusparse.spSM_createDescr()
    alpha = _numpy.array(alpha, dtype=c.dtype).ctypes
    cuda_dtype = _dtype.to_cuda_dtype(c.dtype)
    algo = _cusparse.CUSPARSE_SPSM_ALG_DEFAULT

    try:
        # Specify Lower|Upper fill mode
        mat_a.set_attribute(_cusparse.CUSPARSE_SPMAT_FILL_MODE, fill_mode)

        # Specify Unit|Non-Unit diagonal type
        mat_a.set_attribute(_cusparse.CUSPARSE_SPMAT_DIAG_TYPE, diag_type)

        # Allocate the workspace needed by the succeeding phases
        buff_size = _cusparse.spSM_bufferSize(
            handle, op_a, op_b, alpha.data, mat_a.desc, mat_b.desc,
            mat_c.desc, cuda_dtype, algo, spsm_descr)
        buff = _cupy.empty(buff_size, dtype=_cupy.int8)

        # Perform the analysis phase
        _cusparse.spSM_analysis(
            handle, op_a, op_b, alpha.data, mat_a.desc, mat_b.desc,
            mat_c.desc, cuda_dtype, algo, spsm_descr, buff.data.ptr)

        # Executes the solve phase
        _cusparse.spSM_solve(
            handle, op_a, op_b, alpha.data, mat_a.desc, mat_b.desc,
            mat_c.desc, cuda_dtype, algo, spsm_descr, buff.data.ptr)

        # Reshape back if B was a vector
        if is_b_vector:
            c = c.reshape(-1)

        return c

    finally:
        # Destroy matrix/vector descriptors
        _cusparse.spSM_destroyDescr(spsm_descr)
def spgemm(a, b, alpha=1):
    """Matrix-matrix product for CSR-matrix.

    .. math::

        C = \\alpha * A * B

    Args:
        a (cupyx.scipy.sparse.csr_matrix): Sparse matrix A.
        b (cupyx.scipy.sparse.csr_matrix): Sparse matrix B.
        alpha (scalar): Coefficient

    Returns:
        cupyx.scipy.sparse.csr_matrix

    """
    if not check_availability('spgemm'):
        raise RuntimeError('spgemm is not available.')

    assert a.ndim == b.ndim == 2
    if not isinstance(a, cupyx.scipy.sparse.csr_matrix):
        raise TypeError('unsupported type (actual: {})'.format(type(a)))
    if not isinstance(b, cupyx.scipy.sparse.csr_matrix):
        raise TypeError('unsupported type (actual: {})'.format(type(b)))
    assert a.has_canonical_format
    assert b.has_canonical_format
    if a.shape[1] != b.shape[0]:
        raise ValueError('mismatched shape')

    m, k = a.shape
    _, n = b.shape
    a, b = _cast_common_type(a, b)
    c_shape = (m, n)
    # Placeholder output; the real arrays are attached once nnz is known.
    c = cupyx.scipy.sparse.csr_matrix(c_shape, dtype=a.dtype)

    handle = _device.get_cusparse_handle()
    mat_a = SpMatDescriptor.create(a)
    mat_b = SpMatDescriptor.create(b)
    mat_c = SpMatDescriptor.create(c)
    spgemm_descr = _cusparse.spGEMM_createDescr()
    op_a = _cusparse.CUSPARSE_OPERATION_NON_TRANSPOSE
    op_b = _cusparse.CUSPARSE_OPERATION_NON_TRANSPOSE
    alpha = _numpy.array(alpha, dtype=c.dtype).ctypes
    beta = _numpy.array(0, dtype=c.dtype).ctypes
    cuda_dtype = _dtype.to_cuda_dtype(c.dtype)
    algo = _cusparse.CUSPARSE_SPGEMM_DEFAULT
    null_ptr = 0

    # Fixed: the SpGEMM descriptor previously leaked if any call below
    # raised; ensure it is destroyed on all paths.
    try:
        # Analyze the matrices A and B to understand the memory requirement
        buff1_size = _cusparse.spGEMM_workEstimation(
            handle, op_a, op_b, alpha.data, mat_a.desc, mat_b.desc, beta.data,
            mat_c.desc, cuda_dtype, algo, spgemm_descr, 0, null_ptr)
        buff1 = _cupy.empty(buff1_size, _cupy.int8)
        _cusparse.spGEMM_workEstimation(
            handle, op_a, op_b, alpha.data, mat_a.desc, mat_b.desc, beta.data,
            mat_c.desc, cuda_dtype, algo, spgemm_descr, buff1_size,
            buff1.data.ptr)

        # Compute the intermediate product of A and B
        buff2_size = _cusparse.spGEMM_compute(
            handle, op_a, op_b, alpha.data, mat_a.desc, mat_b.desc, beta.data,
            mat_c.desc, cuda_dtype, algo, spgemm_descr, 0, null_ptr)
        buff2 = _cupy.empty(buff2_size, _cupy.int8)
        _cusparse.spGEMM_compute(
            handle, op_a, op_b, alpha.data, mat_a.desc, mat_b.desc, beta.data,
            mat_c.desc, cuda_dtype, algo, spgemm_descr, buff2_size,
            buff2.data.ptr)

        # Prepare the arrays for matrix C
        c_num_rows = _numpy.array(0, dtype='int64')
        c_num_cols = _numpy.array(0, dtype='int64')
        c_nnz = _numpy.array(0, dtype='int64')
        _cusparse.spMatGetSize(mat_c.desc, c_num_rows.ctypes.data,
                               c_num_cols.ctypes.data, c_nnz.ctypes.data)
        assert c_shape[0] == int(c_num_rows)
        assert c_shape[1] == int(c_num_cols)
        c_nnz = int(c_nnz)
        c_indptr = c.indptr
        c_indices = _cupy.empty(c_nnz, 'i')
        c_data = _cupy.empty(c_nnz, c.dtype)
        _cusparse.csrSetPointers(mat_c.desc, c_indptr.data.ptr,
                                 c_indices.data.ptr, c_data.data.ptr)

        # Copy the final product to the matrix C
        _cusparse.spGEMM_copy(
            handle, op_a, op_b, alpha.data, mat_a.desc, mat_b.desc, beta.data,
            mat_c.desc, cuda_dtype, algo, spgemm_descr)
        c = cupyx.scipy.sparse.csr_matrix((c_data, c_indices, c_indptr),
                                          shape=c_shape)
    finally:
        _cusparse.spGEMM_destroyDescr(spgemm_descr)
    return c
+_util.experimental('cupyx.fallback_mode.numpy') diff --git a/vllm/lib/python3.10/site-packages/cupyx/fallback_mode/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/fallback_mode/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f06a802d48e767dfbcfa32905e737bc70bf384ab Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/fallback_mode/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/fallback_mode/__pycache__/fallback.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/fallback_mode/__pycache__/fallback.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..751a683112dc7d312e9d264698f46f56284515fd Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/fallback_mode/__pycache__/fallback.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/fallback_mode/__pycache__/notification.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/fallback_mode/__pycache__/notification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7a0f2d782d951a8ccb39533cb40d4592cdf4d87 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/fallback_mode/__pycache__/notification.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/fallback_mode/fallback.py b/vllm/lib/python3.10/site-packages/cupyx/fallback_mode/fallback.py new file mode 100644 index 0000000000000000000000000000000000000000..af87c8b1ed27dd3b41dd0c3b7764e69a65a2071f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/fallback_mode/fallback.py @@ -0,0 +1,596 @@ +""" +`fallback_mode` for cupy. Whenever a method is not yet implemented in CuPy, +it will fallback to corresponding NumPy method. 
+""" +import types + +import numpy as np + +import cupy as cp + + +from cupyx.fallback_mode import notification + + +class _RecursiveAttr(object): + """ + RecursiveAttr class to catch all attributes corresponding to numpy, + when user calls fallback_mode. numpy is an instance of this class. + """ + + def __init__(self, numpy_object, cupy_object, array=None): + """ + _RecursiveAttr initializer. + + Args: + numpy_object (method): NumPy method. + cupy_method (method): Corresponding CuPy method. + array (ndarray): Acts as flag to know if _RecursiveAttr object + is called from ``ndarray`` class. Also, acts as container for + modifying args in case it is called from ``ndarray``. + None otherwise. + """ + + self._numpy_object = numpy_object + self._cupy_object = cupy_object + self._fallback_array = array + + def __instancecheck__(self, instance): + """ + Enable support for isinstance(instance, _RecursiveAttr instance) + by redirecting it to appropriate isinstance method. + """ + + if self._cupy_object is not None: + return isinstance(instance, self._cupy_object) + + return isinstance(instance, self._numpy_object) + + def __getattr__(self, attr): + """ + Catches attributes corresponding to numpy. + + Runs recursively till attribute gets called. + Or numpy ScalarType is retrieved. + + Args: + attr (str): Attribute of _RecursiveAttr class object. + + Returns: + (_RecursiveAttr object, NumPy scalar): + Returns_RecursiveAttr object with new numpy_object, + cupy_object. OR + Returns objects in cupy which is an alias + of numpy object. OR + Returns wrapper objects, `ndarray`, `vectorize`. 
+ """ + + numpy_object = getattr(self._numpy_object, attr) + cupy_object = getattr(self._cupy_object, attr, None) + + if numpy_object is np.ndarray: + return ndarray + + if numpy_object is np.vectorize: + return vectorize + + if numpy_object is cupy_object: + return numpy_object + + return _RecursiveAttr(numpy_object, cupy_object) + + def __repr__(self): + + if isinstance(self._numpy_object, types.ModuleType): + return "".format( + self._numpy_object.__name__, + getattr(self._cupy_object, '__name__', None)) + + return "".format( + self._numpy_object, self._cupy_object) + + @property + def __doc__(self): + return self._numpy_object.__doc__ + + @staticmethod + def _is_cupy_compatible(arg): + """ + Returns False if CuPy's functions never accept the arguments as + parameters due to the following reasons. + - The inputs include an object of a NumPy's specific class other than + `np.ndarray`. + - The inputs include a dtype which is not supported in CuPy. + """ + + if isinstance(arg, ndarray): + if not arg._supports_cupy: + return False + + if isinstance(arg, (tuple, list)): + return all(_RecursiveAttr._is_cupy_compatible(i) for i in arg) + + if isinstance(arg, dict): + bools = [_RecursiveAttr._is_cupy_compatible(arg[i]) for i in arg] + return all(bools) + + return True + + def __call__(self, *args, **kwargs): + """ + Gets invoked when last attribute of _RecursiveAttr class gets called. + Calls _cupy_object if not None else call _numpy_object. + + Args: + args (tuple): Arguments. + kwargs (dict): Keyword arguments. 
+ + Returns: + (res, ndarray): Returns of methods call_cupy or call_numpy + """ + + if not callable(self._numpy_object): + raise TypeError("'{}' object is not callable".format( + type(self._numpy_object).__name__)) + + # _RecursiveAttr gets called from ndarray + if self._fallback_array is not None: + args = ((self._fallback_array,) + args) + + if self._cupy_object is not None and \ + _RecursiveAttr._is_cupy_compatible((args, kwargs)): + try: + return _call_cupy(self._cupy_object, args, kwargs) + except Exception: + return _call_numpy(self._numpy_object, args, kwargs) + + notification._dispatch_notification(self._numpy_object) + return _call_numpy(self._numpy_object, args, kwargs) + + +numpy = _RecursiveAttr(np, cp) + + +# ----------------------------------------------------------------------------- +# proxying of ndarray magic methods and wrappers +# ----------------------------------------------------------------------------- + + +class ndarray(object): + """ + Wrapper around cupy.ndarray + Supports cupy.ndarray.__init__ as well as, + gets initialized with a cupy ndarray. + """ + + __doc__ = np.ndarray.__doc__ + + def __new__(cls, *args, **kwargs): + """ + If `_initial_array` and `_supports_cupy` are arguments, + initialize cls(ndarray). + Else get cupy.ndarray from provided arguments, + then initialize cls(ndarray). + """ + _initial_array = kwargs.get('_initial_array', None) + if _initial_array is not None: + return object.__new__(cls) + + cupy_ndarray_init = cp.ndarray(*args, **kwargs) + return cls(_initial_array=cupy_ndarray_init, _supports_cupy=True) + + def __init__(self, *args, **kwargs): + """ + Args: + _initial_array (None, cp.ndarray/np.ndarray(including variants)): + If _initial_array is None, object is not initialized. + Otherwise, _initial_array (ndarray) would be set to + _cupy_array and/or _numpy_array depending upon _supports_cupy. + _supports_cupy (bool): If _supports_cupy is True, _initial_array + is set as _cupy_array and _numpy_array. 
+ Otherwise, _initial_array is set as only _numpy_array. + + Attributes: + _cupy_array (None or cp.ndarray): ndarray fully compatible with + CuPy. This will be always set to a ndarray in GPU. + _numpy_array (None or np.ndarray(including variants)): ndarray not + supported by CuPy. Such as np.ndarray (where dtype is not in + '?bhilqBHILQefdFD') and it's variants. This will be always set + to a ndarray in CPU. + _supports_cupy (bool): If _supports_cupy is True, data of array + will contain in _cupy_array and _numpy_array. + Else only _numpy_array will have the data. + """ + + _supports_cupy = kwargs.pop('_supports_cupy', None) + _initial_array = kwargs.pop('_initial_array', None) + if _initial_array is None: + return + + self._cupy_array = None + self._numpy_array = None + self.base = None + self._supports_cupy = _supports_cupy + + assert isinstance(_initial_array, (cp.ndarray, np.ndarray)) + if _supports_cupy: + if type(_initial_array) is cp.ndarray: + # _initial_array is in GPU memory + # called by _store_array_from_cupy + self._cupy_array = _initial_array + self._remember_numpy = False + else: + # _initial_array is in CPU memory + # called by _store_array_from_numpy + self._numpy_array = _initial_array + self._remember_numpy = True + else: + self._numpy_array = _initial_array + + @classmethod + def _store_array_from_cupy(cls, array): + return cls(_initial_array=array, _supports_cupy=True) + + @classmethod + def _store_array_from_numpy(cls, array): + if type(array) is np.ndarray and \ + array.dtype.kind in '?bhilqBHILQefdFD': + return cls(_initial_array=array, _supports_cupy=True) + + return cls(_initial_array=array, _supports_cupy=False) + + @property + def dtype(self): + if self._supports_cupy and not self._remember_numpy: + return self._cupy_array.dtype + return self._numpy_array.dtype + + def __getattr__(self, attr): + """ + Catches attributes corresponding to ndarray. + + Args: + attr (str): Attribute of ndarray class. 
+ + Returns: + (_RecursiveAttr object, self._array.attr): + Returns_RecursiveAttr object with numpy_object, cupy_object. + Returns self._array.attr if attr is not callable. + """ + + if self._supports_cupy: + cupy_object = getattr(cp.ndarray, attr, None) + numpy_object = getattr(np.ndarray, attr) + else: + cupy_object = None + numpy_object = getattr(self._numpy_array.__class__, attr) + + if not callable(numpy_object): + if self._supports_cupy: + if self._remember_numpy: + self._update_cupy_array() + return getattr(self._cupy_array, attr) + return getattr(self._numpy_array, attr) + + return _RecursiveAttr(numpy_object, cupy_object, self) + + def _get_cupy_array(self): + """ + Returns _cupy_array (cupy.ndarray) of ndarray object. And marks + self(ndarray) and it's base (if exist) as numpy not up-to-date. + """ + base = self.base + if base is not None: + base._remember_numpy = False + self._remember_numpy = False + return self._cupy_array + + def _get_numpy_array(self): + """ + Returns _numpy_array (ex: np.ndarray, numpy.ma.MaskedArray, + numpy.chararray etc.) of ndarray object. And marks self(ndarray) + and it's base (if exist) as numpy up-to-date. + """ + base = self.base + if base is not None and base._supports_cupy: + base._remember_numpy = True + if self._supports_cupy: + self._remember_numpy = True + return self._numpy_array + + def _update_numpy_array(self): + """ + Updates _numpy_array from _cupy_array. + To be executed before calling numpy function. 
+ """ + base = self.base + _type = np.ndarray if self._supports_cupy \ + else self._numpy_array.__class__ + + if self._supports_cupy: + # cupy-compatible + if base is None: + if not self._remember_numpy: + if self._numpy_array is None: + self._numpy_array = cp.asnumpy(self._cupy_array) + else: + self._cupy_array.get(out=self._numpy_array) + else: + if not base._remember_numpy: + base._update_numpy_array() + if self._numpy_array is None: + self._numpy_array = base._numpy_array.view(type=_type) + self._numpy_array.shape = self._cupy_array.shape + self._numpy_array.strides = self._cupy_array.strides + else: + # not cupy-compatible + if base is not None: + assert base._supports_cupy + if not base._remember_numpy: + base._update_numpy_array() + + def _update_cupy_array(self): + """ + Updates _cupy_array from _numpy_array. + To be executed before calling cupy function. + """ + base = self.base + + if base is None: + if self._remember_numpy: + if self._cupy_array is None: + self._cupy_array = cp.array(self._numpy_array) + else: + self._cupy_array[:] = self._numpy_array + else: + if base._remember_numpy: + base._update_cupy_array() + + +def _create_magic_methods(): + """ + Set magic methods of cupy.ndarray as methods of fallback.ndarray. 
+ """ + + # Decorator for ndarray magic methods + def make_method(name): + def method(self, *args, **kwargs): + CLASS = cp.ndarray if self._supports_cupy \ + else self._numpy_array.__class__ + _method = getattr(CLASS, name) + args = ((self,) + args) + if self._supports_cupy: + return _call_cupy(_method, args, kwargs) + return _call_numpy(_method, args, kwargs) + method.__doc__ = getattr(np.ndarray, name).__doc__ + return method + + for method in ( + # Comparison operators: + '__eq__', '__ne__', '__lt__', '__gt__', '__le__', '__ge__', + + # Unary operations: + '__neg__', '__pos__', '__abs__', '__invert__', + + # Arithmetic: + '__add__', '__sub__', '__mul__', '__truediv__', '__floordiv__', + '__mod__', '__divmod__', '__pow__', '__lshift__', '__rshift__', + '__and__', '__or__', '__xor__', + + # Arithmetic, in-place: + '__iadd__', '__isub__', '__imul__', '__itruediv__', '__ifloordiv__', + '__imod__', '__ipow__', '__ilshift__', '__irshift__', + '__iand__', '__ior__', '__ixor__', + '__matmul__', + + # reflected-methods: + '__radd__', '__rsub__', '__rmul__', '__rtruediv__', '__rfloordiv__', + '__rmod__', '__rdivmod__', '__rpow__', '__rlshift__', '__rrshift__', + '__rand__', '__ror__', '__rxor__', + '__rmatmul__', + + # For standard library functions: + '__copy__', '__deepcopy__', '__reduce__', + + # Container customization: + '__iter__', '__len__', '__getitem__', '__setitem__', + + # Conversion: + '__bool__', '__int__', '__float__', '__complex__', + + # String representations: + '__repr__', '__str__' + ): + setattr(ndarray, method, make_method(method)) + + +_create_magic_methods() + + +class vectorize(object): + + __doc__ = np.vectorize.__doc__ + + def __init__(self, *args, **kwargs): + # NumPy will raise error if pyfunc is a cupy method + self.__dict__['_is_numpy_pyfunc'] = False + self.__dict__['_cupy_support'] = False + if isinstance(args[0], _RecursiveAttr): + self.__dict__['_is_numpy_pyfunc'] = True + if args[0]._cupy_object: + self.__dict__['_cupy_support'] = True + 
args = (args[0]._numpy_object,) + args[1:] + notification._dispatch_notification(np.vectorize) + self.__dict__['vec_obj'] = np.vectorize(*args, **kwargs) + self.__dict__['__doc__'] = self.__dict__['vec_obj'].__doc__ + + def __getattr__(self, attr): + return getattr(self.__dict__['vec_obj'], attr) + + def __setattr__(self, name, value): + return setattr(self.vec_obj, name, value) + + def __call__(self, *args, **kwargs): + if self._is_numpy_pyfunc: + notification._dispatch_notification( + self.vec_obj.pyfunc, self._cupy_support) + return _call_numpy(self.vec_obj, args, kwargs) + + +# ----------------------------------------------------------------------------- +# Data Transfer methods +# ----------------------------------------------------------------------------- + + +def _get_xp_args(ndarray_instance, to_xp, arg): + """ + Converts ndarray_instance type object to target object using to_xp. + + Args: + ndarray_instance (numpy.ndarray, cupy.ndarray or fallback.ndarray): + Objects of type `ndarray_instance` will be converted using `to_xp`. + to_xp (FunctionType): Method to convert ndarray_instance type objects. + arg (object): `ndarray_instance`, `tuple`, `list` and `dict` type + objects will be returned by either converting the object or it's + elements, if object is iterable. Objects of other types is + returned as it is. + + Returns: + Return data structure will be same as before after converting ndarrays. 
+ """ + + if isinstance(arg, ndarray_instance): + return to_xp(arg) + + if isinstance(arg, tuple): + return tuple([_get_xp_args(ndarray_instance, to_xp, x) for x in arg]) + + if isinstance(arg, dict): + return {x_name: _get_xp_args(ndarray_instance, to_xp, x) + for x_name, x in arg.items()} + + if isinstance(arg, list): + return [_get_xp_args(ndarray_instance, to_xp, x) for x in arg] + + return arg + + +def _convert_numpy_to_fallback(numpy_res): + return _get_xp_args(np.ndarray, ndarray._store_array_from_numpy, numpy_res) + + +def _convert_fallback_to_numpy(args, kwargs): + return _get_xp_args(ndarray, ndarray._get_numpy_array, (args, kwargs)) + + +def _convert_fallback_to_cupy(args, kwargs): + return _get_xp_args(ndarray, ndarray._get_cupy_array, (args, kwargs)) + + +def _convert_cupy_to_fallback(cupy_res): + return _get_xp_args(cp.ndarray, ndarray._store_array_from_cupy, cupy_res) + + +def _update_numpy_args(args, kwargs): + return _get_xp_args(ndarray, ndarray._update_numpy_array, (args, kwargs)) + + +def _update_cupy_args(args, kwargs): + return _get_xp_args(ndarray, ndarray._update_cupy_array, (args, kwargs)) + + +# ----------------------------------------------------------------------------- +# utils +# ----------------------------------------------------------------------------- + + +def _call_cupy(func, args, kwargs): + """ + Calls cupy function with *args and **kwargs and + does necessary data transfers. + + Args: + func: A cupy function that needs to be called. + args (tuple): Arguments. + kwargs (dict): Keyword arguments. + + Returns: + Result after calling func and performing data transfers. 
+ """ + + _update_cupy_args(args, kwargs) + cupy_args, cupy_kwargs = _convert_fallback_to_cupy(args, kwargs) + cupy_res = func(*cupy_args, **cupy_kwargs) + + # If existing argument is being returned + ext_res = _get_same_reference( + cupy_res, cupy_args, cupy_kwargs, args, kwargs) + if ext_res is not None: + return ext_res + + if isinstance(cupy_res, cp.ndarray): + if cupy_res.base is None: + # Don't share memory + fallback_res = _convert_cupy_to_fallback(cupy_res) + else: + # Share memory with one of the arguments + base_arg = _get_same_reference( + cupy_res.base, cupy_args, cupy_kwargs, args, kwargs) + fallback_res = _convert_cupy_to_fallback(cupy_res) + fallback_res.base = base_arg + return fallback_res + return cupy_res + + +def _call_numpy(func, args, kwargs): + """ + Calls numpy function with *args and **kwargs and + does necessary data transfers. + + Args: + func: A numpy function that needs to be called. + args (tuple): Arguments. + kwargs (dict): Keyword arguments. + + Returns: + Result after calling func and performing data transfers. 
+ """ + + _update_numpy_args(args, kwargs) + numpy_args, numpy_kwargs = _convert_fallback_to_numpy(args, kwargs) + numpy_res = func(*numpy_args, **numpy_kwargs) + + # If existing argument is being returned + ext_res = _get_same_reference( + numpy_res, numpy_args, numpy_kwargs, args, kwargs) + if ext_res is not None: + return ext_res + + if isinstance(numpy_res, np.ndarray): + if numpy_res.base is None: + # Don't share memory + fallback_res = _convert_numpy_to_fallback(numpy_res) + else: + # Share memory with one of the arguments + base_arg = _get_same_reference( + numpy_res.base, numpy_args, numpy_kwargs, args, kwargs) + fallback_res = _convert_numpy_to_fallback(numpy_res) + fallback_res.base = base_arg + return fallback_res + return numpy_res + + +def _get_same_reference(res, args, kwargs, ret_args, ret_kwargs): + """ + Returns object corresponding to res in (args, kwargs) + from (ret_args, ret_kwargs) + """ + for i in range(len(args)): + if res is args[i]: + return ret_args[i] + + for key in kwargs: + if res is kwargs[key]: + return ret_kwargs[key] + + return diff --git a/vllm/lib/python3.10/site-packages/cupyx/fallback_mode/notification.py b/vllm/lib/python3.10/site-packages/cupyx/fallback_mode/notification.py new file mode 100644 index 0000000000000000000000000000000000000000..9912a0954ec714889be92f94683a7fb0f12c6272 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/fallback_mode/notification.py @@ -0,0 +1,75 @@ +""" +Methods related to notifications. +""" + +import warnings + +from cupyx import _ufunc_config + + +def _init_warnings(): + FallbackWarning = type('FallbackWarning', (Warning,), {}) + warnings.simplefilter(action='always', category=FallbackWarning) + return FallbackWarning + + +def _dispatch_notification(func, cupy_support=False): + """ + Dispatch notifications using appropriate dispatch type. 
+ """ + + dispatch_type = _ufunc_config.get_config_fallback_mode() + + _module = getattr(func, '__module__', None) + _name = getattr(func, '__name__', None) + + if not cupy_support: + if _name and _module: + msg = "'{}' method not in cupy, falling back to '{}.{}'".format( + _name, _module, _name) + elif _name: + msg = "'{}' method not in cupy, ".format(_name) + msg += "falling back to its numpy implementation" + else: + msg = "This method is not available in cupy, " + msg += "falling back to numpy" + + if _name: + raise_msg = "'{}' method not found in cupy".format(_name) + else: + raise_msg = "This method is not available in cupy" + else: + if _name and _module: + msg = "'{}' method is available in cupy but ".format(_name) + msg += "cannot be used, falling back to '{}.{}'".format( + _module, _name) + elif _name: + msg = "'{}' method is available in cupy but ".format(_name) + msg += "cannot be used, falling back to its numpy implementation" + else: + msg = "This method is available in cupy, but cannot be used" + msg += "falling back to numpy" + + if _name: + raise_msg = "'{}' method is available in cupy ".format(_name) + raise_msg += "but cannot be used" + else: + raise_msg = "This method is available in cupy but cannot be used" + + if dispatch_type == 'print': + print("Warning: {}".format(msg)) + + elif dispatch_type == 'warn': + warnings.warn(msg, FallbackWarning, stacklevel=3) + + elif dispatch_type == 'ignore': + pass + + elif dispatch_type == 'raise': + raise AttributeError(raise_msg) + + else: + assert False + + +FallbackWarning = _init_warnings() diff --git a/vllm/lib/python3.10/site-packages/cupyx/lapack.py b/vllm/lib/python3.10/site-packages/cupyx/lapack.py new file mode 100644 index 0000000000000000000000000000000000000000..0372d0f26e916580313f98d4e4e1efa7bc78dc03 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/lapack.py @@ -0,0 +1,348 @@ +import numpy as _numpy + +import cupy as _cupy +from cupy_backends.cuda.libs import cublas as _cublas 
+from cupy.cuda import device as _device + + +def gesv(a, b): + """Solve a linear matrix equation using cusolverDngetr[fs](). + + Computes the solution to a system of linear equation ``ax = b``. + + Args: + a (cupy.ndarray): The matrix with dimension ``(M, M)``. + b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``. + + Returns: + cupy.ndarray: + The matrix with dimension ``(M)`` or ``(M, K)``. + + Note: ``a`` and ``b`` will be overwritten. + """ + from cupy_backends.cuda.libs import cusolver as _cusolver + + if a.ndim != 2: + raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim)) + if b.ndim not in (1, 2): + raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim)) + if a.shape[0] != a.shape[1]: + raise ValueError('a must be a square matrix.') + if a.shape[0] != b.shape[0]: + raise ValueError('shape mismatch (a: {}, b: {}).'. + format(a.shape, b.shape)) + if a.dtype != b.dtype: + raise TypeError('dtype mismatch (a: {}, b: {})'. + format(a.dtype, b.dtype)) + dtype = a.dtype + if dtype == 'f': + t = 's' + elif dtype == 'd': + t = 'd' + elif dtype == 'F': + t = 'c' + elif dtype == 'D': + t = 'z' + else: + raise TypeError('unsupported dtype (actual:{})'.format(a.dtype)) + helper = getattr(_cusolver, t + 'getrf_bufferSize') + getrf = getattr(_cusolver, t + 'getrf') + getrs = getattr(_cusolver, t + 'getrs') + + n = b.shape[0] + nrhs = b.shape[1] if b.ndim == 2 else 1 + if a._f_contiguous: + trans = _cublas.CUBLAS_OP_N + elif a._c_contiguous: + trans = _cublas.CUBLAS_OP_T + else: + raise ValueError('a must be F-contiguous or C-contiguous.') + if not b._f_contiguous: + raise ValueError('b must be F-contiguous.') + + handle = _device.get_cusolver_handle() + dipiv = _cupy.empty(n, dtype=_numpy.int32) + dinfo = _cupy.empty(1, dtype=_numpy.int32) + lwork = helper(handle, n, n, a.data.ptr, n) + dwork = _cupy.empty(lwork, dtype=a.dtype) + # LU factrization (A = L * U) + getrf(handle, n, n, a.data.ptr, n, dwork.data.ptr, dipiv.data.ptr, + 
dinfo.data.ptr) + _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed( + getrf, dinfo) + # Solves Ax = b + getrs(handle, trans, n, nrhs, a.data.ptr, n, + dipiv.data.ptr, b.data.ptr, n, dinfo.data.ptr) + _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed( + getrs, dinfo) + + +def gels(a, b): + """Solves over/well/under-determined linear systems. + + Computes least-square solution to equation ``ax = b` by QR factorization + using cusolverDngeqrf(). + + Args: + a (cupy.ndarray): The matrix with dimension ``(M, N)``. + b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``. + + Returns: + cupy.ndarray: + The matrix with dimension ``(N)`` or ``(N, K)``. + """ + from cupy_backends.cuda.libs import cusolver as _cusolver + + if a.ndim != 2: + raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim)) + if b.ndim == 1: + nrhs = 1 + elif b.ndim == 2: + nrhs = b.shape[1] + else: + raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim)) + if a.shape[0] != b.shape[0]: + raise ValueError('shape mismatch (a: {}, b: {}).'. + format(a.shape, b.shape)) + if a.dtype != b.dtype: + raise ValueError('dtype mismatch (a: {}, b: {}).'. 
+ format(a.dtype, b.dtype)) + + dtype = a.dtype + if dtype == 'f': + t = 's' + elif dtype == 'd': + t = 'd' + elif dtype == 'F': + t = 'c' + elif dtype == 'D': + t = 'z' + else: + raise ValueError('unsupported dtype (actual: {})'.format(dtype)) + + geqrf_helper = getattr(_cusolver, t + 'geqrf_bufferSize') + geqrf = getattr(_cusolver, t + 'geqrf') + trsm = getattr(_cublas, t + 'trsm') + if t in 'sd': + ormqr_helper = getattr(_cusolver, t + 'ormqr_bufferSize') + ormqr = getattr(_cusolver, t + 'ormqr') + else: + ormqr_helper = getattr(_cusolver, t + 'unmqr_bufferSize') + ormqr = getattr(_cusolver, t + 'unmqr') + + no_trans = _cublas.CUBLAS_OP_N + if dtype.char in 'fd': + trans = _cublas.CUBLAS_OP_T + else: + trans = _cublas.CUBLAS_OP_C + + m, n = a.shape + mn_min = min(m, n) + dev_info = _cupy.empty(1, dtype=_numpy.int32) + tau = _cupy.empty(mn_min, dtype=dtype) + cusolver_handle = _device.get_cusolver_handle() + cublas_handle = _device.get_cublas_handle() + one = _numpy.array(1.0, dtype=dtype) + + if m >= n: # over/well-determined systems + a = a.copy(order='F') + b = b.copy(order='F') + + # geqrf (QR decomposition, A = Q * R) + ws_size = geqrf_helper(cusolver_handle, m, n, a.data.ptr, m) + workspace = _cupy.empty(ws_size, dtype=dtype) + geqrf(cusolver_handle, m, n, a.data.ptr, m, tau.data.ptr, + workspace.data.ptr, ws_size, dev_info.data.ptr) + _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed( + geqrf, dev_info) + + # ormqr (Computes Q^T * B) + ws_size = ormqr_helper( + cusolver_handle, _cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min, + a.data.ptr, m, tau.data.ptr, b.data.ptr, m) + workspace = _cupy.empty(ws_size, dtype=dtype) + ormqr(cusolver_handle, _cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, + mn_min, a.data.ptr, m, tau.data.ptr, b.data.ptr, m, + workspace.data.ptr, ws_size, dev_info.data.ptr) + _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed( + ormqr, dev_info) + + # trsm (Solves R * X = (Q^T * B)) + 
trsm(cublas_handle, _cublas.CUBLAS_SIDE_LEFT, + _cublas.CUBLAS_FILL_MODE_UPPER, no_trans, + _cublas.CUBLAS_DIAG_NON_UNIT, mn_min, nrhs, + one.ctypes.data, a.data.ptr, m, b.data.ptr, m) + + return b[:n] + + else: # under-determined systems + a = a.conj().T.copy(order='F') + bb = b + out_shape = (n,) if b.ndim == 1 else (n, nrhs) + b = _cupy.zeros(out_shape, dtype=dtype, order='F') + b[:m] = bb + + # geqrf (QR decomposition, A^T = Q * R) + ws_size = geqrf_helper(cusolver_handle, n, m, a.data.ptr, n) + workspace = _cupy.empty(ws_size, dtype=dtype) + geqrf(cusolver_handle, n, m, a.data.ptr, n, tau.data.ptr, + workspace.data.ptr, ws_size, dev_info.data.ptr) + _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed( + geqrf, dev_info) + + # trsm (Solves R^T * Z = B) + trsm(cublas_handle, _cublas.CUBLAS_SIDE_LEFT, + _cublas.CUBLAS_FILL_MODE_UPPER, trans, + _cublas.CUBLAS_DIAG_NON_UNIT, m, nrhs, + one.ctypes.data, a.data.ptr, n, b.data.ptr, n) + + # ormqr (Computes Q * Z) + ws_size = ormqr_helper( + cusolver_handle, _cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs, + mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n) + workspace = _cupy.empty(ws_size, dtype=dtype) + ormqr(cusolver_handle, _cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs, + mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n, + workspace.data.ptr, ws_size, dev_info.data.ptr) + _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed( + ormqr, dev_info) + + return b + + +def _batched_posv(a, b): + from cupy_backends.cuda.libs import cusolver as _cusolver + import cupyx.cusolver + + if not cupyx.cusolver.check_availability('potrsBatched'): + raise RuntimeError('potrsBatched is not available') + + dtype = _numpy.promote_types(a.dtype, b.dtype) + dtype = _numpy.promote_types(dtype, 'f') + + if dtype == 'f': + potrfBatched = _cusolver.spotrfBatched + potrsBatched = _cusolver.spotrsBatched + elif dtype == 'd': + potrfBatched = _cusolver.dpotrfBatched + potrsBatched = _cusolver.dpotrsBatched + 
elif dtype == 'F': + potrfBatched = _cusolver.cpotrfBatched + potrsBatched = _cusolver.cpotrsBatched + elif dtype == 'D': + potrfBatched = _cusolver.zpotrfBatched + potrsBatched = _cusolver.zpotrsBatched + else: + msg = ('dtype must be float32, float64, complex64 or complex128' + ' (actual: {})'.format(a.dtype)) + raise ValueError(msg) + + a = a.astype(dtype, order='C', copy=True) + ap = _cupy._core._mat_ptrs(a) + lda, n = a.shape[-2:] + batch_size = int(_numpy.prod(a.shape[:-2])) + + handle = _device.get_cusolver_handle() + uplo = _cublas.CUBLAS_FILL_MODE_LOWER + dev_info = _cupy.empty(batch_size, dtype=_numpy.int32) + + # Cholesky factorization + potrfBatched(handle, uplo, n, ap.data.ptr, lda, dev_info.data.ptr, + batch_size) + _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed( + potrfBatched, dev_info) + + b_shape = b.shape + b = b.conj().reshape(batch_size, n, -1).astype(dtype, order='C', copy=True) + bp = _cupy._core._mat_ptrs(b) + ldb, nrhs = b.shape[-2:] + dev_info = _cupy.empty(1, dtype=_numpy.int32) + + # NOTE: potrsBatched does not currently support nrhs > 1 (CUDA v10.2) + # Solve: A[i] * X[i] = B[i] + potrsBatched(handle, uplo, n, nrhs, ap.data.ptr, lda, bp.data.ptr, ldb, + dev_info.data.ptr, batch_size) + _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed( + potrsBatched, dev_info) + + # TODO: check if conj() is necessary when nrhs > 1 + return b.conj().reshape(b_shape) + + +def posv(a, b): + """Solve the linear equations A x = b via Cholesky factorization of A, + where A is a real symmetric or complex Hermitian positive-definite matrix. + + If matrix ``A`` is not positive definite, Cholesky factorization fails + and it raises an error. + + Note: For batch input, NRHS > 1 is not currently supported. + + Args: + a (cupy.ndarray): Array of real symmetric or complex hermitian + matrices with dimension (..., N, N). + b (cupy.ndarray): right-hand side (..., N) or (..., N, NRHS). 
+ Returns: + x (cupy.ndarray): The solution (shape matches b). + """ + from cupy_backends.cuda.libs import cusolver as _cusolver + + _util = _cupy.linalg._util + _util._assert_cupy_array(a, b) + _util._assert_stacked_2d(a) + _util._assert_stacked_square(a) + + if a.ndim > 2: + return _batched_posv(a, b) + + dtype = _numpy.promote_types(a.dtype, b.dtype) + dtype = _numpy.promote_types(dtype, 'f') + + if dtype == 'f': + potrf = _cusolver.spotrf + potrf_bufferSize = _cusolver.spotrf_bufferSize + potrs = _cusolver.spotrs + elif dtype == 'd': + potrf = _cusolver.dpotrf + potrf_bufferSize = _cusolver.dpotrf_bufferSize + potrs = _cusolver.dpotrs + elif dtype == 'F': + potrf = _cusolver.cpotrf + potrf_bufferSize = _cusolver.cpotrf_bufferSize + potrs = _cusolver.cpotrs + elif dtype == 'D': + potrf = _cusolver.zpotrf + potrf_bufferSize = _cusolver.zpotrf_bufferSize + potrs = _cusolver.zpotrs + else: + msg = ('dtype must be float32, float64, complex64 or complex128' + ' (actual: {})'.format(a.dtype)) + raise ValueError(msg) + + a = a.astype(dtype, order='F', copy=True) + lda, n = a.shape + + handle = _device.get_cusolver_handle() + uplo = _cublas.CUBLAS_FILL_MODE_LOWER + dev_info = _cupy.empty(1, dtype=_numpy.int32) + + worksize = potrf_bufferSize(handle, uplo, n, a.data.ptr, lda) + workspace = _cupy.empty(worksize, dtype=dtype) + + # Cholesky factorization + potrf(handle, uplo, n, a.data.ptr, lda, workspace.data.ptr, + worksize, dev_info.data.ptr) + _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed( + potrf, dev_info) + + b_shape = b.shape + b = b.reshape(n, -1).astype(dtype, order='F', copy=True) + ldb, nrhs = b.shape + + # Solve: A * X = B + potrs(handle, uplo, n, nrhs, a.data.ptr, lda, b.data.ptr, ldb, + dev_info.data.ptr) + _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed( + potrs, dev_info) + + return _cupy.ascontiguousarray(b.reshape(b_shape)) diff --git a/vllm/lib/python3.10/site-packages/cupyx/optimizing/__init__.py 
def _optimize(
        optimize_config, target_func, suggest_func,
        default_best, ignore_error=()):
    """Run an Optuna study over the parameters proposed by ``suggest_func``.

    ``target_func`` is benchmarked once per trial and the mean GPU time is
    minimized.  Exceptions listed in ``ignore_error`` make the trial count
    as infinitely slow instead of aborting the study.  Returns the best
    trial found.
    """
    assert isinstance(optimize_config, _optimize_config._OptimizationConfig)
    assert callable(target_func)
    assert callable(suggest_func)

    def objective(trial):
        trial_args = suggest_func(trial)
        budget = optimize_config.max_total_time_per_trial
        try:
            perf = profiler.benchmark(
                target_func, trial_args, max_duration=budget)
        except Exception as err:
            if not isinstance(err, ignore_error):
                raise
            # An ignorable failure is scored as infinitely slow.
            return math.inf
        return perf.gpu_times.mean()

    study = optuna.create_study()
    # Seed the search with the known-good default configuration.
    study.enqueue_trial(default_best)
    study.optimize(
        objective,
        n_trials=optimize_config.max_trials,
        timeout=optimize_config.timeout)
    return study.best_trial


@contextlib.contextmanager
def optimize(*, key=None, path=None, readonly=False, **config_dict):
    """Context manager that optimizes kernel launch parameters.

    Inside this context, CuPy's routines search for the best kernel launch
    parameter values (e.g., the number of threads and blocks).  Found values
    are cached and reused, keyed on the shapes, strides and dtypes of the
    given input arrays.

    Args:
        key (string or None): The cache key of optimizations.
        path (string or None): The path to save optimization cache records.
            When the path is specified and exists, records are loaded from
            it first.  Unless ``readonly`` is ``True``, records are saved
            back to the path after the optimization.
        readonly (bool): See the description of ``path`` option.
        max_trials (int): The number of trials that defaults to 100.
        timeout (float):
            Stops study after the given number of seconds. Default is 1.
        max_total_time_per_trial (float):
            Repeats measuring the execution time of the routine for the
            given number of seconds. Default is 0.1.

    Examples
    --------
    >>> import cupy
    >>> from cupyx import optimizing
    >>>
    >>> x = cupy.arange(100)
    >>> with optimizing.optimize():
    ...     cupy.sum(x)
    ...
    array(4950)

    .. note::
      Optuna (https://optuna.org) installation is required.
      Currently it works for reduction operations only.
    """
    if not _optuna_available:
        raise RuntimeError(
            'Optuna is required to run optimization. '
            'See https://optuna.org/ for the installation instructions.')

    previous = _optimize_config.get_current_context()
    context = _optimize_config.get_new_context(key, _optimize, config_dict)
    _optimize_config.set_current_context(context)

    if path is not None:
        if os.path.exists(path):
            context.load(path)
        elif readonly:
            warnings.warn('''
The specified path {} could not be found, and the readonly option is set.
The optimization results will never be stored.
'''.format(path))

    try:
        yield context
        # Persist new results unless the user asked for a read-only cache.
        if path is not None and not readonly:
            if context._is_dirty() or not os.path.exists(path):
                context.save(path)
    finally:
        # Always restore the previously active optimization context.
        _optimize_config.set_current_context(previous)
class _PerfCaseResult:
    """Timing results collected by :func:`~cupyx.profiler.benchmark`.

    Row 0 of the stored 2-D array holds CPU times; each following row holds
    the GPU times of one measured device.  Converting an instance to a
    string yields simple summary statistics.

    .. warning::
        This API is currently experimental and subject to change in future
        releases.

    """

    def __init__(self, name, ts, devices):
        # ts: 2-D array with one CPU row plus one GPU row per device.
        assert ts.ndim == 2
        assert ts.shape[0] == len(devices) + 1
        assert ts.shape[1] > 0
        self.name = name
        self._ts = ts
        self._devices = devices

    def __repr__(self) -> str:
        """Return the summary string, including GPU statistics."""
        return self.to_str(show_gpu=True)

    @property
    def cpu_times(self) -> _numpy.ndarray:
        """A :class:`numpy.ndarray` of shape ``(n_repeat,)`` holding times
        spent on CPU in seconds.

        These values are deltas of the host-side performance counter
        (:func:`time.perf_counter`) between repeat steps.
        """
        return self._ts[0]

    @property
    def gpu_times(self) -> _numpy.ndarray:
        """A :class:`numpy.ndarray` of shape ``(len(devices), n_repeat)``
        holding times spent on GPU in seconds.

        These values are measured using ``cudaEventElapsedTime`` with events
        recorded before/after each repeat step.
        """
        return self._ts[1:]

    @staticmethod
    def _to_str_per_item(device_name, t):
        # Format one row of timings (seconds) as microsecond statistics.
        assert t.ndim == 1
        assert t.size > 0
        in_us = t * 1e6

        text = '    {}: {:9.03f} us'.format(device_name, in_us.mean())
        if t.size > 1:
            text += (
                '   +/- {:6.03f} (min: {:9.03f} / max: {:9.03f}) us'.format(
                    in_us.std(), in_us.min(), in_us.max()))
        return text

    def to_str(self, show_gpu=False):
        parts = [self._to_str_per_item('CPU', self._ts[0])]
        if show_gpu:
            parts.extend(
                self._to_str_per_item('GPU-{}'.format(dev), self._ts[1 + idx])
                for idx, dev in enumerate(self._devices))
        return '{:<20s}:{}'.format(self.name, ' '.join(parts))

    def __str__(self):
        return self.to_str(show_gpu=True)
def benchmark(
        func, args=(), kwargs=None, n_repeat=10000, *,
        name=None, n_warmup=10, max_duration=_math.inf, devices=None):
    """ Timing utility for measuring time spent by both CPU and GPU.

    This function is a very convenient helper for setting up a timing test.
    The GPU time is properly recorded by synchronizing internal streams. As
    a result, to time a multi-GPU function all participating devices must be
    passed as the ``devices`` argument so that this helper knows which
    devices to record. A simple example is given as follows:

    .. code-block:: py

        import cupy as cp
        from cupyx.profiler import benchmark

        def f(a, b):
            return 3 * cp.sin(-a) * b

        a = 0.5 - cp.random.random((100,))
        b = cp.random.random((100,))
        print(benchmark(f, (a, b), n_repeat=1000))


    Args:
        func (callable): a callable object to be timed.
        args (tuple): positional arguments to be passed to the callable.
        kwargs (dict): keyword arguments to be passed to the callable.
            Defaults to ``None``, which is treated as an empty dict.
        n_repeat (int): number of times the callable is called. Increasing
            this value would improve the collected statistics at the cost
            of longer test time.
        name (str): the function name to be reported. If not given, the
            callable's ``__name__`` attribute is used.
        n_warmup (int): number of times the callable is called. The warm-up
            runs are not timed.
        max_duration (float): the maximum time (in seconds) that the entire
            test can use. If the taken time is longer than this limit, the
            test is stopped and the statistics collected up to the
            breakpoint is reported.
        devices (tuple): a tuple of device IDs (int) that will be timed
            during the timing test. If not given, the current device is
            used.

    Returns:
        :class:`~cupyx.profiler._time._PerfCaseResult`:
            an object collecting all test results.

    Raises:
        ValueError: if any argument has an unexpected type.
    """
    # NOTE: the former ``kwargs={}`` default was a shared mutable default
    # argument; a ``None`` sentinel with a fresh dict is equivalent and safe.
    if kwargs is None:
        kwargs = {}

    if name is None:
        name = func.__name__

    if devices is None:
        devices = (_cupy.cuda.get_device_id(),)

    if not callable(func):
        raise ValueError('`func` should be a callable object.')
    if not isinstance(args, tuple):
        raise ValueError('`args` should be of tuple type.')
    if not isinstance(kwargs, dict):
        raise ValueError('`kwargs` should be of dict type.')
    if not isinstance(n_repeat, int):
        raise ValueError('`n_repeat` should be an integer.')
    if not isinstance(name, str):
        raise ValueError('`name` should be a string.')
    if not isinstance(n_warmup, int):
        raise ValueError('`n_warmup` should be an integer.')
    if not _numpy.isreal(max_duration):
        raise ValueError('`max_duration` should be given in seconds')
    if not isinstance(devices, tuple):
        raise ValueError('`devices` should be of tuple type')

    return _repeat(
        func, args, kwargs, n_repeat, name, n_warmup, max_duration, devices)
def _record_on_device(event, device):
    # Record ``event`` on ``device`` while preserving the caller's device.
    prev_device = runtime.getDevice()
    try:
        runtime.setDevice(device)
        event.record()
    finally:
        runtime.setDevice(prev_device)


def _sync_on_device(event, device):
    # Synchronize ``event`` on ``device`` while preserving the caller's
    # device.
    prev_device = runtime.getDevice()
    try:
        runtime.setDevice(device)
        event.synchronize()
    finally:
        runtime.setDevice(prev_device)


def _repeat(
        func, args, kwargs, n_repeat, name, n_warmup, max_duration, devices):
    """Core measurement loop behind :func:`benchmark`.

    Creates a pair of CUDA events per device, runs ``n_warmup`` untimed
    calls, then times up to ``n_repeat`` calls (stopping early once the
    accumulated duration exceeds ``max_duration``) and returns the results
    as a :class:`_PerfCaseResult`.
    """
    events_1 = []
    events_2 = []

    # Create a start/stop event pair on each participating device.
    for i in devices:
        prev_device = runtime.getDevice()
        try:
            runtime.setDevice(i)
            events_1.append(_cupy.cuda.stream.Event())
            events_2.append(_cupy.cuda.stream.Event())
        finally:
            runtime.setDevice(prev_device)

    for i in range(n_warmup):
        func(*args, **kwargs)

    # Flush pending work before the timed section begins.
    for event, device in zip(events_1, devices):
        _record_on_device(event, device)
        event.synchronize()

    cpu_times = []
    gpu_times = [[] for _ in events_1]
    duration = 0
    for i in range(n_repeat):
        for event, device in zip(events_1, devices):
            _record_on_device(event, device)

        t1 = _time.perf_counter()

        func(*args, **kwargs)

        t2 = _time.perf_counter()
        cpu_times.append(t2 - t1)

        for event, device in zip(events_2, devices):
            _record_on_device(event, device)
        for event, device in zip(events_2, devices):
            _sync_on_device(event, device)
        # NOTE: the original inner loop reused ``i`` and shadowed the repeat
        # counter; use a distinct index instead.
        for dev_idx, (ev1, ev2) in enumerate(zip(events_1, events_2)):
            gpu_time = _cupy.cuda.get_elapsed_time(ev1, ev2) * 1e-3
            gpu_times[dev_idx].append(gpu_time)

        # Duration includes the GPU synchronization time of this step.
        duration += _time.perf_counter() - t1
        if duration > max_duration:
            break

    ts = _numpy.asarray([cpu_times] + gpu_times, dtype=_numpy.float64)
    return _PerfCaseResult(name, ts, devices=devices)
class time_range:
    """Mark function calls with ranges using NVTX/rocTX. This object can be
    used either as a decorator or a context manager.

    When used as a decorator, the decorated function calls are marked as
    ranges:

    >>> from cupyx.profiler import time_range
    >>> @time_range()
    ... def function_to_profile():
    ...     pass

    When used as a context manager, it describes the enclosed block as a
    nested range:

    >>> from cupyx.profiler import time_range
    >>> with time_range('some range in green', color_id=0):
    ...     # do something you want to measure
    ...     pass

    The marked ranges are visible in the profiler (such as nvvp, nsys-ui,
    etc) timeline.

    Args:
        message (str): Name of a range. When used as a decorator, the
            default is ``func.__name__``.
        color_id: range color ID
        argb_color: range color in ARGB (e.g. 0xFF00FF00 for green)
        sync (bool): If ``True``, waits for completion of all outstanding
            processing on GPU before calling
            :func:`cupy.cuda.nvtx.RangePush()` or
            :func:`cupy.cuda.nvtx.RangePop()`

    .. seealso:: :func:`cupy.cuda.nvtx.RangePush`,
        :func:`cupy.cuda.nvtx.RangePop`
    """

    def __init__(
            self, message=None, color_id=None, argb_color=None, sync=False):
        # Fail fast when NVTX/rocTX is unavailable in this build.
        if not cuda.nvtx.available:
            raise RuntimeError('nvtx is not installed')

        # The two coloring options are mutually exclusive.
        if color_id is not None and argb_color is not None:
            raise ValueError(
                'Only either color_id or argb_color can be specified'
            )
        self.message = message
        # -1 selects the default NVTX color when no color_id is given.
        self.color_id = color_id if color_id is not None else -1
        self.argb_color = argb_color
        self.sync = sync

    def __enter__(self):
        # A message is mandatory for context-manager use; decorator use
        # fills it in from the wrapped function's name (see __call__).
        if self.message is None:
            raise ValueError(
                'when used as a context manager, the message argument cannot '
                'be None')
        if self.sync:
            runtime.deviceSynchronize()
        # Push with an explicit ARGB color when given, otherwise by color ID.
        if self.argb_color is not None:
            cuda.nvtx.RangePushC(self.message, self.argb_color)
        else:
            cuda.nvtx.RangePush(self.message, self.color_id)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self.sync:
            runtime.deviceSynchronize()
        cuda.nvtx.RangePop()

    def _recreate_cm(self, message):
        # Adopt the decorated function's name as the range message when the
        # user did not supply one.
        # NOTE(review): this mutates self and returns the same instance, so
        # a single message-less time_range shared by several decorated
        # functions keeps the first function's name — confirm if reentrancy
        # is ever needed.
        if self.message is None:
            self.message = message
        return self

    def __call__(self, func):
        # Decorator form: wrap each call of ``func`` in a range.
        @functools.wraps(func)
        def inner(*args, **kwargs):
            with self._recreate_cm(func.__name__):
                return func(*args, **kwargs)
        return inner
file mode 100644 index 0000000000000000000000000000000000000000..ac4e2b28ad720e05a64f811d6427517e5d5f0aba --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/signal/_convolution/_convolve.py @@ -0,0 +1,223 @@ +""" +Some of the functions defined here were ported directly from CuSignal under +terms of the MIT license, under the following notice: + +Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
+""" + +import cupy + + +_convolve1d2o_kernel = cupy.ElementwiseKernel( + 'raw T in1, raw T in2, int32 W, int32 H', 'T out', + """ + T temp {}; + for (int x = 0; x < W; x++) { + for (int y = 0; y < H; y++) { + temp += in1[i + W - x - 1] * in1[i + H - y - 1] * in2[H * x + y]; + } + } + out = temp; + """, + "cupy_convolved2o", +) + + +def _convolve1d2o(in1, in2, mode): + assert mode == "valid" + out_dim = in1.shape[0] - max(in2.shape) + 1 + dtype = cupy.result_type(in1, in2) + out = cupy.empty(out_dim, dtype=dtype) + _convolve1d2o_kernel(in1, in2, *in2.shape, out) + return out + + +def convolve1d2o(in1, in2, mode='valid', method='direct'): + """ + Convolve a 1-dimensional arrays with a 2nd order filter. + This results in a second order convolution. + + Convolve `in1` and `in2`, with the output size determined by the + `mode` argument. + + Parameters + ---------- + in1 : array_like + First input. + in2 : array_like + Second input. Should have the same number of dimensions as `in1`. + mode : str {'full', 'valid', 'same'}, optional + A string indicating the size of the output: + + ``full`` + The output is the full discrete linear convolution + of the inputs. (Default) + ``valid`` + The output consists only of those elements that do not + rely on the zero-padding. In 'valid' mode, either `in1` or `in2` + must be at least as large as the other in every dimension. + ``same`` + The output is the same size as `in1`, centered + with respect to the 'full' output. + method : str {'auto', 'direct', 'fft'}, optional + A string indicating which method to use to calculate the convolution. + + ``direct`` + The convolution is determined directly from sums, the definition of + convolution. + ``fft`` + The Fourier Transform is used to perform the convolution by calling + `fftconvolve`. + ``auto`` + Automatically chooses direct or Fourier method based on an estimate + of which is faster (default). 
+ + Returns + ------- + out : ndarray + A 1-dimensional array containing a subset of the discrete linear + convolution of `in1` with `in2`. + + See Also + -------- + convolve + convolve1d2o + convolve1d3o + + Examples + -------- + Convolution of a 2nd order filter on a 1d signal + + >>> import cusignal as cs + >>> import numpy as np + >>> d = 50 + >>> a = np.random.uniform(-1,1,(200)) + >>> b = np.random.uniform(-1,1,(d,d)) + >>> c = cs.convolve1d2o(a,b) + + """ + + if in1.ndim != 1: + raise ValueError('in1 should have one dimension') + if in2.ndim != 2: + raise ValueError('in2 should have three dimension') + + if mode in ["same", "full"]: + raise NotImplementedError("Mode == {} not implemented".format(mode)) + + if method == "direct": + return _convolve1d2o(in1, in2, mode) + else: + raise NotImplementedError("Only Direct method implemented") + + +_convolve1d3o_kernel = cupy.ElementwiseKernel( + 'raw T in1, raw T in2, int32 W, int32 H, int32 D', 'T out', + """ + T temp {}; + for (int x = 0; x < W; x++) { + for (int y = 0; y < H; y++) { + for (int z = 0; z < D; z++) { + temp += in1[i + W - x - 1] * in1[i + H - y - 1] * + in1[i + D - z - 1] * in2[(H * x + y) * D + z]; + } + } + } + out = temp; + """, + "cupy_convolved3o", +) + + +def _convolve1d3o(in1, in2, mode): + assert mode == "valid" + out_dim = in1.shape[0] - max(in2.shape) + 1 + dtype = cupy.result_type(in1, in2) + out = cupy.empty(out_dim, dtype=dtype) + _convolve1d3o_kernel(in1, in2, *in2.shape, out) + return out + + +def convolve1d3o(in1, in2, mode='valid', method='direct'): + """ + Convolve a 1-dimensional array with a 3rd order filter. + This results in a third order convolution. + + Convolve `in1` and `in2`, with the output size determined by the + `mode` argument. + + Parameters + ---------- + in1 : array_like + First input. Should have one dimension. + in2 : array_like + Second input. Should have three dimensions. 
def convolve1d3o(in1, in2, mode='valid', method='direct'):
    """
    Convolve a 1-dimensional array with a 3rd order filter.
    This results in a third order convolution.

    Convolve `in1` and `in2`, with the output size determined by the
    `mode` argument.

    Parameters
    ----------
    in1 : array_like
        First input. Should have one dimension.
    in2 : array_like
        Second input. Should have three dimensions.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
           must be at least as large as the other in every dimension.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    method : str {'auto', 'direct', 'fft'}, optional
        A string indicating which method to use to calculate the
        convolution.

        ``direct``
           The convolution is determined directly from sums, the definition
           of convolution.
        ``fft``
           The Fourier Transform is used to perform the convolution by
           calling `fftconvolve`.
        ``auto``
           Automatically chooses direct or Fourier method based on an
           estimate of which is faster (default).

    Returns
    -------
    out : ndarray
        A 1-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.

    See Also
    --------
    convolve
    convolve1d2o
    convolve1d3o
    """
    if in1.ndim != 1:
        raise ValueError('in1 should have one dimension')
    if in2.ndim != 3:
        # Fixed grammar of the original message ("three dimension").
        raise ValueError('in2 should have three dimensions')

    if mode in ["same", "full"]:
        raise NotImplementedError("Mode == {} not implemented".format(mode))

    if method == "direct":
        return _convolve1d3o(in1, in2, mode)
    else:
        raise NotImplementedError("Only Direct method implemented")
b/vllm/lib/python3.10/site-packages/cupyx/signal/_radartools/__pycache__/_beamformers.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/signal/_radartools/__pycache__/_radartools.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/signal/_radartools/__pycache__/_radartools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f2a51ed1ce74c96eb3adc19c0e930f0dbb03e5f Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/signal/_radartools/__pycache__/_radartools.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/signal/_radartools/_beamformers.py b/vllm/lib/python3.10/site-packages/cupyx/signal/_radartools/_beamformers.py new file mode 100644 index 0000000000000000000000000000000000000000..f8496e6eb417c212a2dec0b7b3097e5413d26d6a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/signal/_radartools/_beamformers.py @@ -0,0 +1,60 @@ +# Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
def mvdr(x, sv, calc_cov=True):
    """
    Minimum variance distortionless response (MVDR) beamformer weights

    Parameters
    ----------
    x : ndarray
        Received signal or input covariance matrix, assume 2D array with
        size [num_sensors, num_samples]

    sv: ndarray
        Steering vector, assume 1D array with size [num_sensors, 1]

    calc_cov : bool
        Determine whether to calculate covariance matrix. Simply put,
        calc_cov defines whether x input is made of sensor/observation data
        or is a precalculated covariance matrix

    Note: Unlike MATLAB where input matrix x is of size MxN where N
    represents the number of array elements, we assume row-major formatted
    data where each row is assumed to be complex-valued data from a given
    sensor (i.e. NxM)
    """
    if x.shape[0] > x.shape[1]:
        # Fixed: the message previously contained a backslash line
        # continuation inside the string literal, embedding stray spaces.
        raise ValueError(
            "Matrix has more sensors than samples. Consider transposing "
            "and remember cuSignal is row-major, unlike MATLAB"
        )

    if x.shape[0] != sv.shape[0]:
        raise ValueError("Steering Vector and input data do not align")

    if calc_cov:
        x = cp.cov(x, dtype=x.dtype)

    wB = cp.linalg.inv(x).dot(sv)
    # .T is a no-op for a 1-D steering vector and makes the product a 1x1
    # scalar for the documented [num_sensors, 1] column-vector form, which
    # previously raised a shape error.
    wA = sv.conj().T.dot(wB)  # 1x1 scalar
    return wB / wA
+""" + +import cupy +from cupyx.scipy.signal import windows + + +def _pulse_preprocess(x, normalize, window): + if window is not None: + n = x.shape[-1] + if callable(window): + w = window(cupy.fft.fftfreq(n).astype(x.dtype)) + elif isinstance(window, cupy.ndarray): + if window.shape != (n,): + raise ValueError("window must have the same length as data") + w = window + else: + w = windows.get_window(window, n, False).astype(x.dtype) + x = x * w + + if normalize: + x = x / cupy.linalg.norm(x) + + return x + + +def pulse_compression(x, template, normalize=False, window=None, nfft=None): + """ + Pulse Compression is used to increase the range resolution and SNR + by performing matched filtering of the transmitted pulse (template) + with the received signal (x) + + Parameters + ---------- + x : ndarray + Received signal, assume 2D array with [num_pulses, sample_per_pulse] + + template : ndarray + Transmitted signal, assume 1D array + + normalize : bool + Normalize transmitted signal + + window : array_like, callable, string, float, or tuple, optional + Specifies the window applied to the signal in the Fourier + domain. + + nfft : int, size of FFT for pulse compression. Default is number of + samples per pulse + + Returns + ------- + compressedIQ : ndarray + Pulse compressed output + """ + num_pulses, samples_per_pulse = x.shape + dtype = cupy.result_type(x, template) + + if nfft is None: + nfft = samples_per_pulse + + t = _pulse_preprocess(template, normalize, window) + fft_x = cupy.fft.fft(x, nfft) + fft_t = cupy.fft.fft(t, nfft) + out = cupy.fft.ifft(fft_x * fft_t.conj(), nfft) + if dtype.kind != 'c': + out = out.real + return out + + +def pulse_doppler(x, window=None, nfft=None): + """ + Pulse doppler processing yields a range/doppler data matrix that represents + moving target data that's separated from clutter. An estimation of the + doppler shift can also be obtained from pulse doppler processing. FFT taken + across slow-time (pulse) dimension. 
+ + Parameters + ---------- + x : ndarray + Received signal, assume 2D array with [num_pulses, sample_per_pulse] + + window : array_like, callable, string, float, or tuple, optional + Specifies the window applied to the signal in the Fourier + domain. + + nfft : int, size of FFT for pulse compression. Default is number of + samples per pulse + + Returns + ------- + pd_dataMatrix : ndarray + Pulse-doppler output (range/doppler matrix) + """ + num_pulses, samples_per_pulse = x.shape + + if nfft is None: + nfft = num_pulses + + xT = _pulse_preprocess(x.T, False, window) + return cupy.fft.fft(xT, nfft).T + + +def cfar_alpha(pfa, N): + """ + Computes the value of alpha corresponding to a given probability + of false alarm and number of reference cells N. + + Parameters + ---------- + pfa : float + Probability of false alarm. + + N : int + Number of reference cells. + + Returns + ------- + alpha : float + Alpha value. + """ + return N * (pfa ** (-1.0 / N) - 1) + + +def ca_cfar(array, guard_cells, reference_cells, pfa=1e-3): + """ + Computes the cell-averaged constant false alarm rate (CA CFAR) detector + threshold and returns for a given array. + Parameters + ---------- + array : ndarray + Array containing data to be processed. + guard_cells_x : int + One-sided guard cell count in the first dimension. + guard_cells_y : int + One-sided guard cell count in the second dimension. + reference_cells_x : int + one-sided reference cell count in the first dimension. + reference_cells_y : int + one-sided reference cell count in the second dimension. + pfa : float + Probability of false alarm. 
+ Returns + ------- + threshold : ndarray + CFAR threshold + return : ndarray + CFAR detections + """ + shape = array.shape + if len(shape) > 2: + raise TypeError('Only 1D and 2D arrays are currently supported.') + mask = cupy.zeros(shape, dtype=cupy.float32) + + if len(shape) == 1: + if len(array) <= 2 * guard_cells + 2 * reference_cells: + raise ValueError('Array too small for given parameters') + intermediate = cupy.cumsum(array, axis=0, dtype=cupy.float32) + N = 2 * reference_cells + alpha = cfar_alpha(pfa, N) + tpb = (32,) + bpg = ((len(array) - 2 * reference_cells - 2 * guard_cells + + tpb[0] - 1) // tpb[0],) + _ca_cfar_1d_kernel(bpg, tpb, (array, intermediate, mask, + len(array), N, cupy.float32(alpha), + guard_cells, reference_cells)) + elif len(shape) == 2: + if len(guard_cells) != 2 or len(reference_cells) != 2: + raise TypeError('Guard and reference cells must be two ' + 'dimensional.') + guard_cells_x, guard_cells_y = guard_cells + reference_cells_x, reference_cells_y = reference_cells + if shape[0] - 2 * guard_cells_x - 2 * reference_cells_x <= 0: + raise ValueError('Array first dimension too small for given ' + 'parameters.') + if shape[1] - 2 * guard_cells_y - 2 * reference_cells_y <= 0: + raise ValueError('Array second dimension too small for given ' + 'parameters.') + intermediate = cupy.cumsum(array, axis=0, dtype=cupy.float32) + intermediate = cupy.cumsum(intermediate, axis=1, dtype=cupy.float32) + N = 2 * reference_cells_x * (2 * reference_cells_y + + 2 * guard_cells_y + 1) + N += 2 * (2 * guard_cells_x + 1) * reference_cells_y + alpha = cfar_alpha(pfa, N) + tpb = (8, 8) + bpg_x = (shape[0] - 2 * (reference_cells_x + guard_cells_x) + tpb[0] - + 1) // tpb[0] + bpg_y = (shape[1] - 2 * (reference_cells_y + guard_cells_y) + tpb[1] - + 1) // tpb[1] + bpg = (bpg_x, bpg_y) + _ca_cfar_2d_kernel(bpg, tpb, (array, intermediate, mask, + shape[0], shape[1], N, cupy.float32(alpha), + guard_cells_x, guard_cells_y, reference_cells_x, + reference_cells_y)) + 
return (mask, array - mask > 0) + + +_ca_cfar_2d_kernel = cupy.RawKernel(r''' +extern "C" __global__ void +_ca_cfar_2d_kernel(float * array, float * intermediate, float * mask, + int width, int height, int N, float alpha, + int guard_cells_x, int guard_cells_y, + int reference_cells_x, int reference_cells_y) +{ + int i_init = threadIdx.x+blockIdx.x*blockDim.x; + int j_init = threadIdx.y+blockIdx.y*blockDim.y; + int i, j, x, y, offset; + int tro, tlo, blo, bro, tri, tli, bli, bri; + float outer_area, inner_area, T; + for (i=i_init; i0 && j>0){ + outer_area = intermediate[tro]-intermediate[tlo]- + intermediate[bro]+intermediate[blo]; + } else if (i == 0 && j > 0){ + outer_area = intermediate[tro]-intermediate[bro]; + } else if (i > 0 && j == 0){ + outer_area = intermediate[tro]-intermediate[tlo]; + } else if (i == 0 && j == 0){ + outer_area = intermediate[tro]; + } + inner_area = intermediate[tri]-intermediate[tli]- + intermediate[bri]+intermediate[bli]; + T = outer_area-inner_area; + T = alpha/N*T; + mask[offset] = T; + } + } +} +''', '_ca_cfar_2d_kernel') + + +_ca_cfar_1d_kernel = cupy.RawKernel(r''' +extern "C" __global__ void +_ca_cfar_1d_kernel(float * array, float * intermediate, float * mask, + int width, int N, float alpha, + int guard_cells, int reference_cells) +{ + int i_init = threadIdx.x+blockIdx.x*blockDim.x; + int i, x; + int br, bl, sr, sl; + float big_area, small_area, T; + for (i=i_init; i0){ + big_area = intermediate[br]-intermediate[bl]; + } else{ + big_area = intermediate[br]; + } + small_area = intermediate[sr]-intermediate[sl]; + T = big_area-small_area; + T = alpha/N*T; + mask[x] = T; + } +} +''', '_ca_cfar_1d_kernel') diff --git a/vllm/lib/python3.10/site-packages/cupyx/time.py b/vllm/lib/python3.10/site-packages/cupyx/time.py new file mode 100644 index 0000000000000000000000000000000000000000..2fa3b7d132abcfc9ca6311bd9f5f92e667fc4e95 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/time.py @@ -0,0 +1,90 @@ +import math as _math 
+import warnings as _warnings + +import numpy as _numpy + +import cupy as _cupy +from cupyx.profiler._time import _repeat, _PerfCaseResult # for tests # NOQA + + +# TODO(leofang): remove this function in CuPy v11 +def repeat( + func, args=(), kwargs={}, n_repeat=10000, *, + name=None, n_warmup=10, max_duration=_math.inf, devices=None): + """ Timing utility for measuring time spent by both CPU and GPU. + + This function is a very convenient helper for setting up a timing test. The + GPU time is properly recorded by synchronizing internal streams. As a + result, to time a multi-GPU function all participating devices must be + passed as the ``devices`` argument so that this helper knows which devices + to record. A simple example is given as follows: + + .. code-block:: py + + import cupy as cp + from cupyx.time import repeat + + def f(a, b): + return 3 * cp.sin(-a) * b + + a = 0.5 - cp.random.random((100,)) + b = cp.random.random((100,)) + print(repeat(f, (a, b), n_repeat=1000)) + + + Args: + func (callable): a callable object to be timed. + args (tuple): positional arguments to be passed to the callable. + kwargs (dict): keyword arguments to be passed to the callable. + n_repeat (int): number of times the callable is called. Increasing + this value would improve the collected statistics at the cost + of longer test time. + name (str): the function name to be reported. If not given, the + callable's ``__name__`` attribute is used. + n_warmup (int): number of times the callable is called. The warm-up + runs are not timed. + max_duration (float): the maximum time (in seconds) that the entire + test can use. If the taken time is longer than this limit, the test + is stopped and the statistics collected up to the breakpoint is + reported. + devices (tuple): a tuple of device IDs (int) that will be timed during + the timing test. If not given, the current device is used. 
+ + Returns: + :class:`~cupyx.profiler._time._PerfCaseResult`: + an object collecting all test results. + + .. warning:: + This API is moved to :func:`cupyx.profiler.benchmark` since CuPy v10. + Access through ``cupyx.time`` is deprecated. + """ + + _warnings.warn( + 'cupyx.time.repeat has been moved to cupyx.profiler.benchmark since ' + 'CuPy v10. Access through cupyx.time is deprecated and will be ' + 'removed in the future.') + if name is None: + name = func.__name__ + + if devices is None: + devices = (_cupy.cuda.get_device_id(),) + + if not callable(func): + raise ValueError('`func` should be a callable object.') + if not isinstance(args, tuple): + raise ValueError('`args` should be of tuple type.') + if not isinstance(kwargs, dict): + raise ValueError('`kwargs` should be of dict type.') + if not isinstance(n_repeat, int): + raise ValueError('`n_repeat` should be an integer.') + if not isinstance(name, str): + raise ValueError('`name` should be a string.') + if not isinstance(n_warmup, int): + raise ValueError('`n_warmup` should be an integer.') + if not _numpy.isreal(max_duration): + raise ValueError('`max_duration` should be given in seconds') + if not isinstance(devices, tuple): + raise ValueError('`devices` should be of tuple type') + + return _repeat( + func, args, kwargs, n_repeat, name, n_warmup, max_duration, devices) diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36b5fdbab6139eaf7ff37334cfbe6002bec9472d Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/_version.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/_version.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..0200be244fb1fa971c5246ca1296a470e8666bef Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/_version.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/archive.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/archive.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d948f2b88985029cdbe2c54f08dfa997d8943ca Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/archive.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/asyn.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/asyn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac7fcbc1841a3f930536aec4be7057153f31f2e3 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/asyn.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/caching.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/caching.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..027f4616831a2e8359a134aca8623e526cf22258 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/caching.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/callbacks.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/callbacks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e06cb9a955369eafae6427ec71fa871d2026bc96 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/callbacks.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/compression.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/compression.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..c9d7a036d811795ddcfaa4196f9e60904fa42959 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/compression.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/config.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e22cd57eef9472c06d4d935d219fdd602a60426 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/config.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/conftest.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..275157954e0ffacaa6a8fa47c7aa75eafaf6bcf7 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/conftest.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/core.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a74d43b1dfa62e3caa6606e83ebe214522a2014 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/core.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/dircache.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/dircache.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e57167ecd82adc73ea03f62f1c811100e8f01c2c Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/dircache.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/fuse.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/fuse.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..4e735e855b991cf4b641df3dca477c5041e6d835 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/fuse.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/generic.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/generic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01d5ed43cfcbfa986edf879effd69773a56f285c Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/generic.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/gui.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/gui.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fade0370fd3ee98b2a1a6459b55c39f8e96f9ac1 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/gui.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/json.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/json.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3c22a6dc3075f9e61da81f50d8f857ea0df39a5 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/json.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/mapping.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/mapping.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fa1c3098b592f449828da453ad33c8ef8c043f5 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/mapping.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/parquet.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/parquet.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..0add68cc4c00a7467606a400e81a860a70c1cddd Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/parquet.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/registry.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..503dde40354dc4eb05226743cc5bd06a9e69db81 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/registry.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/spec.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/spec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4757c20ecd5ae90a0774188eb60b639490ab811a Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/spec.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/transaction.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/transaction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c229423e12efd9879132fb3d3662fea411936a67 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/transaction.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/__pycache__/utils.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2fa27e289faaa5da3c57a77e5f79ef404ceb620 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/__pycache__/utils.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..b99916bdf9e7a442681e19dfe381e52a29a556fc Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cached.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cached.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7ff444467fc3478310df7a25c86dabe6a0384c6 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cached.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dask.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dask.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..584e25da5513528f137cea50beef189b6cfc8d65 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dask.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/jupyter.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/jupyter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3a8b551a64d18863fa0bea84fe985f817540b77 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/jupyter.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/local.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/local.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bcbe0fb6841dc0e8050e067fbe29cc897963ab81 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/local.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/memory.cpython-310.pyc b/vllm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2dfa04901380aa21a506f56c410e58a602357f3 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/memory.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/fsspec/tests/abstract/__init__.py b/vllm/lib/python3.10/site-packages/fsspec/tests/abstract/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..44181420fdc6c7b8e9e707d8b0bd8018417efc34 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/fsspec/tests/abstract/__init__.py @@ -0,0 +1,287 @@ +import os +from hashlib import md5 + +import pytest + +from fsspec.implementations.local import LocalFileSystem +from fsspec.tests.abstract.copy import AbstractCopyTests # noqa: F401 +from fsspec.tests.abstract.get import AbstractGetTests # noqa: F401 +from fsspec.tests.abstract.put import AbstractPutTests # noqa: F401 + + +class BaseAbstractFixtures: + """ + Abstract base class containing fixtures that are used by but never need to + be overridden in derived filesystem-specific classes to run the abstract + tests on such filesystems. + """ + + @pytest.fixture + def fs_bulk_operations_scenario_0(self, fs, fs_join, fs_path): + """ + Scenario on remote filesystem that is used for many cp/get/put tests. + + Cleans up at the end of each test it which it is used. + """ + source = self._bulk_operations_scenario_0(fs, fs_join, fs_path) + yield source + fs.rm(source, recursive=True) + + @pytest.fixture + def fs_glob_edge_cases_files(self, fs, fs_join, fs_path): + """ + Scenario on remote filesystem that is used for glob edge cases cp/get/put tests. + + Cleans up at the end of each test it which it is used. 
+ """ + source = self._glob_edge_cases_files(fs, fs_join, fs_path) + yield source + fs.rm(source, recursive=True) + + @pytest.fixture + def fs_dir_and_file_with_same_name_prefix(self, fs, fs_join, fs_path): + """ + Scenario on remote filesystem that is used to check cp/get/put on directory + and file with the same name prefixes. + + Cleans up at the end of each test it which it is used. + """ + source = self._dir_and_file_with_same_name_prefix(fs, fs_join, fs_path) + yield source + fs.rm(source, recursive=True) + + @pytest.fixture + def fs_10_files_with_hashed_names(self, fs, fs_join, fs_path): + """ + Scenario on remote filesystem that is used to check cp/get/put files order + when source and destination are lists. + + Cleans up at the end of each test it which it is used. + """ + source = self._10_files_with_hashed_names(fs, fs_join, fs_path) + yield source + fs.rm(source, recursive=True) + + @pytest.fixture + def fs_target(self, fs, fs_join, fs_path): + """ + Return name of remote directory that does not yet exist to copy into. + + Cleans up at the end of each test it which it is used. + """ + target = fs_join(fs_path, "target") + yield target + if fs.exists(target): + fs.rm(target, recursive=True) + + @pytest.fixture + def local_bulk_operations_scenario_0(self, local_fs, local_join, local_path): + """ + Scenario on local filesystem that is used for many cp/get/put tests. + + Cleans up at the end of each test it which it is used. + """ + source = self._bulk_operations_scenario_0(local_fs, local_join, local_path) + yield source + local_fs.rm(source, recursive=True) + + @pytest.fixture + def local_glob_edge_cases_files(self, local_fs, local_join, local_path): + """ + Scenario on local filesystem that is used for glob edge cases cp/get/put tests. + + Cleans up at the end of each test it which it is used. 
+ """ + source = self._glob_edge_cases_files(local_fs, local_join, local_path) + yield source + local_fs.rm(source, recursive=True) + + @pytest.fixture + def local_dir_and_file_with_same_name_prefix( + self, local_fs, local_join, local_path + ): + """ + Scenario on local filesystem that is used to check cp/get/put on directory + and file with the same name prefixes. + + Cleans up at the end of each test it which it is used. + """ + source = self._dir_and_file_with_same_name_prefix( + local_fs, local_join, local_path + ) + yield source + local_fs.rm(source, recursive=True) + + @pytest.fixture + def local_10_files_with_hashed_names(self, local_fs, local_join, local_path): + """ + Scenario on local filesystem that is used to check cp/get/put files order + when source and destination are lists. + + Cleans up at the end of each test it which it is used. + """ + source = self._10_files_with_hashed_names(local_fs, local_join, local_path) + yield source + local_fs.rm(source, recursive=True) + + @pytest.fixture + def local_target(self, local_fs, local_join, local_path): + """ + Return name of local directory that does not yet exist to copy into. + + Cleans up at the end of each test it which it is used. + """ + target = local_join(local_path, "target") + yield target + if local_fs.exists(target): + local_fs.rm(target, recursive=True) + + def _glob_edge_cases_files(self, some_fs, some_join, some_path): + """ + Scenario that is used for glob edge cases cp/get/put tests. 
+ Creates the following directory and file structure: + + 📁 source + ├── 📄 file1 + ├── 📄 file2 + ├── 📁 subdir0 + │ ├── 📄 subfile1 + │ ├── 📄 subfile2 + │ └── 📁 nesteddir + │ └── 📄 nestedfile + └── 📁 subdir1 + ├── 📄 subfile1 + ├── 📄 subfile2 + └── 📁 nesteddir + └── 📄 nestedfile + """ + source = some_join(some_path, "source") + some_fs.touch(some_join(source, "file1")) + some_fs.touch(some_join(source, "file2")) + + for subdir_idx in range(2): + subdir = some_join(source, f"subdir{subdir_idx}") + nesteddir = some_join(subdir, "nesteddir") + some_fs.makedirs(nesteddir) + some_fs.touch(some_join(subdir, "subfile1")) + some_fs.touch(some_join(subdir, "subfile2")) + some_fs.touch(some_join(nesteddir, "nestedfile")) + + return source + + def _bulk_operations_scenario_0(self, some_fs, some_join, some_path): + """ + Scenario that is used for many cp/get/put tests. Creates the following + directory and file structure: + + 📁 source + ├── 📄 file1 + ├── 📄 file2 + └── 📁 subdir + ├── 📄 subfile1 + ├── 📄 subfile2 + └── 📁 nesteddir + └── 📄 nestedfile + """ + source = some_join(some_path, "source") + subdir = some_join(source, "subdir") + nesteddir = some_join(subdir, "nesteddir") + some_fs.makedirs(nesteddir) + some_fs.touch(some_join(source, "file1")) + some_fs.touch(some_join(source, "file2")) + some_fs.touch(some_join(subdir, "subfile1")) + some_fs.touch(some_join(subdir, "subfile2")) + some_fs.touch(some_join(nesteddir, "nestedfile")) + return source + + def _dir_and_file_with_same_name_prefix(self, some_fs, some_join, some_path): + """ + Scenario that is used to check cp/get/put on directory and file with + the same name prefixes. 
Creates the following directory and file structure: + + 📁 source + ├── 📄 subdir.txt + └── 📁 subdir + └── 📄 subfile.txt + """ + source = some_join(some_path, "source") + subdir = some_join(source, "subdir") + file = some_join(source, "subdir.txt") + subfile = some_join(subdir, "subfile.txt") + some_fs.makedirs(subdir) + some_fs.touch(file) + some_fs.touch(subfile) + return source + + def _10_files_with_hashed_names(self, some_fs, some_join, some_path): + """ + Scenario that is used to check cp/get/put files order when source and + destination are lists. Creates the following directory and file structure: + + 📁 source + └── 📄 {hashed([0-9])}.txt + """ + source = some_join(some_path, "source") + for i in range(10): + hashed_i = md5(str(i).encode("utf-8")).hexdigest() + path = some_join(source, f"{hashed_i}.txt") + some_fs.pipe(path=path, value=f"{i}".encode("utf-8")) + return source + + +class AbstractFixtures(BaseAbstractFixtures): + """ + Abstract base class containing fixtures that may be overridden in derived + filesystem-specific classes to run the abstract tests on such filesystems. + + For any particular filesystem some of these fixtures must be overridden, + such as ``fs`` and ``fs_path``, and others may be overridden if the + default functions here are not appropriate, such as ``fs_join``. + """ + + @pytest.fixture + def fs(self): + raise NotImplementedError("This function must be overridden in derived classes") + + @pytest.fixture + def fs_join(self): + """ + Return a function that joins its arguments together into a path. + + Most fsspec implementations join paths in a platform-dependent way, + but some will override this to always use a forward slash. + """ + return os.path.join + + @pytest.fixture + def fs_path(self): + raise NotImplementedError("This function must be overridden in derived classes") + + @pytest.fixture(scope="class") + def local_fs(self): + # Maybe need an option for auto_mkdir=False? This is only relevant + # for certain implementations. 
+ return LocalFileSystem(auto_mkdir=True) + + @pytest.fixture + def local_join(self): + """ + Return a function that joins its arguments together into a path, on + the local filesystem. + """ + return os.path.join + + @pytest.fixture + def local_path(self, tmpdir): + return tmpdir + + @pytest.fixture + def supports_empty_directories(self): + """ + Return whether this implementation supports empty directories. + """ + return True + + @pytest.fixture + def fs_sanitize_path(self): + return lambda x: x diff --git a/vllm/lib/python3.10/site-packages/fsspec/tests/abstract/common.py b/vllm/lib/python3.10/site-packages/fsspec/tests/abstract/common.py new file mode 100644 index 0000000000000000000000000000000000000000..22e7c4140404ab2a8928689721419cf05c2760b9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/fsspec/tests/abstract/common.py @@ -0,0 +1,175 @@ +GLOB_EDGE_CASES_TESTS = { + "argnames": ("path", "recursive", "maxdepth", "expected"), + "argvalues": [ + ("fil?1", False, None, ["file1"]), + ("fil?1", True, None, ["file1"]), + ("file[1-2]", False, None, ["file1", "file2"]), + ("file[1-2]", True, None, ["file1", "file2"]), + ("*", False, None, ["file1", "file2"]), + ( + "*", + True, + None, + [ + "file1", + "file2", + "subdir0/subfile1", + "subdir0/subfile2", + "subdir0/nesteddir/nestedfile", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ("*", True, 1, ["file1", "file2"]), + ( + "*", + True, + 2, + [ + "file1", + "file2", + "subdir0/subfile1", + "subdir0/subfile2", + "subdir1/subfile1", + "subdir1/subfile2", + ], + ), + ("*1", False, None, ["file1"]), + ( + "*1", + True, + None, + [ + "file1", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ("*1", True, 2, ["file1", "subdir1/subfile1", "subdir1/subfile2"]), + ( + "**", + False, + None, + [ + "file1", + "file2", + "subdir0/subfile1", + "subdir0/subfile2", + "subdir0/nesteddir/nestedfile", + "subdir1/subfile1", + 
"subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ( + "**", + True, + None, + [ + "file1", + "file2", + "subdir0/subfile1", + "subdir0/subfile2", + "subdir0/nesteddir/nestedfile", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ("**", True, 1, ["file1", "file2"]), + ( + "**", + True, + 2, + [ + "file1", + "file2", + "subdir0/subfile1", + "subdir0/subfile2", + "subdir0/nesteddir/nestedfile", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ( + "**", + False, + 2, + [ + "file1", + "file2", + "subdir0/subfile1", + "subdir0/subfile2", + "subdir1/subfile1", + "subdir1/subfile2", + ], + ), + ("**/*1", False, None, ["file1", "subdir0/subfile1", "subdir1/subfile1"]), + ( + "**/*1", + True, + None, + [ + "file1", + "subdir0/subfile1", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ("**/*1", True, 1, ["file1"]), + ( + "**/*1", + True, + 2, + ["file1", "subdir0/subfile1", "subdir1/subfile1", "subdir1/subfile2"], + ), + ("**/*1", False, 2, ["file1", "subdir0/subfile1", "subdir1/subfile1"]), + ("**/subdir0", False, None, []), + ("**/subdir0", True, None, ["subfile1", "subfile2", "nesteddir/nestedfile"]), + ("**/subdir0/nested*", False, 2, []), + ("**/subdir0/nested*", True, 2, ["nestedfile"]), + ("subdir[1-2]", False, None, []), + ("subdir[1-2]", True, None, ["subfile1", "subfile2", "nesteddir/nestedfile"]), + ("subdir[1-2]", True, 2, ["subfile1", "subfile2"]), + ("subdir[0-1]", False, None, []), + ( + "subdir[0-1]", + True, + None, + [ + "subdir0/subfile1", + "subdir0/subfile2", + "subdir0/nesteddir/nestedfile", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ( + "subdir[0-1]/*fil[e]*", + False, + None, + [ + "subdir0/subfile1", + "subdir0/subfile2", + "subdir1/subfile1", + "subdir1/subfile2", + ], + ), + ( + "subdir[0-1]/*fil[e]*", + True, + None, + [ + "subdir0/subfile1", + "subdir0/subfile2", 
+ "subdir1/subfile1", + "subdir1/subfile2", + ], + ), + ], +} diff --git a/vllm/lib/python3.10/site-packages/fsspec/tests/abstract/get.py b/vllm/lib/python3.10/site-packages/fsspec/tests/abstract/get.py new file mode 100644 index 0000000000000000000000000000000000000000..851ab81ee581e74cac41c64c83ef0af75826d6b0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/fsspec/tests/abstract/get.py @@ -0,0 +1,587 @@ +from hashlib import md5 +from itertools import product + +import pytest + +from fsspec.implementations.local import make_path_posix +from fsspec.tests.abstract.common import GLOB_EDGE_CASES_TESTS + + +class AbstractGetTests: + def test_get_file_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1a + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + assert local_fs.isdir(target) + + target_file2 = local_join(target, "file2") + target_subfile1 = local_join(target, "subfile1") + + # Copy from source directory + fs.get(fs_join(source, "file2"), target) + assert local_fs.isfile(target_file2) + + # Copy from sub directory + fs.get(fs_join(source, "subdir", "subfile1"), target) + assert local_fs.isfile(target_subfile1) + + # Remove copied files + local_fs.rm([target_file2, target_subfile1]) + assert not local_fs.exists(target_file2) + assert not local_fs.exists(target_subfile1) + + # Repeat with trailing slash on target + fs.get(fs_join(source, "file2"), target + "/") + assert local_fs.isdir(target) + assert local_fs.isfile(target_file2) + + fs.get(fs_join(source, "subdir", "subfile1"), target + "/") + assert local_fs.isfile(target_subfile1) + + def test_get_file_to_new_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1b + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + fs.get( + fs_join(source, 
"subdir", "subfile1"), local_join(target, "newdir/") + ) # Note trailing slash + + assert local_fs.isdir(target) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + + def test_get_file_to_file_in_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1c + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + fs.get(fs_join(source, "subdir", "subfile1"), local_join(target, "newfile")) + assert local_fs.isfile(local_join(target, "newfile")) + + def test_get_file_to_file_in_new_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1d + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + fs.get( + fs_join(source, "subdir", "subfile1"), + local_join(target, "newdir", "newfile"), + ) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "newfile")) + + def test_get_directory_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1e + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + assert local_fs.isdir(target) + + for source_slash, target_slash in zip([False, True], [False, True]): + s = fs_join(source, "subdir") + if source_slash: + s += "/" + t = target + "/" if target_slash else target + + # Without recursive does nothing + fs.get(s, t) + assert local_fs.ls(target) == [] + + # With recursive + fs.get(s, t, recursive=True) + if source_slash: + assert local_fs.isfile(local_join(target, "subfile1")) + assert local_fs.isfile(local_join(target, "subfile2")) + assert local_fs.isdir(local_join(target, "nesteddir")) + assert local_fs.isfile(local_join(target, "nesteddir", 
"nestedfile")) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm( + [ + local_join(target, "subfile1"), + local_join(target, "subfile2"), + local_join(target, "nesteddir"), + ], + recursive=True, + ) + else: + assert local_fs.isdir(local_join(target, "subdir")) + assert local_fs.isfile(local_join(target, "subdir", "subfile1")) + assert local_fs.isfile(local_join(target, "subdir", "subfile2")) + assert local_fs.isdir(local_join(target, "subdir", "nesteddir")) + assert local_fs.isfile( + local_join(target, "subdir", "nesteddir", "nestedfile") + ) + + local_fs.rm(local_join(target, "subdir"), recursive=True) + assert local_fs.ls(target) == [] + + # Limit recursive by maxdepth + fs.get(s, t, recursive=True, maxdepth=1) + if source_slash: + assert local_fs.isfile(local_join(target, "subfile1")) + assert local_fs.isfile(local_join(target, "subfile2")) + assert not local_fs.exists(local_join(target, "nesteddir")) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm( + [ + local_join(target, "subfile1"), + local_join(target, "subfile2"), + ], + recursive=True, + ) + else: + assert local_fs.isdir(local_join(target, "subdir")) + assert local_fs.isfile(local_join(target, "subdir", "subfile1")) + assert local_fs.isfile(local_join(target, "subdir", "subfile2")) + assert not local_fs.exists(local_join(target, "subdir", "nesteddir")) + + local_fs.rm(local_join(target, "subdir"), recursive=True) + assert local_fs.ls(target) == [] + + def test_get_directory_to_new_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1f + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + for source_slash, target_slash in zip([False, True], [False, True]): + s = fs_join(source, "subdir") + if source_slash: + s += "/" + t = local_join(target, "newdir") + if target_slash: + t += "/" + + # Without recursive does nothing + fs.get(s, t) 
+ assert local_fs.ls(target) == [] + + # With recursive + fs.get(s, t, recursive=True) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + assert local_fs.isfile(local_join(target, "newdir", "subfile2")) + assert local_fs.isdir(local_join(target, "newdir", "nesteddir")) + assert local_fs.isfile( + local_join(target, "newdir", "nesteddir", "nestedfile") + ) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm(local_join(target, "newdir"), recursive=True) + assert local_fs.ls(target) == [] + + # Limit recursive by maxdepth + fs.get(s, t, recursive=True, maxdepth=1) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + assert local_fs.isfile(local_join(target, "newdir", "subfile2")) + assert not local_fs.exists(local_join(target, "newdir", "nesteddir")) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm(local_join(target, "newdir"), recursive=True) + assert not local_fs.exists(local_join(target, "newdir")) + + def test_get_glob_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1g + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + for target_slash in [False, True]: + t = target + "/" if target_slash else target + + # Without recursive + fs.get(fs_join(source, "subdir", "*"), t) + assert local_fs.isfile(local_join(target, "subfile1")) + assert local_fs.isfile(local_join(target, "subfile2")) + assert not local_fs.isdir(local_join(target, "nesteddir")) + assert not local_fs.exists(local_join(target, "nesteddir", "nestedfile")) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm( + [ + local_join(target, "subfile1"), + local_join(target, "subfile2"), + ], + recursive=True, + ) + assert local_fs.ls(target) == [] + + # With 
recursive + for glob, recursive in zip(["*", "**"], [True, False]): + fs.get(fs_join(source, "subdir", glob), t, recursive=recursive) + assert local_fs.isfile(local_join(target, "subfile1")) + assert local_fs.isfile(local_join(target, "subfile2")) + assert local_fs.isdir(local_join(target, "nesteddir")) + assert local_fs.isfile(local_join(target, "nesteddir", "nestedfile")) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm( + [ + local_join(target, "subfile1"), + local_join(target, "subfile2"), + local_join(target, "nesteddir"), + ], + recursive=True, + ) + assert local_fs.ls(target) == [] + + # Limit recursive by maxdepth + fs.get( + fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1 + ) + assert local_fs.isfile(local_join(target, "subfile1")) + assert local_fs.isfile(local_join(target, "subfile2")) + assert not local_fs.exists(local_join(target, "nesteddir")) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm( + [ + local_join(target, "subfile1"), + local_join(target, "subfile2"), + ], + recursive=True, + ) + assert local_fs.ls(target) == [] + + def test_get_glob_to_new_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1h + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + for target_slash in [False, True]: + t = fs_join(target, "newdir") + if target_slash: + t += "/" + + # Without recursive + fs.get(fs_join(source, "subdir", "*"), t) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + assert local_fs.isfile(local_join(target, "newdir", "subfile2")) + assert not local_fs.exists(local_join(target, "newdir", "nesteddir")) + assert not local_fs.exists( + local_join(target, "newdir", "nesteddir", "nestedfile") + ) + assert not local_fs.exists(local_join(target, "subdir")) + assert not 
local_fs.exists(local_join(target, "newdir", "subdir")) + + local_fs.rm(local_join(target, "newdir"), recursive=True) + assert local_fs.ls(target) == [] + + # With recursive + for glob, recursive in zip(["*", "**"], [True, False]): + fs.get(fs_join(source, "subdir", glob), t, recursive=recursive) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + assert local_fs.isfile(local_join(target, "newdir", "subfile2")) + assert local_fs.isdir(local_join(target, "newdir", "nesteddir")) + assert local_fs.isfile( + local_join(target, "newdir", "nesteddir", "nestedfile") + ) + assert not local_fs.exists(local_join(target, "subdir")) + assert not local_fs.exists(local_join(target, "newdir", "subdir")) + + local_fs.rm(local_join(target, "newdir"), recursive=True) + assert not local_fs.exists(local_join(target, "newdir")) + + # Limit recursive by maxdepth + fs.get( + fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1 + ) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + assert local_fs.isfile(local_join(target, "newdir", "subfile2")) + assert not local_fs.exists(local_join(target, "newdir", "nesteddir")) + assert not local_fs.exists(local_join(target, "subdir")) + assert not local_fs.exists(local_join(target, "newdir", "subdir")) + + local_fs.rm(local_fs.ls(target, detail=False), recursive=True) + assert not local_fs.exists(local_join(target, "newdir")) + + @pytest.mark.parametrize( + GLOB_EDGE_CASES_TESTS["argnames"], + GLOB_EDGE_CASES_TESTS["argvalues"], + ) + def test_get_glob_edge_cases( + self, + path, + recursive, + maxdepth, + expected, + fs, + fs_join, + fs_glob_edge_cases_files, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1g + source = fs_glob_edge_cases_files + + target = local_target + + for new_dir, target_slash in product([True, False], [True, False]): + local_fs.mkdir(target) + + t = 
local_join(target, "newdir") if new_dir else target + t = t + "/" if target_slash else t + + fs.get(fs_join(source, path), t, recursive=recursive, maxdepth=maxdepth) + + output = local_fs.find(target) + if new_dir: + prefixed_expected = [ + make_path_posix(local_join(target, "newdir", p)) for p in expected + ] + else: + prefixed_expected = [ + make_path_posix(local_join(target, p)) for p in expected + ] + assert sorted(output) == sorted(prefixed_expected) + + try: + local_fs.rm(target, recursive=True) + except FileNotFoundError: + pass + + def test_get_list_of_files_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 2a + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + source_files = [ + fs_join(source, "file1"), + fs_join(source, "file2"), + fs_join(source, "subdir", "subfile1"), + ] + + for target_slash in [False, True]: + t = target + "/" if target_slash else target + + fs.get(source_files, t) + assert local_fs.isfile(local_join(target, "file1")) + assert local_fs.isfile(local_join(target, "file2")) + assert local_fs.isfile(local_join(target, "subfile1")) + + local_fs.rm( + [ + local_join(target, "file1"), + local_join(target, "file2"), + local_join(target, "subfile1"), + ], + recursive=True, + ) + assert local_fs.ls(target) == [] + + def test_get_list_of_files_to_new_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 2b + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + source_files = [ + fs_join(source, "file1"), + fs_join(source, "file2"), + fs_join(source, "subdir", "subfile1"), + ] + + fs.get(source_files, local_join(target, "newdir") + "/") # Note trailing slash + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "file1")) + assert 
local_fs.isfile(local_join(target, "newdir", "file2")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + + def test_get_directory_recursive( + self, fs, fs_join, fs_path, local_fs, local_join, local_target + ): + # https://github.com/fsspec/filesystem_spec/issues/1062 + # Recursive cp/get/put of source directory into non-existent target directory. + src = fs_join(fs_path, "src") + src_file = fs_join(src, "file") + fs.mkdir(src) + fs.touch(src_file) + + target = local_target + + # get without slash + assert not local_fs.exists(target) + for loop in range(2): + fs.get(src, target, recursive=True) + assert local_fs.isdir(target) + + if loop == 0: + assert local_fs.isfile(local_join(target, "file")) + assert not local_fs.exists(local_join(target, "src")) + else: + assert local_fs.isfile(local_join(target, "file")) + assert local_fs.isdir(local_join(target, "src")) + assert local_fs.isfile(local_join(target, "src", "file")) + + local_fs.rm(target, recursive=True) + + # get with slash + assert not local_fs.exists(target) + for loop in range(2): + fs.get(src + "/", target, recursive=True) + assert local_fs.isdir(target) + assert local_fs.isfile(local_join(target, "file")) + assert not local_fs.exists(local_join(target, "src")) + + def test_get_directory_without_files_with_same_name_prefix( + self, + fs, + fs_join, + local_fs, + local_join, + local_target, + fs_dir_and_file_with_same_name_prefix, + ): + # Create the test dirs + source = fs_dir_and_file_with_same_name_prefix + target = local_target + + # Test without glob + fs.get(fs_join(source, "subdir"), target, recursive=True) + + assert local_fs.isfile(local_join(target, "subfile.txt")) + assert not local_fs.isfile(local_join(target, "subdir.txt")) + + local_fs.rm([local_join(target, "subfile.txt")]) + assert local_fs.ls(target) == [] + + # Test with glob + fs.get(fs_join(source, "subdir*"), target, recursive=True) + + assert local_fs.isdir(local_join(target, "subdir")) + assert 
local_fs.isfile(local_join(target, "subdir", "subfile.txt")) + assert local_fs.isfile(local_join(target, "subdir.txt")) + + def test_get_with_source_and_destination_as_list( + self, + fs, + fs_join, + local_fs, + local_join, + local_target, + fs_10_files_with_hashed_names, + ): + # Create the test dir + source = fs_10_files_with_hashed_names + target = local_target + + # Create list of files for source and destination + source_files = [] + destination_files = [] + for i in range(10): + hashed_i = md5(str(i).encode("utf-8")).hexdigest() + source_files.append(fs_join(source, f"{hashed_i}.txt")) + destination_files.append( + make_path_posix(local_join(target, f"{hashed_i}.txt")) + ) + + # Copy and assert order was kept + fs.get(rpath=source_files, lpath=destination_files) + + for i in range(10): + file_content = local_fs.cat(destination_files[i]).decode("utf-8") + assert file_content == str(i) diff --git a/vllm/lib/python3.10/site-packages/fsspec/tests/abstract/mv.py b/vllm/lib/python3.10/site-packages/fsspec/tests/abstract/mv.py new file mode 100644 index 0000000000000000000000000000000000000000..39f6caa3de815e024fa84de2acecc986c823ed29 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/fsspec/tests/abstract/mv.py @@ -0,0 +1,57 @@ +import os + +import pytest + +import fsspec + + +def test_move_raises_error_with_tmpdir(tmpdir): + # Create a file in the temporary directory + source = tmpdir.join("source_file.txt") + source.write("content") + + # Define a destination that simulates a protected or invalid path + destination = tmpdir.join("non_existent_directory/destination_file.txt") + + # Instantiate the filesystem (assuming the local file system interface) + fs = fsspec.filesystem("file") + + # Use the actual file paths as string + with pytest.raises(FileNotFoundError): + fs.mv(str(source), str(destination)) + + +@pytest.mark.parametrize("recursive", (True, False)) +def test_move_raises_error_with_tmpdir_permission(recursive, tmpdir): + # Create a file in the 
temporary directory + source = tmpdir.join("source_file.txt") + source.write("content") + + # Create a protected directory (non-writable) + protected_dir = tmpdir.mkdir("protected_directory") + protected_path = str(protected_dir) + + # Set the directory to read-only + if os.name == "nt": + os.system(f'icacls "{protected_path}" /deny Everyone:(W)') + else: + os.chmod(protected_path, 0o555) # Sets the directory to read-only + + # Define a destination inside the protected directory + destination = protected_dir.join("destination_file.txt") + + # Instantiate the filesystem (assuming the local file system interface) + fs = fsspec.filesystem("file") + + # Try to move the file to the read-only directory, expecting a permission error + with pytest.raises(PermissionError): + fs.mv(str(source), str(destination), recursive=recursive) + + # Assert the file was not created in the destination + assert not os.path.exists(destination) + + # Cleanup: Restore permissions so the directory can be cleaned up + if os.name == "nt": + os.system(f'icacls "{protected_path}" /remove:d Everyone') + else: + os.chmod(protected_path, 0o755) # Restore write permission for cleanup diff --git a/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/INSTALLER b/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/LICENSE b/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..33b1352017fa3939ee32c08e027f57deefb9470f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2014-2022, imageio developers +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/METADATA b/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..62f66512a286ea0c1f48f6280bcd23faaa667cb4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/METADATA @@ -0,0 +1,142 @@ +Metadata-Version: 2.1 +Name: imageio +Version: 2.37.0 +Summary: Library for reading and writing a wide range of image, video, scientific, and volumetric data formats. 
+Home-page: https://github.com/imageio/imageio +Download-URL: http://pypi.python.org/pypi/imageio +Author: imageio contributors +Author-email: almar.klein@gmail.com +License: BSD-2-Clause +Keywords: image video volume imread imwrite io animation ffmpeg +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Provides: imageio +Requires-Python: >=3.9 +License-File: LICENSE +Requires-Dist: numpy +Requires-Dist: pillow>=8.3.2 +Provides-Extra: all-plugins +Requires-Dist: astropy; extra == "all-plugins" +Requires-Dist: av; extra == "all-plugins" +Requires-Dist: imageio-ffmpeg; extra == "all-plugins" +Requires-Dist: numpy>2; extra == "all-plugins" +Requires-Dist: pillow-heif; extra == "all-plugins" +Requires-Dist: psutil; extra == "all-plugins" +Requires-Dist: rawpy; extra == "all-plugins" +Requires-Dist: tifffile; extra == "all-plugins" +Provides-Extra: all-plugins-pypy +Requires-Dist: av; extra == "all-plugins-pypy" +Requires-Dist: imageio-ffmpeg; extra == "all-plugins-pypy" +Requires-Dist: pillow-heif; extra == "all-plugins-pypy" +Requires-Dist: psutil; extra == "all-plugins-pypy" +Requires-Dist: tifffile; extra == "all-plugins-pypy" +Provides-Extra: bsdf +Provides-Extra: build +Requires-Dist: wheel; extra == "build" +Provides-Extra: dev +Requires-Dist: pytest; extra == 
"dev" +Requires-Dist: pytest-cov; extra == "dev" +Requires-Dist: fsspec[github]; extra == "dev" +Requires-Dist: black; extra == "dev" +Requires-Dist: flake8; extra == "dev" +Provides-Extra: dicom +Provides-Extra: docs +Requires-Dist: sphinx<6; extra == "docs" +Requires-Dist: numpydoc; extra == "docs" +Requires-Dist: pydata-sphinx-theme; extra == "docs" +Provides-Extra: feisem +Provides-Extra: ffmpeg +Requires-Dist: imageio-ffmpeg; extra == "ffmpeg" +Requires-Dist: psutil; extra == "ffmpeg" +Provides-Extra: fits +Requires-Dist: astropy; extra == "fits" +Provides-Extra: freeimage +Provides-Extra: full +Requires-Dist: astropy; extra == "full" +Requires-Dist: av; extra == "full" +Requires-Dist: black; extra == "full" +Requires-Dist: flake8; extra == "full" +Requires-Dist: fsspec[github]; extra == "full" +Requires-Dist: gdal; extra == "full" +Requires-Dist: imageio-ffmpeg; extra == "full" +Requires-Dist: itk; extra == "full" +Requires-Dist: numpy>2; extra == "full" +Requires-Dist: numpydoc; extra == "full" +Requires-Dist: pillow-heif; extra == "full" +Requires-Dist: psutil; extra == "full" +Requires-Dist: pydata-sphinx-theme; extra == "full" +Requires-Dist: pytest; extra == "full" +Requires-Dist: pytest-cov; extra == "full" +Requires-Dist: rawpy; extra == "full" +Requires-Dist: sphinx<6; extra == "full" +Requires-Dist: tifffile; extra == "full" +Requires-Dist: wheel; extra == "full" +Provides-Extra: gdal +Requires-Dist: gdal; extra == "gdal" +Provides-Extra: itk +Requires-Dist: itk; extra == "itk" +Provides-Extra: linting +Requires-Dist: black; extra == "linting" +Requires-Dist: flake8; extra == "linting" +Provides-Extra: lytro +Provides-Extra: numpy +Provides-Extra: pillow +Provides-Extra: pillow-heif +Requires-Dist: pillow-heif; extra == "pillow-heif" +Provides-Extra: pyav +Requires-Dist: av; extra == "pyav" +Provides-Extra: rawpy +Requires-Dist: rawpy; extra == "rawpy" +Requires-Dist: numpy>2; extra == "rawpy" +Provides-Extra: simpleitk +Provides-Extra: spe 
+Provides-Extra: swf +Provides-Extra: test +Requires-Dist: pytest; extra == "test" +Requires-Dist: pytest-cov; extra == "test" +Requires-Dist: fsspec[github]; extra == "test" +Provides-Extra: tifffile +Requires-Dist: tifffile; extra == "tifffile" + + +.. image:: https://github.com/imageio/imageio/workflows/CI/badge.svg + :target: https://github.com/imageio/imageio/actions + + +Imageio is a Python library that provides an easy interface to read and +write a wide range of image data, including animated images, volumetric +data, and scientific formats. It is cross-platform, runs on Python 3.9+, +and is easy to install. + +Main website: https://imageio.readthedocs.io/ + + +Release notes: https://github.com/imageio/imageio/blob/master/CHANGELOG.md + +Example: + +.. code-block:: python + + >>> import imageio + >>> im = imageio.imread('imageio:astronaut.png') + >>> im.shape # im is a numpy array + (512, 512, 3) + >>> imageio.imwrite('astronaut-gray.jpg', im[:, :, 0]) + +See the `API Reference `_ +or `examples `_ +for more information. 
diff --git a/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/RECORD b/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..3937085b1299be004ca90dd59d845ae8ce18ecfa --- /dev/null +++ b/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/RECORD @@ -0,0 +1,117 @@ +../../../bin/imageio_download_bin,sha256=j_kuRpm-_tiMA9d8ULEwclGwH_qEFhioDyOKHcWtaxQ,251 +../../../bin/imageio_remove_bin,sha256=go5n4au3pazXZYsjIMuZIhsmWtH6jHvimlLFmb6_6C8,247 +imageio-2.37.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +imageio-2.37.0.dist-info/LICENSE,sha256=rlmepQpJTvtyXkIKqzXR91kgDP5BhrbGSjC6Sds_0GQ,1307 +imageio-2.37.0.dist-info/METADATA,sha256=Gdtdu5UVCepo14mgXJFMii_9WVxfgXHpalIVCccaMkg,5197 +imageio-2.37.0.dist-info/RECORD,, +imageio-2.37.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +imageio-2.37.0.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92 +imageio-2.37.0.dist-info/entry_points.txt,sha256=0-yB6XGfrx1OMPw_xigPramTcwi5M4jX6L5Edrz0OoU,130 +imageio-2.37.0.dist-info/top_level.txt,sha256=iSUjc-wEw-xbMTvMOSKg85n0-E7Ms--Mo4FLMC-J2YM,8 +imageio/__init__.py,sha256=EXKZMIL6MLLQ_aMuXONkxVSCBcM9bagNtrqauzwzLyI,3272 +imageio/__main__.py,sha256=s5nidb9wRZ6AbimHTPHULt3sTXPx4mqNil67KJHZvd4,5393 +imageio/__pycache__/__init__.cpython-310.pyc,, +imageio/__pycache__/__main__.cpython-310.pyc,, +imageio/__pycache__/freeze.cpython-310.pyc,, +imageio/__pycache__/testing.cpython-310.pyc,, +imageio/__pycache__/typing.cpython-310.pyc,, +imageio/__pycache__/v2.cpython-310.pyc,, +imageio/__pycache__/v3.cpython-310.pyc,, +imageio/config/__init__.py,sha256=8NOpL5ePrkiioJb9hRBw3rydc4iNZkMwp7VdQlP4jDc,307 +imageio/config/__pycache__/__init__.cpython-310.pyc,, +imageio/config/__pycache__/extensions.cpython-310.pyc,, +imageio/config/__pycache__/plugins.cpython-310.pyc,, 
+imageio/config/extensions.py,sha256=_-jpfdRI8PtELEfqlKGEZNhjO27yp6TuZ2clUFehzn8,47022 +imageio/config/extensions.pyi,sha256=sLrA-wt09kPHBDJP79tGtEOX7XTcEEjRzA70O8BCsD0,605 +imageio/config/plugins.py,sha256=fh4AiZof4Z_chpixhCi37OHZEnNalt58v6CprgEmlOE,20277 +imageio/config/plugins.pyi,sha256=pzH8pacqU5uldsvYOee_nhd2Hkk3mR8VQBtjeVnkkHY,706 +imageio/core/__init__.py,sha256=PSkGH8K76ntSWhwM4j7W49UmCSZf_OGaSl9fNbQP7uQ,639 +imageio/core/__pycache__/__init__.cpython-310.pyc,, +imageio/core/__pycache__/fetching.cpython-310.pyc,, +imageio/core/__pycache__/findlib.cpython-310.pyc,, +imageio/core/__pycache__/format.cpython-310.pyc,, +imageio/core/__pycache__/imopen.cpython-310.pyc,, +imageio/core/__pycache__/legacy_plugin_wrapper.cpython-310.pyc,, +imageio/core/__pycache__/request.cpython-310.pyc,, +imageio/core/__pycache__/util.cpython-310.pyc,, +imageio/core/__pycache__/v3_plugin_api.cpython-310.pyc,, +imageio/core/fetching.py,sha256=r81yBsJMqkwAXeVAuQuAzbk9etWxQUEUe4__UUjpQpc,9176 +imageio/core/findlib.py,sha256=Zrhs0rEyp8p8iSIuCoBco0dCaB5dxJVZ4lRgv82Sqm0,5552 +imageio/core/format.py,sha256=glQcJOZHEOST3u0jOa338ZxJBX_daEe6xl7-UKxuU6E,30917 +imageio/core/format.pyi,sha256=5BZF-xwp5BmG8C5ahfL48z_a2MITN0509Uf6f1phZRw,3336 +imageio/core/imopen.py,sha256=SA4OJj93B09CHsKSILdH1w3zdVWvRSopNWlGlS0f4t0,9752 +imageio/core/imopen.pyi,sha256=8jLI2tKUTqFe79mccw95fRAIanJPHi6gQmzB2ClESlk,2215 +imageio/core/legacy_plugin_wrapper.py,sha256=CYGXhJY-18HkVYqyzlepM7NcZ9VLvBjFjNj64HOBqBM,12136 +imageio/core/legacy_plugin_wrapper.pyi,sha256=ENmdth_Avp2yTzuyInGWT2QXgAv72RrFRd6QH71LVqU,1064 +imageio/core/request.py,sha256=udbseSPuVNuv8AAam2j5Q6UNXngAhVq15T2l020amKY,27047 +imageio/core/request.pyi,sha256=ivqAXs3UfxhuXQfg8qsAtEVymCsppPwadztFzSXpIAo,2315 +imageio/core/util.py,sha256=lTVJMIbxF-6fInx0ExLuCeMDSVMfSUiSc6WTdP0bt3M,18739 +imageio/core/v3_plugin_api.py,sha256=w8wUjlT7_N6aU76DYGF3ubYYfUHTyfStvK5_xosZLPQ,15560 +imageio/freeze.py,sha256=hi9MNZz-ridgQBWcAqnd92sULek2lgmBSTmuott5lus,170 
+imageio/plugins/__init__.py,sha256=GSxtio0ph5QHP2asdLvyzW8lVfiRqOii8kaqYsBO9CE,3469 +imageio/plugins/__pycache__/__init__.cpython-310.pyc,, +imageio/plugins/__pycache__/_bsdf.cpython-310.pyc,, +imageio/plugins/__pycache__/_dicom.cpython-310.pyc,, +imageio/plugins/__pycache__/_freeimage.cpython-310.pyc,, +imageio/plugins/__pycache__/_swf.cpython-310.pyc,, +imageio/plugins/__pycache__/_tifffile.cpython-310.pyc,, +imageio/plugins/__pycache__/bsdf.cpython-310.pyc,, +imageio/plugins/__pycache__/dicom.cpython-310.pyc,, +imageio/plugins/__pycache__/example.cpython-310.pyc,, +imageio/plugins/__pycache__/feisem.cpython-310.pyc,, +imageio/plugins/__pycache__/ffmpeg.cpython-310.pyc,, +imageio/plugins/__pycache__/fits.cpython-310.pyc,, +imageio/plugins/__pycache__/freeimage.cpython-310.pyc,, +imageio/plugins/__pycache__/freeimagemulti.cpython-310.pyc,, +imageio/plugins/__pycache__/gdal.cpython-310.pyc,, +imageio/plugins/__pycache__/grab.cpython-310.pyc,, +imageio/plugins/__pycache__/lytro.cpython-310.pyc,, +imageio/plugins/__pycache__/npz.cpython-310.pyc,, +imageio/plugins/__pycache__/opencv.cpython-310.pyc,, +imageio/plugins/__pycache__/pillow.cpython-310.pyc,, +imageio/plugins/__pycache__/pillow_info.cpython-310.pyc,, +imageio/plugins/__pycache__/pillow_legacy.cpython-310.pyc,, +imageio/plugins/__pycache__/pillowmulti.cpython-310.pyc,, +imageio/plugins/__pycache__/pyav.cpython-310.pyc,, +imageio/plugins/__pycache__/rawpy.cpython-310.pyc,, +imageio/plugins/__pycache__/simpleitk.cpython-310.pyc,, +imageio/plugins/__pycache__/spe.cpython-310.pyc,, +imageio/plugins/__pycache__/swf.cpython-310.pyc,, +imageio/plugins/__pycache__/tifffile.cpython-310.pyc,, +imageio/plugins/__pycache__/tifffile_v3.cpython-310.pyc,, +imageio/plugins/_bsdf.py,sha256=b-QjkZvz9DPDbygiKhee-47Ld2eOqxpYEdZ1mnrRPJ4,32753 +imageio/plugins/_dicom.py,sha256=NHXUH0e0gQfWpb8efxMg4hdH0O_tBPcjtlD7Djptu5o,34074 +imageio/plugins/_freeimage.py,sha256=GD25ZqqvbFnBILPRYHrTb5qbFsvXBVKv_qIE3139D68,51740 
+imageio/plugins/_swf.py,sha256=kh3H2v98bgHpVagGNbhGUodh0s-weiESraX6qzMnD2k,25760 +imageio/plugins/_tifffile.py,sha256=J_j22qMzL8lRT85ykGRFGGkA5LPRi3YUz66wIH4qt90,371588 +imageio/plugins/bsdf.py,sha256=spISvLLVH319wDJ8YhYcvDTaJe2acElgWSvgqEkpd_g,12852 +imageio/plugins/dicom.py,sha256=mQYNbTyum4jVhjZQ8TU-4A5csHpQfT-BRBBCP5fu6Zs,12621 +imageio/plugins/example.py,sha256=4POb_LDQtSxHWxiflGqGKKKKrpItqLIFQeU8x7tro-c,5501 +imageio/plugins/feisem.py,sha256=AKwZv7Zac0_grnr-wnzU7R0Zf2KSUe91k06evPa1NI8,3360 +imageio/plugins/ffmpeg.py,sha256=N8Qq1TU5gr7U9IM-FCEuM9VIy1Jv875OC_XorStoOPI,29930 +imageio/plugins/fits.py,sha256=XnlmeC79sIiIPd_7IDx05-p3-b2unO4CVR0nWAA4ph0,4531 +imageio/plugins/freeimage.py,sha256=SuzYuGvCtZIiXIr51dWRTl5CATzRUqb8pNCSIg9YZv8,14645 +imageio/plugins/freeimagemulti.py,sha256=7jW3mJX-ZVnDqe2veIvU9wPY_x0EBOmPKP8ppPxRO_M,11288 +imageio/plugins/gdal.py,sha256=r2Ux7MQeHCUsmdk0aGENzGX8M5hCBU7NJomcf6G8FCU,1653 +imageio/plugins/grab.py,sha256=g6KbKVQUquHro_BW6He7NNmivVV-UtcsCJoDt3rdly0,2776 +imageio/plugins/lytro.py,sha256=V3dToE-eV6jLhtae26_uHHgOx6O1LsOo0hm7nnRptMM,25310 +imageio/plugins/npz.py,sha256=7ZQr-4lQEKbfjaF6rOmpq9pQgDTUHvkZa_NHZkJWBQo,2670 +imageio/plugins/opencv.py,sha256=C2nBQQFDXuz6LOyJ1P3-S6e_7h-pJgLow7h7w4Si2tg,11629 +imageio/plugins/pillow.py,sha256=4siuR0UENadfQgdQ2z5bFWX464KMzMcfqIEKEBDzt6M,22318 +imageio/plugins/pillow_info.py,sha256=Bt5iJtQnAh6mGViPIxhxRQPNidqay9-6BleAJZkhN1w,36624 +imageio/plugins/pillow_legacy.py,sha256=0tgk-8b5gduFjvNQN5WoK_-7EbVJ9i3-_kUVhaRLVnY,31580 +imageio/plugins/pillowmulti.py,sha256=-wsWpq0j2WXDgQGbyUuzCmw7iqSDz7e6AYqYhs46ZE8,11807 +imageio/plugins/pyav.py,sha256=R_shCM9q3QVvdKtuH53jcftLfNSL-2G0jAFKpCWxQkU,46354 +imageio/plugins/rawpy.py,sha256=o34WkwoU0LLpNDhj2kNuPS9ju6f7CBN-jKY9n0jLeDs,5948 +imageio/plugins/simpleitk.py,sha256=ldQWjkiCSZPoUnN87MtUqRIMMcIKmk8ZUeyDCQhnpG0,4107 +imageio/plugins/spe.py,sha256=UyXgHZV-3gwwU-RmJUhgDnJ843wt9H3S3Fjs84faz38,32172 
+imageio/plugins/swf.py,sha256=0B9f-HF528OcHXTIF3nptoSJUu4GNId03rFLfFFOaFk,11756 +imageio/plugins/tifffile.py,sha256=m8qgNy-lJkwHwKkyp3pZn2xYsnRRwZ8FVMpM-BIs6dI,20665 +imageio/plugins/tifffile_v3.py,sha256=Vs2ngBBptUoJ6QpT9EjyNd4-dih8zzGEvcq2mRNYFXg,14335 +imageio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +imageio/testing.py,sha256=tkRPxZZpG68q_MAIux8WE8QeKbhbq6rDPVfCDsof1Ms,1597 +imageio/typing.py,sha256=qrvyFrVIs21bZCE0x802l1R-xCV4DlCNaTzPiJEZbzc,349 +imageio/v2.py,sha256=1KJ5z8Ji2nnAdy_K3vIpysG2Kg7rIcPiadNG1pwKx-E,21563 +imageio/v2.pyi,sha256=ROazbwu1rSJLBaEtXmUG2oT9BMr7ZlyyW26twgFWx5E,2250 +imageio/v3.py,sha256=ZE0IlERPT_4wryYqUOD4-LLc6dVpDZXV6N6JEQtbMiQ,9267 +imageio/v3.pyi,sha256=AtLP0IWqS-sX1qDyHPdjCCIsKGwXU5z41XOXzUj2pGQ,1344 diff --git a/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/REQUESTED b/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/WHEEL b/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..79d5c89a71989389294854aa34e329701325f8b0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.45.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/entry_points.txt b/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..aa30161a1450bf11c7bfa6c548a23df8ff3636f5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +imageio_download_bin = imageio.__main__:download_bin_main +imageio_remove_bin = 
imageio.__main__:remove_bin_main diff --git a/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/top_level.txt b/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..a464e4cd37851148ffe3b7ae88921620c50cfe03 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/imageio-2.37.0.dist-info/top_level.txt @@ -0,0 +1 @@ +imageio