diff --git a/.gitattributes b/.gitattributes index c12353310b14bb699a00799057cd7663d8af776b..b626fc80a349ade22e6d083b4ab636aa73fa4ed0 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1439,3 +1439,4 @@ parrot/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_function_ba vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text vglm/bin/python filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__init__.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c779ca7c381e8746c07cbe700b20bb3a2559a9ff --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__init__.py @@ -0,0 +1,44 @@ +from cupyx.scipy.sparse._base import issparse # NOQA +from cupyx.scipy.sparse._base import isspmatrix # NOQA +from cupyx.scipy.sparse._base import spmatrix # NOQA +from cupyx.scipy.sparse._base import SparseWarning # NOQA +from cupyx.scipy.sparse._base import SparseEfficiencyWarning # NOQA +from cupyx.scipy.sparse._coo import coo_matrix # NOQA +from cupyx.scipy.sparse._coo import isspmatrix_coo # NOQA +from cupyx.scipy.sparse._csc import csc_matrix # NOQA +from cupyx.scipy.sparse._csc import isspmatrix_csc # NOQA +from cupyx.scipy.sparse._csr import csr_matrix # NOQA +from cupyx.scipy.sparse._csr import isspmatrix_csr # NOQA +from cupyx.scipy.sparse._dia import dia_matrix # NOQA +from cupyx.scipy.sparse._dia import isspmatrix_dia # NOQA + +from cupyx.scipy.sparse._construct import eye # NOQA +from cupyx.scipy.sparse._construct import identity # NOQA +from cupyx.scipy.sparse._construct 
import rand # NOQA +from cupyx.scipy.sparse._construct import random # NOQA +from cupyx.scipy.sparse._construct import spdiags # NOQA +from cupyx.scipy.sparse._construct import diags # NOQA + +from cupyx.scipy.sparse._construct import bmat # NOQA +from cupyx.scipy.sparse._construct import hstack # NOQA +from cupyx.scipy.sparse._construct import vstack # NOQA + +# TODO(unno): implement bsr_matrix +# TODO(unno): implement dok_matrix +# TODO(unno): implement lil_matrix + +from cupyx.scipy.sparse._construct import kron # NOQA +from cupyx.scipy.sparse._construct import kronsum # NOQA +# TODO(unno): implement diags +# TODO(unno): implement block_diag + +from cupyx.scipy.sparse._extract import find # NOQA +from cupyx.scipy.sparse._extract import tril # NOQA +from cupyx.scipy.sparse._extract import triu # NOQA + +# TODO(unno): implement save_npz +# TODO(unno): implement load_npz + +# TODO(unno): implement isspmatrix_bsr(x) +# TODO(unno): implement isspmatrix_lil(x) +# TODO(unno): implement isspmatrix_dok(x) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_base.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..029d20814987e0267ba29649dbd0eebb32c5cd05 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_base.py @@ -0,0 +1,585 @@ +import numpy + +import cupy +from cupy import _core +from cupyx.scipy.sparse import _util +from cupyx.scipy.sparse import _sputils + + +try: + import scipy.sparse as _sparse + SparseWarning = _sparse.SparseWarning + SparseEfficiencyWarning = _sparse.SparseEfficiencyWarning +except ImportError: + class SparseWarning(Warning): # type: ignore + pass + + class SparseEfficiencyWarning(SparseWarning): # type: ignore + pass + + +# TODO(asi1024): Implement _spbase + + +class spmatrix(object): + + """Base class of all sparse matrixes. 
+ + See :class:`scipy.sparse.spmatrix` + """ + + __array_priority__ = 101 + + def __init__(self, maxprint=50): + if self.__class__ == spmatrix: + raise ValueError( + 'This class is not intended to be instantiated directly.') + self.maxprint = maxprint + + @property + def device(self): + """CUDA device on which this array resides.""" + raise NotImplementedError + + def get(self, stream=None): + """Return a copy of the array on host memory. + + Args: + stream (cupy.cuda.Stream): CUDA stream object. If it is given, the + copy runs asynchronously. Otherwise, the copy is synchronous. + + Returns: + scipy.sparse.spmatrix: An array on host memory. + + """ + raise NotImplementedError + + def __len__(self): + raise TypeError('sparse matrix length is ambiguous; ' + 'use getnnz() or shape[0]') + + def __str__(self): + # TODO(unno): Do not use get method which is only available when scipy + # is installed. + return str(self.get()) + + def __iter__(self): + for r in range(self.shape[0]): + yield self[r, :] + + def __bool__(self): + if self.shape == (1, 1): + return self.nnz != 0 + else: + raise ValueError('The truth value of an array with more than one ' + 'element is ambiguous. 
Use a.any() or a.all().') + + __nonzero__ = __bool__ + + def __eq__(self, other): + return self.tocsr().__eq__(other) + + def __ne__(self, other): + return self.tocsr().__ne__(other) + + def __lt__(self, other): + return self.tocsr().__lt__(other) + + def __gt__(self, other): + return self.tocsr().__gt__(other) + + def __le__(self, other): + return self.tocsr().__le__(other) + + def __ge__(self, other): + return self.tocsr().__ge__(other) + + def __abs__(self): + return self.tocsr().__abs__() + + def __add__(self, other): + return self.tocsr().__add__(other) + + def __radd__(self, other): + return self.tocsr().__radd__(other) + + def __sub__(self, other): + return self.tocsr().__sub__(other) + + def __rsub__(self, other): + return self.tocsr().__rsub__(other) + + def __mul__(self, other): + return self.tocsr().__mul__(other) + + def __rmul__(self, other): + if cupy.isscalar(other) or isdense(other) and other.ndim == 0: + return self * other + else: + try: + tr = other.T + except AttributeError: + return NotImplemented + return (self.T * tr).T + + # matmul (@) operator + def __matmul__(self, other): + if _util.isscalarlike(other): + raise ValueError('Scalar operands are not allowed, ' + 'use \'*\' instead') + return self.__mul__(other) + + def __rmatmul__(self, other): + if _util.isscalarlike(other): + raise ValueError('Scalar operands are not allowed, ' + 'use \'*\' instead') + return self.__rmul__(other) + + def __div__(self, other): + return self.tocsr().__div__(other) + + def __rdiv__(self, other): + return self.tocsr().__rdiv__(other) + + def __truediv__(self, other): + return self.tocsr().__truediv__(other) + + def __rtruediv__(self, other): + return self.tocsr().__rtruediv__(other) + + def __neg__(self): + return -self.tocsr() + + def __iadd__(self, other): + return NotImplemented + + def __isub__(self, other): + return NotImplemented + + def __imul__(self, other): + return NotImplemented + + def __idiv__(self, other): + return self.__itruediv__(other) + + 
def __itruediv__(self, other): + return NotImplemented + + def __pow__(self, other): + """Calculates n-th power of the matrix. + + This method calculates n-th power of a given matrix. The matrix must + be a squared matrix, and a given exponent must be an integer. + + Args: + other (int): Exponent. + + Returns: + cupyx.scipy.sparse.spmatrix: A sparse matrix representing n-th + power of this matrix. + + """ + m, n = self.shape + if m != n: + raise TypeError('matrix is not square') + + if _util.isintlike(other): + other = int(other) + if other < 0: + raise ValueError('exponent must be >= 0') + + if other == 0: + import cupyx.scipy.sparse + return cupyx.scipy.sparse.identity( + m, dtype=self.dtype, format='csr') + elif other == 1: + return self.copy() + else: + tmp = self.__pow__(other // 2) + if other % 2: + return self * tmp * tmp + else: + return tmp * tmp + elif _util.isscalarlike(other): + raise ValueError('exponent must be an integer') + else: + return NotImplemented + + @property + def A(self): + """Dense ndarray representation of this matrix. + + This property is equivalent to + :meth:`~cupyx.scipy.sparse.spmatrix.toarray` method. + + """ + return self.toarray() + + @property + def T(self): + return self.transpose() + + @property + def H(self): + return self.getH() + + @property + def ndim(self): + return 2 + + @property + def size(self): + return self.getnnz() + + @property + def nnz(self): + return self.getnnz() + + @property + def shape(self): + return self.get_shape() + + @shape.setter + def shape(self, value): + self.set_shape(value) + + def asformat(self, format): + """Return this matrix in a given sparse format. + + Args: + format (str or None): Format you need. + """ + if format is None or format == self.format: + return self + else: + return getattr(self, 'to' + format)() + + def asfptype(self): + """Upcasts matrix to a floating point format. + + When the matrix has floating point type, the method returns itself. 
+ Otherwise it makes a copy with floating point type and the same format. + + Returns: + cupyx.scipy.sparse.spmatrix: A matrix with float type. + + """ + if self.dtype.kind == 'f': + return self + else: + typ = numpy.promote_types(self.dtype, 'f') + return self.astype(typ) + + def astype(self, t): + """Casts the array to given data type. + + Args: + t: Type specifier. + + Returns: + cupyx.scipy.sparse.spmatrix: + A copy of the array with the given type and the same format. + + """ + return self.tocsr().astype(t).asformat(self.format) + + def conj(self, copy=True): + """Element-wise complex conjugation. + + If the matrix is of non-complex data type and `copy` is False, + this method does nothing and the data is not copied. + + Args: + copy (bool): + If True, the result is guaranteed to not share data with self. + + Returns: + cupyx.scipy.sparse.spmatrix : The element-wise complex conjugate. + + """ + if self.dtype.kind == 'c': + return self.tocsr(copy=copy).conj(copy=False) + elif copy: + return self.copy() + else: + return self + + def conjugate(self, copy=True): + return self.conj(copy=copy) + + conjugate.__doc__ = conj.__doc__ + + def copy(self): + """Returns a copy of this matrix. + + No data/indices will be shared between the returned value and current + matrix. + """ + return self.__class__(self, copy=True) + + def count_nonzero(self): + """Number of non-zero entries, equivalent to""" + raise NotImplementedError + + def diagonal(self, k=0): + """Returns the k-th diagonal of the matrix. + + Args: + k (int, optional): Which diagonal to get, corresponding to elements + a[i, i+k]. Default: 0 (the main diagonal). + + Returns: + cupy.ndarray : The k-th diagonal. 
+ """ + return self.tocsr().diagonal(k=k) + + def dot(self, other): + """Ordinary dot product""" + if numpy.isscalar(other): + return self * other + else: + return self @ other + + def getH(self): + return self.transpose().conj() + + def get_shape(self): + raise NotImplementedError + + # TODO(unno): Implement getcol + + def getformat(self): + return self.format + + def getmaxprint(self): + return self.maxprint + + def getnnz(self, axis=None): + """Number of stored values, including explicit zeros.""" + raise NotImplementedError + + # TODO(unno): Implement getrow + + def maximum(self, other): + return self.tocsr().maximum(other) + + def mean(self, axis=None, dtype=None, out=None): + """ + Compute the arithmetic mean along the specified axis. + + Returns the average of the matrix elements. The average is taken + over all elements in the matrix by default, otherwise over the + specified axis. `float64` intermediate and return values are used + for integer inputs. + + Args: + axis {-2, -1, 0, 1, None}: optional + Axis along which the mean is computed. The default is to + compute the mean of all elements in the matrix + (i.e., `axis` = `None`). + dtype (dtype): optional + Type to use in computing the mean. For integer inputs, the + default is `float64`; for floating point inputs, it is the same + as the input dtype. + out (cupy.ndarray): optional + Alternative output matrix in which to place the result. It must + have the same shape as the expected output, but the type of the + output values will be cast if necessary. + + Returns: + m (cupy.ndarray) : Output array of means + + .. 
seealso:: + :meth:`scipy.sparse.spmatrix.mean` + + """ + def _is_integral(dtype): + return (cupy.issubdtype(dtype, cupy.integer) or + cupy.issubdtype(dtype, cupy.bool_)) + + _sputils.validateaxis(axis) + + res_dtype = self.dtype.type + integral = _is_integral(self.dtype) + + # output dtype + if dtype is None: + if integral: + res_dtype = cupy.float64 + else: + res_dtype = cupy.dtype(dtype).type + + # intermediate dtype for summation + inter_dtype = cupy.float64 if integral else res_dtype + inter_self = self.astype(inter_dtype) + + if axis is None: + return (inter_self / cupy.array( + self.shape[0] * self.shape[1]))\ + .sum(dtype=res_dtype, out=out) + + if axis < 0: + axis += 2 + + # axis = 0 or 1 now + if axis == 0: + return (inter_self * (1.0 / self.shape[0])).sum( + axis=0, dtype=res_dtype, out=out) + else: + return (inter_self * (1.0 / self.shape[1])).sum( + axis=1, dtype=res_dtype, out=out) + + def minimum(self, other): + return self.tocsr().minimum(other) + + def multiply(self, other): + """Point-wise multiplication by another matrix""" + return self.tocsr().multiply(other) + + # TODO(unno): Implement nonzero + + def power(self, n, dtype=None): + return self.tocsr().power(n, dtype=dtype) + + def reshape(self, *shape, order='C'): + """Gives a new shape to a sparse matrix without changing its data. + + Args: + shape (tuple): + The new shape should be compatible with the original shape. + order: {'C', 'F'} (optional) + Read the elements using this index order. 'C' means to read and + write the elements using C-like index order. 'F' means to read + and write the elements using Fortran-like index order. Default: + C. 
+ + Returns: + cupyx.scipy.sparse.coo_matrix: sparse matrix + + """ + shape = _sputils.check_shape(shape, self.shape) + + if shape == self.shape: + return self + + return self.tocoo().reshape(shape, order=order) + + def set_shape(self, shape): + self.reshape(shape) + + def setdiag(self, values, k=0): + """Set diagonal or off-diagonal elements of the array. + + Args: + values (cupy.ndarray): New values of the diagonal elements. + Values may have any length. If the diagonal is longer than + values, then the remaining diagonal entries will not be set. + If values is longer than the diagonal, then the remaining + values are ignored. If a scalar value is given, all of the + diagonal is set to it. + k (int, optional): Which diagonal to set, corresponding to elements + a[i, i+k]. Default: 0 (the main diagonal). + """ + raise NotImplementedError + + def sum(self, axis=None, dtype=None, out=None): + """Sums the matrix elements over a given axis. + + Args: + axis (int or ``None``): Axis along which the sum is computed. + If it is ``None``, it computes the sum of all the elements. + Select from ``{None, 0, 1, -2, -1}``. + dtype: The type of returned matrix. If it is not specified, type + of the array is used. + out (cupy.ndarray): Output matrix. + + Returns: + cupy.ndarray: Summed array. + + .. seealso:: + :meth:`scipy.sparse.spmatrix.sum` + + """ + _sputils.validateaxis(axis) + + # This implementation uses multiplication, though it is not efficient + # for some matrix types. These should override this function. 
+ + m, n = self.shape + + if axis is None: + return self.dot(cupy.ones(n, dtype=self.dtype)).sum( + dtype=dtype, out=out) + + if axis < 0: + axis += 2 + + if axis == 0: + ret = self.T.dot(cupy.ones(m, dtype=self.dtype)).reshape(1, n) + else: # axis == 1 + ret = self.dot(cupy.ones(n, dtype=self.dtype)).reshape(m, 1) + + if out is not None: + if out.shape != ret.shape: + raise ValueError('dimensions do not match') + _core.elementwise_copy(ret, out) + return out + elif dtype is not None: + return ret.astype(dtype, copy=False) + else: + return ret + + def toarray(self, order=None, out=None): + """Return a dense ndarray representation of this matrix.""" + return self.tocsr().toarray(order=order, out=out) + + def tobsr(self, blocksize=None, copy=False): + """Convert this matrix to Block Sparse Row format.""" + return self.tocsr(copy=copy).tobsr(copy=False) + + def tocoo(self, copy=False): + """Convert this matrix to COOrdinate format.""" + return self.tocsr(copy=copy).tocoo(copy=False) + + def tocsc(self, copy=False): + """Convert this matrix to Compressed Sparse Column format.""" + return self.tocsr(copy=copy).tocsc(copy=False) + + def tocsr(self, copy=False): + """Convert this matrix to Compressed Sparse Row format.""" + raise NotImplementedError + + def todense(self, order=None, out=None): + """Return a dense matrix representation of this matrix.""" + return self.toarray(order=order, out=out) + + def todia(self, copy=False): + """Convert this matrix to sparse DIAgonal format.""" + return self.tocsr(copy=copy).todia(copy=False) + + def todok(self, copy=False): + """Convert this matrix to Dictionary Of Keys format.""" + return self.tocsr(copy=copy).todok(copy=False) + + def tolil(self, copy=False): + """Convert this matrix to LInked List format.""" + return self.tocsr(copy=copy).tolil(copy=False) + + def transpose(self, axes=None, copy=False): + """Reverses the dimensions of the sparse matrix.""" + return self.tocsr(copy=copy).transpose(axes=axes, copy=False) + + +def 
issparse(x): + """Checks if a given matrix is a sparse matrix. + + Returns: + bool: Returns if ``x`` is :class:`cupyx.scipy.sparse.spmatrix` that is + a base class of all sparse matrix classes. + + """ + return isinstance(x, spmatrix) + + +isdense = _util.isdense +isspmatrix = issparse diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_compressed.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_compressed.py new file mode 100644 index 0000000000000000000000000000000000000000..485202b0a0700c6bc44b0bcd9e3984937169d17d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_compressed.py @@ -0,0 +1,860 @@ +import string +import warnings + +import numpy +try: + import scipy.sparse + scipy_available = True +except ImportError: + scipy_available = False + +import cupy +import cupyx + +from cupy import _core +from cupy._core import _scalar +from cupy._creation import basic +from cupyx.scipy.sparse import _base +from cupyx.scipy.sparse import _coo +from cupyx.scipy.sparse import _data as sparse_data +from cupyx.scipy.sparse import _sputils +from cupyx.scipy.sparse import _util + +from cupyx.scipy.sparse import _index + + +class _compressed_sparse_matrix(sparse_data._data_matrix, + sparse_data._minmax_mixin, + _index.IndexMixin): + + _max_min_reduction_code = r''' + extern "C" __global__ + void ${func}(double* data, int* x, int* y, int length, + double* z) { + // Get the index of the block + int tid = blockIdx.x * blockDim.x + threadIdx.x; + + // Calculate the block length + int block_length = y[tid] - x[tid]; + + // Select initial value based on the block density + double running_value = 0; + if (${cond}){ + running_value = data[x[tid]]; + } else { + running_value = 0; + } + + // Iterate over the block and update + for (int entry = x[tid]; entry < y[tid]; entry++){ + if (data[entry] != data[entry]){ + // Check for NaN + running_value = nan(""); + break; + } else { + // Check for a value update + if (data[entry] ${op} 
running_value){ + running_value = data[entry]; + } + } + } + + // Store in the return function + z[tid] = running_value; + }''' + + _max_reduction_kern = _core.RawKernel( + string.Template(_max_min_reduction_code).substitute( + func='max_reduction', op='>', cond='block_length == length'), + 'max_reduction') + + _max_nonzero_reduction_kern = _core.RawKernel( + string.Template(_max_min_reduction_code).substitute( + func='max_nonzero_reduction', op='>', cond='block_length > 0'), + 'max_nonzero_reduction') + + _min_reduction_kern = _core.RawKernel( + string.Template(_max_min_reduction_code).substitute( + func='min_reduction', op='<', cond='block_length == length'), + 'min_reduction') + + _min_nonzero_reduction_kern = _core.RawKernel( + string.Template(_max_min_reduction_code).substitute( + func='min_nonzero_reduction', op='<', cond='block_length > 0'), + 'min_nonzero_reduction') + + # For _max_arg_reduction_mod and _min_arg_reduction_mod below, we pick + # the right template specialization according to input dtypes at runtime. + # The distinction in int types (T2) is important for portability in OS. + + _argmax_argmin_code = r''' + template __global__ void + ${func}_arg_reduction(T1* data, int* indices, int* x, int* y, + int length, T2* z) { + // Get the index of the block + int tid = blockIdx.x * blockDim.x + threadIdx.x; + + // Calculate the block length + int block_length = y[tid] - x[tid]; + + // Select initial value based on the block density + int data_index = 0; + double data_value = 0; + + if (block_length == length){ + // Block is dense. Fill the first value + data_value = data[x[tid]]; + data_index = indices[x[tid]]; + } else if (block_length > 0) { + // Block has at least one zero. 
Assign first occurrence as the + // starting reference + data_value = 0; + for (data_index = 0; data_index < length; data_index++){ + if (data_index != indices[x[tid] + data_index] || + x[tid] + data_index >= y[tid]){ + break; + } + } + } else { + // Zero valued array + data_value = 0; + data_index = 0; + } + + // Iterate over the section of the sparse matrix + for (int entry = x[tid]; entry < y[tid]; entry++){ + if (data[entry] != data[entry]){ + // Check for NaN + data_value = nan(""); + data_index = 0; + break; + } else { + // Check for a value update + if (data[entry] ${op} data_value){ + data_index = indices[entry]; + data_value = data[entry]; + } + } + } + + // Store in the return function + z[tid] = data_index; + }''' + + _max_arg_reduction_mod = _core.RawModule( + code=string.Template(_argmax_argmin_code).substitute( + func='max', op='>'), + options=('-std=c++11',), + name_expressions=['max_arg_reduction', + 'max_arg_reduction', + 'max_arg_reduction', + 'max_arg_reduction']) + + _min_arg_reduction_mod = _core.RawModule( + code=string.Template(_argmax_argmin_code).substitute( + func='min', op='<'), + options=('-std=c++11',), + name_expressions=['min_arg_reduction', + 'min_arg_reduction', + 'min_arg_reduction', + 'min_arg_reduction']) + + # TODO(leofang): rewrite a more load-balanced approach than this naive one? + _has_sorted_indices_kern = _core.ElementwiseKernel( + 'raw T indptr, raw T indices', + 'bool diff', + ''' + bool diff_out = true; + for (T jj = indptr[i]; jj < indptr[i+1] - 1; jj++) { + if (indices[jj] > indices[jj+1]){ + diff_out = false; + } + } + diff = diff_out; + ''', 'cupyx_scipy_sparse_has_sorted_indices') + + # TODO(leofang): rewrite a more load-balanced approach than this naive one? 
+ _has_canonical_format_kern = _core.ElementwiseKernel( + 'raw T indptr, raw T indices', + 'bool diff', + ''' + bool diff_out = true; + if (indptr[i] > indptr[i+1]) { + diff = false; + return; + } + for (T jj = indptr[i]; jj < indptr[i+1] - 1; jj++) { + if (indices[jj] >= indices[jj+1]) { + diff_out = false; + } + } + diff = diff_out; + ''', 'cupyx_scipy_sparse_has_canonical_format') + + def __init__(self, arg1, shape=None, dtype=None, copy=False): + from cupyx import cusparse + + if shape is not None: + if not _util.isshape(shape): + raise ValueError('invalid shape (must be a 2-tuple of int)') + shape = int(shape[0]), int(shape[1]) + + if _base.issparse(arg1): + x = arg1.asformat(self.format) + data = x.data + indices = x.indices + indptr = x.indptr + + if arg1.format != self.format: + # When formats are different, all arrays are already copied + copy = False + + if shape is None: + shape = arg1.shape + + elif _util.isshape(arg1): + m, n = arg1 + m, n = int(m), int(n) + data = basic.zeros(0, dtype if dtype else 'd') + indices = basic.zeros(0, 'i') + indptr = basic.zeros(self._swap(m, n)[0] + 1, dtype='i') + # shape and copy argument is ignored + shape = (m, n) + copy = False + + elif scipy_available and scipy.sparse.issparse(arg1): + # Convert scipy.sparse to cupyx.scipy.sparse + x = arg1.asformat(self.format) + data = cupy.array(x.data) + indices = cupy.array(x.indices, dtype='i') + indptr = cupy.array(x.indptr, dtype='i') + copy = False + + if shape is None: + shape = arg1.shape + + elif isinstance(arg1, tuple) and len(arg1) == 2: + # Note: This implementation is not efficeint, as it first + # constructs a sparse matrix with coo format, then converts it to + # compressed format. 
+ sp_coo = _coo.coo_matrix(arg1, shape=shape, dtype=dtype, copy=copy) + sp_compressed = sp_coo.asformat(self.format) + data = sp_compressed.data + indices = sp_compressed.indices + indptr = sp_compressed.indptr + + elif isinstance(arg1, tuple) and len(arg1) == 3: + data, indices, indptr = arg1 + if not (_base.isdense(data) and data.ndim == 1 and + _base.isdense(indices) and indices.ndim == 1 and + _base.isdense(indptr) and indptr.ndim == 1): + raise ValueError( + 'data, indices, and indptr should be 1-D') + + if len(data) != len(indices): + raise ValueError('indices and data should have the same size') + + elif _base.isdense(arg1): + if arg1.ndim > 2: + raise TypeError('expected dimension <= 2 array or matrix') + elif arg1.ndim == 1: + arg1 = arg1[None] + elif arg1.ndim == 0: + arg1 = arg1[None, None] + data, indices, indptr = self._convert_dense(arg1) + copy = False + if shape is None: + shape = arg1.shape + + else: + raise ValueError( + 'Unsupported initializer format') + + if dtype is None: + dtype = data.dtype + else: + dtype = numpy.dtype(dtype) + + if dtype.char not in '?fdFD': + raise ValueError( + 'Only bool, float32, float64, complex64 and complex128 ' + 'are supported') + + data = data.astype(dtype, copy=copy) + sparse_data._data_matrix.__init__(self, data) + + self.indices = indices.astype('i', copy=copy) + self.indptr = indptr.astype('i', copy=copy) + + if shape is None: + shape = self._swap(len(indptr) - 1, int(indices.max()) + 1) + + major, minor = self._swap(*shape) + if len(indptr) != major + 1: + raise ValueError('index pointer size (%d) should be (%d)' + % (len(indptr), major + 1)) + + self._descr = cusparse.MatDescriptor.create() + self._shape = shape + + def _with_data(self, data, copy=True): + if copy: + return self.__class__( + (data, self.indices.copy(), self.indptr.copy()), + shape=self.shape, + dtype=data.dtype) + else: + return self.__class__( + (data, self.indices, self.indptr), + shape=self.shape, + dtype=data.dtype) + + def 
_convert_dense(self, x): + raise NotImplementedError + + def _swap(self, x, y): + raise NotImplementedError + + def _add_sparse(self, other, alpha, beta): + raise NotImplementedError + + def _add(self, other, lhs_negative, rhs_negative): + if cupy.isscalar(other): + if other == 0: + if lhs_negative: + return -self + else: + return self.copy() + else: + raise NotImplementedError( + 'adding a nonzero scalar to a sparse matrix is not ' + 'supported') + elif _base.isspmatrix(other): + alpha = -1 if lhs_negative else 1 + beta = -1 if rhs_negative else 1 + return self._add_sparse(other, alpha, beta) + elif _base.isdense(other): + if lhs_negative: + if rhs_negative: + return -self.todense() - other + else: + return other - self.todense() + else: + if rhs_negative: + return self.todense() - other + else: + return self.todense() + other + else: + return NotImplemented + + def __add__(self, other): + return self._add(other, False, False) + + def __radd__(self, other): + return self._add(other, False, False) + + def __sub__(self, other): + return self._add(other, False, True) + + def __rsub__(self, other): + return self._add(other, True, False) + + def _get_intXint(self, row, col): + major, minor = self._swap(row, col) + data, indices, _ = _index._get_csr_submatrix_major_axis( + self.data, self.indices, self.indptr, major, major + 1) + dtype = data.dtype + res = cupy.zeros((), dtype=dtype) + if dtype.kind == 'c': + _index._compress_getitem_complex_kern( + data.real, data.imag, indices, minor, res.real, res.imag) + else: + _index._compress_getitem_kern(data, indices, minor, res) + return res + + def _get_sliceXslice(self, row, col): + major, minor = self._swap(row, col) + copy = major.step in (1, None) + return self._major_slice(major)._minor_slice(minor, copy=copy) + + def _get_arrayXarray(self, row, col, not_found_val=0): + # inner indexing + idx_dtype = self.indices.dtype + M, N = self._swap(*self.shape) + major, minor = self._swap(row, col) + major = 
major.astype(idx_dtype, copy=False) + minor = minor.astype(idx_dtype, copy=False) + + val = _index._csr_sample_values( + M, N, self.indptr, self.indices, self.data, + major.ravel(), minor.ravel(), + not_found_val) + + if major.ndim == 1: + # Scipy returns `matrix` here + return cupy.expand_dims(val, 0) + return self.__class__(val.reshape(major.shape)) + + def _get_columnXarray(self, row, col): + # outer indexing + major, minor = self._swap(row, col) + return self._major_index_fancy(major)._minor_index_fancy(minor) + + def _major_index_fancy(self, idx): + """Index along the major axis where idx is an array of ints. + """ + _, N = self._swap(*self.shape) + M = idx.size + new_shape = self._swap(M, N) + if self.nnz == 0 or M == 0: + return self.__class__(new_shape, dtype=self.dtype) + + return self.__class__( + _index._csr_row_index(self.data, self.indices, self.indptr, idx), + shape=new_shape, copy=False) + + def _minor_index_fancy(self, idx): + """Index along the minor axis where idx is an array of ints. + """ + M, _ = self._swap(*self.shape) + N = idx.size + new_shape = self._swap(M, N) + if self.nnz == 0 or N == 0: + return self.__class__(new_shape, dtype=self.dtype) + + if idx.size * M < self.nnz: + # TODO (asi1024): Implement faster algorithm. + pass + + return self._tocsx()._major_index_fancy(idx)._tocsx() + + def _major_slice(self, idx, copy=False): + """Index along the major axis where idx is a slice object. 
+ """ + M, N = self._swap(*self.shape) + start, stop, step = idx.indices(M) + + if start == 0 and stop == M and step == 1: + return self.copy() if copy else self + + M = len(range(start, stop, step)) + new_shape = self._swap(M, N) + + if step == 1: + if M == 0 or self.nnz == 0: + return self.__class__(new_shape, dtype=self.dtype) + return self.__class__( + _index._get_csr_submatrix_major_axis( + self.data, self.indices, self.indptr, start, stop), + shape=new_shape, copy=copy) + rows = cupy.arange(start, stop, step, dtype=self.indptr.dtype) + return self._major_index_fancy(rows) + + def _minor_slice(self, idx, copy=False): + """Index along the minor axis where idx is a slice object. + """ + M, N = self._swap(*self.shape) + start, stop, step = idx.indices(N) + + if start == 0 and stop == N and step == 1: + return self.copy() if copy else self + + N = len(range(start, stop, step)) + new_shape = self._swap(M, N) + + if N == 0 or self.nnz == 0: + return self.__class__(new_shape, dtype=self.dtype) + if step == 1: + return self.__class__( + _index._get_csr_submatrix_minor_axis( + self.data, self.indices, self.indptr, start, stop), + shape=new_shape, copy=False) + cols = cupy.arange(start, stop, step, dtype=self.indices.dtype) + return self._minor_index_fancy(cols) + + def _set_intXint(self, row, col, x): + i, j = self._swap(row, col) + self._set_many(i, j, x) + + def _set_arrayXarray(self, row, col, x): + i, j = self._swap(row, col) + self._set_many(i, j, x) + + def _set_arrayXarray_sparse(self, row, col, x): + # clear entries that will be overwritten + self._zero_many(*self._swap(row, col)) + + M, N = row.shape # matches col.shape + broadcast_row = M != 1 and x.shape[0] == 1 + broadcast_col = N != 1 and x.shape[1] == 1 + r, c = x.row, x.col + x = cupy.asarray(x.data, dtype=self.dtype) + if broadcast_row: + r = cupy.repeat(cupy.arange(M), r.size) + c = cupy.tile(c, M) + x = cupy.tile(x, M) + if broadcast_col: + r = cupy.repeat(r, N) + c = cupy.tile(cupy.arange(N), c.size) 
+ x = cupy.repeat(x, N) + # only assign entries in the new sparsity structure + i, j = self._swap(row[r, c], col[r, c]) + self._set_many(i, j, x) + + def _prepare_indices(self, i, j): + M, N = self._swap(*self.shape) + + def check_bounds(indices, bound): + idx = indices.max() + if idx >= bound: + raise IndexError('index (%d) out of range (>= %d)' % + (idx, bound)) + idx = indices.min() + if idx < -bound: + raise IndexError('index (%d) out of range (< -%d)' % + (idx, bound)) + + i = cupy.array(i, dtype=self.indptr.dtype, + copy=True, ndmin=1).ravel() + j = cupy.array(j, dtype=self.indices.dtype, + copy=True, ndmin=1).ravel() + check_bounds(i, M) + check_bounds(j, N) + return i, j, M, N + + def _set_many(self, i, j, x): + """Sets value at each (i, j) to x + Here (i,j) index major and minor respectively, and must not contain + duplicate entries. + """ + i, j, M, N = self._prepare_indices(i, j) + x = cupy.array(x, dtype=self.dtype, copy=True, ndmin=1).ravel() + + new_sp = cupyx.scipy.sparse.csr_matrix( + (cupy.arange(self.nnz, dtype=cupy.float32), + self.indices, self.indptr), shape=(M, N)) + + offsets = new_sp._get_arrayXarray( + i, j, not_found_val=-1).astype(cupy.int32).ravel() + + mask = offsets > -1 + self.data[offsets[mask]] = x[mask] + + if mask.all(): + # only affects existing non-zero cells + return + + # only insertions remain + warnings.warn('Changing the sparsity structure of a ' + '{}_matrix is expensive.'.format(self.format), + _base.SparseEfficiencyWarning) + mask = ~mask + i = i[mask] + i[i < 0] += M + j = j[mask] + j[j < 0] += N + self._insert_many(i, j, x[mask]) + + def _zero_many(self, i, j): + """Sets value at each (i, j) to zero, preserving sparsity structure. + Here (i,j) index major and minor respectively. 
+ """ + i, j, M, N = self._prepare_indices(i, j) + + new_sp = cupyx.scipy.sparse.csr_matrix( + (cupy.arange(self.nnz, dtype=cupy.float32), + self.indices, self.indptr), shape=(M, N)) + + offsets = new_sp._get_arrayXarray( + i, j, not_found_val=-1).astype(cupy.int32).ravel() + + # only assign zeros to the existing sparsity structure + self.data[offsets[offsets > -1]] = 0 + + def _perform_insert(self, indices_inserts, data_inserts, + rows, row_counts, idx_dtype): + """Insert new elements into current sparse matrix in sorted order""" + indptr_diff = cupy.diff(self.indptr) + indptr_diff[rows] += row_counts + + new_indptr = cupy.empty(self.indptr.shape, dtype=idx_dtype) + new_indptr[0] = idx_dtype(0) + new_indptr[1:] = indptr_diff + + # Build output arrays + cupy.cumsum(new_indptr, out=new_indptr) + out_nnz = int(new_indptr[-1]) + + new_indices = cupy.empty(out_nnz, dtype=idx_dtype) + new_data = cupy.empty(out_nnz, dtype=self.data.dtype) + + # Build an indexed indptr that contains the offsets for each + # row but only for in i, j, and x. + new_indptr_lookup = cupy.zeros(new_indptr.size, dtype=idx_dtype) + new_indptr_lookup[1:][rows] = row_counts + cupy.cumsum(new_indptr_lookup, out=new_indptr_lookup) + + _index._insert_many_populate_arrays( + indices_inserts, data_inserts, new_indptr_lookup, + self.indptr, self.indices, self.data, new_indptr, new_indices, + new_data, size=self.indptr.size-1) + + self.indptr = new_indptr + self.indices = new_indices + self.data = new_data + + def _insert_many(self, i, j, x): + """Inserts new nonzero at each (i, j) with value x + Here (i,j) index major and minor respectively. + i, j and x must be non-empty, 1d arrays. + Inserts each major group (e.g. all entries per row) at a time. + Maintains has_sorted_indices property. + Modifies i, j, x in place. 
+ """ + + order = cupy.argsort(i) # stable for duplicates + i = i.take(order) + j = j.take(order) + x = x.take(order) + + # Update index data type + + idx_dtype = _sputils.get_index_dtype( + (self.indices, self.indptr), maxval=( + self.nnz + x.size)) + + self.indptr = self.indptr.astype(idx_dtype) + self.indices = self.indices.astype(idx_dtype) + self.data = self.data.astype(self.dtype) + + indptr_inserts, indices_inserts, data_inserts = \ + _index._select_last_indices(i, j, x, idx_dtype) + + rows, ui_indptr = cupy.unique(indptr_inserts, return_index=True) + + to_add = cupy.empty(ui_indptr.size+1, ui_indptr.dtype) + to_add[-1] = j.size + to_add[:-1] = ui_indptr + ui_indptr = to_add + + # Compute the counts for each row in the insertion array + row_counts = cupy.zeros(ui_indptr.size-1, dtype=idx_dtype) + cupy.add.at(row_counts, cupy.searchsorted(rows, indptr_inserts), 1) + + self._perform_insert(indices_inserts, data_inserts, + rows, row_counts, idx_dtype) + + def __get_has_canonical_format(self): + """Determine whether the matrix has sorted indices and no duplicates. + + Returns + bool: ``True`` if the above applies, otherwise ``False``. + + .. note:: + :attr:`has_canonical_format` implies :attr:`has_sorted_indices`, so + if the latter flag is ``False``, so will the former be; if the + former is found ``True``, the latter flag is also set. + + .. warning:: + Getting this property might synchronize the device. + + """ + # Modified from the SciPy counterpart. + + # In CuPy the implemented conversions do not exactly match those of + # SciPy's, so it's hard to put this exactly as where it is in SciPy, + # but this should do the job. 
+ if self.data.size == 0: + self._has_canonical_format = True + # check to see if result was cached + elif not getattr(self, '_has_sorted_indices', True): + # not sorted => not canonical + self._has_canonical_format = False + elif not hasattr(self, '_has_canonical_format'): + is_canonical = self._has_canonical_format_kern( + self.indptr, self.indices, size=self.indptr.size-1) + self._has_canonical_format = bool(is_canonical.all()) + return self._has_canonical_format + + def __set_has_canonical_format(self, val): + """Taken from SciPy as is.""" + self._has_canonical_format = bool(val) + if val: + self.has_sorted_indices = True + + has_canonical_format = property(fget=__get_has_canonical_format, + fset=__set_has_canonical_format) + + def __get_sorted(self): + """Determine whether the matrix has sorted indices. + + Returns + bool: + ``True`` if the indices of the matrix are in sorted order, + otherwise ``False``. + + .. warning:: + Getting this property might synchronize the device. + + """ + # Modified from the SciPy counterpart. + + # In CuPy the implemented conversions do not exactly match those of + # SciPy's, so it's hard to put this exactly as where it is in SciPy, + # but this should do the job. + if self.data.size == 0: + self._has_sorted_indices = True + # check to see if result was cached + elif not hasattr(self, '_has_sorted_indices'): + is_sorted = self._has_sorted_indices_kern( + self.indptr, self.indices, size=self.indptr.size-1) + self._has_sorted_indices = bool(is_sorted.all()) + return self._has_sorted_indices + + def __set_sorted(self, val): + self._has_sorted_indices = bool(val) + + has_sorted_indices = property(fget=__get_sorted, fset=__set_sorted) + + def get_shape(self): + """Returns the shape of the matrix. + + Returns: + tuple: Shape of the matrix. + + """ + return self._shape + + def getnnz(self, axis=None): + """Returns the number of stored values, including explicit zeros. + + Args: + axis: Not supported yet. 
+ + Returns: + int: The number of stored values. + + """ + if axis is None: + return self.data.size + else: + raise ValueError + + def sorted_indices(self): + """Return a copy of this matrix with sorted indices + + .. warning:: + Calling this function might synchronize the device. + """ + # Taken from SciPy as is. + A = self.copy() + A.sort_indices() + return A + + def sort_indices(self): + # Unlike in SciPy, here this is implemented in child classes because + # each child needs to call its own sort function from cuSPARSE + raise NotImplementedError + + def sum_duplicates(self): + """Eliminate duplicate matrix entries by adding them together. + + .. note:: + This is an *in place* operation. + + .. warning:: + Calling this function might synchronize the device. + + .. seealso:: + :meth:`scipy.sparse.csr_matrix.sum_duplicates`, + :meth:`scipy.sparse.csc_matrix.sum_duplicates` + """ + if self.has_canonical_format: + return + # TODO(leofang): add a kernel for compressed sparse matrices without + # converting to coo + coo = self.tocoo() + coo.sum_duplicates() + self.__init__(coo.asformat(self.format)) + self.has_canonical_format = True + + ##################### + # Reduce operations # + ##################### + + def _minor_reduce(self, ufunc, axis, nonzero): + """Reduce nonzeros with a ufunc over the minor axis when non-empty + + Can be applied to a function of self.data by supplying data parameter. + Warning: this does not call sum_duplicates() + + Args: + ufunc (object): Function handle giving the operation to be + conducted. + axis (int): Matrix over which the reduction should be + conducted. + + Returns: + (cupy.ndarray): Reduce result for nonzeros in each + major_index. 
+ + """ + out_shape = self.shape[1 - axis] + # Call to the appropriate kernel function + out = cupy.zeros(out_shape).astype(cupy.float64) + if nonzero: + kerns = {cupy.amax: self._max_nonzero_reduction_kern, + cupy.amin: self._min_nonzero_reduction_kern} + else: + kerns = {cupy.amax: self._max_reduction_kern, + cupy.amin: self._min_reduction_kern} + + kerns[ufunc]((out_shape,), (1,), + (self.data.astype(cupy.float64), + self.indptr[:len(self.indptr) - 1], + self.indptr[1:], cupy.int64(self.shape[axis]), + out)) + + return out + + def _arg_minor_reduce(self, ufunc, axis): + """Reduce nonzeros with a ufunc over the minor axis when non-empty + + Can be applied to a function of self.data by supplying data parameter. + Warning: this does not call sum_duplicates() + + Args: + ufunc (object): Function handle giving the operation to be + conducted. + axis (int): Maxtrix over which the reduction should be conducted + + Returns: + (cupy.ndarray): Reduce result for nonzeros in each + major_index + + """ + + # Call to the appropriate kernel function + # Create the vector to hold output + # Note: it's important to set "int" here, following what SciPy + # does, as the outcome dtype is platform dependent + out_shape = self.shape[1 - axis] + out = cupy.zeros(out_shape, dtype=int) + + # Perform the calculation + ker_name = '_arg_reduction<{}, {}>'.format( + _scalar.get_typename(self.data.dtype), + _scalar.get_typename(out.dtype)) + + if ufunc == cupy.argmax: + ker = self._max_arg_reduction_mod.get_function('max' + ker_name) + elif ufunc == cupy.argmin: + ker = self._min_arg_reduction_mod.get_function('min' + ker_name) + + ker((out_shape,), (1,), + (self.data, self.indices, + self.indptr[:len(self.indptr) - 1], + self.indptr[1:], cupy.int64(self.shape[axis]), + out)) + + return out diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_construct.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_construct.py new file mode 100644 index 
0000000000000000000000000000000000000000..3aa3d87b7a5667c1534128a0316cd396763e4070 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_construct.py @@ -0,0 +1,582 @@ +import numpy +import cupy +from cupyx.scipy.sparse import _coo +from cupyx.scipy.sparse import _csc +from cupyx.scipy.sparse import _csr +from cupyx.scipy.sparse import _dia +from cupyx.scipy.sparse import _sputils + + +def eye(m, n=None, k=0, dtype='d', format=None): + """Creates a sparse matrix with ones on diagonal. + + Args: + m (int): Number of rows. + n (int or None): Number of columns. If it is ``None``, + it makes a square matrix. + k (int): Diagonal to place ones on. + dtype: Type of a matrix to create. + format (str or None): Format of the result, e.g. ``format="csr"``. + + Returns: + cupyx.scipy.sparse.spmatrix: Created sparse matrix. + + .. seealso:: :func:`scipy.sparse.eye` + + """ + if n is None: + n = m + m, n = int(m), int(n) + + if m == n and k == 0: + if format in ['csr', 'csc']: + indptr = cupy.arange(n + 1, dtype='i') + indices = cupy.arange(n, dtype='i') + data = cupy.ones(n, dtype=dtype) + if format == 'csr': + cls = _csr.csr_matrix + else: + cls = _csc.csc_matrix + return cls((data, indices, indptr), (n, n)) + + elif format == 'coo': + row = cupy.arange(n, dtype='i') + col = cupy.arange(n, dtype='i') + data = cupy.ones(n, dtype=dtype) + return _coo.coo_matrix((data, (row, col)), (n, n)) + + diags = cupy.ones((1, max(0, min(m + k, n))), dtype=dtype) + return spdiags(diags, k, m, n).asformat(format) + + +def identity(n, dtype='d', format=None): + """Creates an identity matrix in sparse format. + + .. note:: + Currently it only supports csr, csc and coo formats. + + Args: + n (int): Number of rows and columns. + dtype: Type of a matrix to create. + format (str or None): Format of the result, e.g. ``format="csr"``. + + Returns: + cupyx.scipy.sparse.spmatrix: Created identity matrix. + + .. 
seealso:: :func:`scipy.sparse.identity` + + """ + return eye(n, n, dtype=dtype, format=format) + + +def spdiags(data, diags, m, n, format=None): + """Creates a sparse matrix from diagonals. + + Args: + data (cupy.ndarray): Matrix diagonals stored row-wise. + diags (cupy.ndarray): Diagonals to set. + m (int): Number of rows. + n (int): Number of cols. + format (str or None): Sparse format, e.g. ``format="csr"``. + + Returns: + cupyx.scipy.sparse.spmatrix: Created sparse matrix. + + .. seealso:: :func:`scipy.sparse.spdiags` + + """ + return _dia.dia_matrix((data, diags), shape=(m, n)).asformat(format) + + +def _compressed_sparse_stack(blocks, axis): + """Fast path for stacking CSR/CSC matrices + (i) vstack for CSR, (ii) hstack for CSC. + """ + other_axis = 1 if axis == 0 else 0 + data = cupy.concatenate([b.data for b in blocks]) + constant_dim = blocks[0].shape[other_axis] + idx_dtype = _sputils.get_index_dtype(arrays=[b.indptr for b in blocks], + maxval=max(data.size, constant_dim)) + indices = cupy.empty(data.size, dtype=idx_dtype) + indptr = cupy.empty(sum(b.shape[axis] + for b in blocks) + 1, dtype=idx_dtype) + last_indptr = idx_dtype(0) + sum_dim = 0 + sum_indices = 0 + for b in blocks: + if b.shape[other_axis] != constant_dim: + raise ValueError( + 'incompatible dimensions for axis %d' % other_axis) + indices[sum_indices:sum_indices+b.indices.size] = b.indices + sum_indices += b.indices.size + idxs = slice(sum_dim, sum_dim + b.shape[axis]) + indptr[idxs] = b.indptr[:-1] + indptr[idxs] += last_indptr + sum_dim += b.shape[axis] + last_indptr += b.indptr[-1] + indptr[-1] = last_indptr + if axis == 0: + return _csr.csr_matrix((data, indices, indptr), + shape=(sum_dim, constant_dim)) + else: + return _csc.csc_matrix((data, indices, indptr), + shape=(constant_dim, sum_dim)) + + +def hstack(blocks, format=None, dtype=None): + """Stacks sparse matrices horizontally (column wise) + + Args: + blocks (sequence of cupyx.scipy.sparse.spmatrix): + sparse matrices to stack + 
+ format (str): + sparse format of the result (e.g. "csr") + by default an appropriate sparse matrix format is returned. + This choice is subject to change. + dtype (dtype, optional): + The data-type of the output matrix. If not given, the dtype is + determined from that of ``blocks``. + + Returns: + cupyx.scipy.sparse.spmatrix: the stacked sparse matrix + + .. seealso:: :func:`scipy.sparse.hstack` + + Examples: + >>> from cupy import array + >>> from cupyx.scipy.sparse import csr_matrix, hstack + >>> A = csr_matrix(array([[1., 2.], [3., 4.]])) + >>> B = csr_matrix(array([[5.], [6.]])) + >>> hstack([A, B]).toarray() + array([[1., 2., 5.], + [3., 4., 6.]]) + """ + return bmat([blocks], format=format, dtype=dtype) + + +def vstack(blocks, format=None, dtype=None): + """Stacks sparse matrices vertically (row wise) + + Args: + blocks (sequence of cupyx.scipy.sparse.spmatrix) + sparse matrices to stack + format (str, optional): + sparse format of the result (e.g. "csr") + by default an appropriate sparse matrix format is returned. + This choice is subject to change. + dtype (dtype, optional): + The data-type of the output matrix. If not given, the dtype is + determined from that of `blocks`. + + Returns: + cupyx.scipy.sparse.spmatrix: the stacked sparse matrix + + .. seealso:: :func:`scipy.sparse.vstack` + + Examples: + >>> from cupy import array + >>> from cupyx.scipy.sparse import csr_matrix, vstack + >>> A = csr_matrix(array([[1., 2.], [3., 4.]])) + >>> B = csr_matrix(array([[5., 6.]])) + >>> vstack([A, B]).toarray() + array([[1., 2.], + [3., 4.], + [5., 6.]]) + """ + return bmat([[b] for b in blocks], format=format, dtype=dtype) + + +def bmat(blocks, format=None, dtype=None): + """Builds a sparse matrix from sparse sub-blocks + + Args: + blocks (array_like): + Grid of sparse matrices with compatible shapes. + An entry of None implies an all-zero matrix. + format ({'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional): + The sparse format of the result (e.g. 
"csr"). By default an + appropriate sparse matrix format is returned. + This choice is subject to change. + dtype (dtype, optional): + The data-type of the output matrix. If not given, the dtype is + determined from that of `blocks`. + Returns: + bmat (sparse matrix) + + .. seealso:: :func:`scipy.sparse.bmat` + + Examples: + >>> from cupy import array + >>> from cupyx.scipy.sparse import csr_matrix, bmat + >>> A = csr_matrix(array([[1., 2.], [3., 4.]])) + >>> B = csr_matrix(array([[5.], [6.]])) + >>> C = csr_matrix(array([[7.]])) + >>> bmat([[A, B], [None, C]]).toarray() + array([[1., 2., 5.], + [3., 4., 6.], + [0., 0., 7.]]) + >>> bmat([[A, None], [None, C]]).toarray() + array([[1., 2., 0.], + [3., 4., 0.], + [0., 0., 7.]]) + + """ + + # We assume here that blocks will be 2-D so we need to look, at most, + # 2 layers deep for the shape + # TODO(Corey J. Nolet): Check this assumption and raise ValueError + + # NOTE: We can't follow scipy exactly here + # since we don't have an `object` datatype + M = len(blocks) + N = len(blocks[0]) + + blocks_flat = [] + for m in range(M): + for n in range(N): + if blocks[m][n] is not None: + blocks_flat.append(blocks[m][n]) + + if len(blocks_flat) == 0: + return _coo.coo_matrix((0, 0), dtype=dtype) + + # check for fast path cases + if (N == 1 and format in (None, 'csr') and + all(isinstance(b, _csr.csr_matrix) + for b in blocks_flat)): + A = _compressed_sparse_stack(blocks_flat, 0) + if dtype is not None: + A = A.astype(dtype) + return A + elif (M == 1 and format in (None, 'csc') + and all(isinstance(b, _csc.csc_matrix) for b in blocks_flat)): + A = _compressed_sparse_stack(blocks_flat, 1) + if dtype is not None: + A = A.astype(dtype) + return A + + block_mask = numpy.zeros((M, N), dtype=bool) + brow_lengths = numpy.zeros(M+1, dtype=numpy.int64) + bcol_lengths = numpy.zeros(N+1, dtype=numpy.int64) + + # convert everything to COO format + for i in range(M): + for j in range(N): + if blocks[i][j] is not None: + A = 
_coo.coo_matrix(blocks[i][j]) + blocks[i][j] = A + block_mask[i][j] = True + + if brow_lengths[i+1] == 0: + brow_lengths[i+1] = A.shape[0] + elif brow_lengths[i+1] != A.shape[0]: + msg = ('blocks[{i},:] has incompatible row dimensions. ' + 'Got blocks[{i},{j}].shape[0] == {got}, ' + 'expected {exp}.'.format(i=i, j=j, + exp=brow_lengths[i+1], + got=A.shape[0])) + raise ValueError(msg) + + if bcol_lengths[j+1] == 0: + bcol_lengths[j+1] = A.shape[1] + elif bcol_lengths[j+1] != A.shape[1]: + msg = ('blocks[:,{j}] has incompatible row dimensions. ' + 'Got blocks[{i},{j}].shape[1] == {got}, ' + 'expected {exp}.'.format(i=i, j=j, + exp=bcol_lengths[j+1], + got=A.shape[1])) + raise ValueError(msg) + + nnz = sum(block.nnz for block in blocks_flat) + if dtype is None: + all_dtypes = [blk.dtype for blk in blocks_flat] + dtype = _sputils.upcast(*all_dtypes) if all_dtypes else None + + row_offsets = numpy.cumsum(brow_lengths) + col_offsets = numpy.cumsum(bcol_lengths) + + shape = (row_offsets[-1], col_offsets[-1]) + + data = cupy.empty(nnz, dtype=dtype) + idx_dtype = _sputils.get_index_dtype(maxval=max(shape)) + row = cupy.empty(nnz, dtype=idx_dtype) + col = cupy.empty(nnz, dtype=idx_dtype) + + nnz = 0 + ii, jj = numpy.nonzero(block_mask) + for i, j in zip(ii, jj): + B = blocks[int(i)][int(j)] + idx = slice(nnz, nnz + B.nnz) + data[idx] = B.data + row[idx] = B.row + row_offsets[i] + col[idx] = B.col + col_offsets[j] + nnz += B.nnz + + return _coo.coo_matrix((data, (row, col)), shape=shape).asformat(format) + + +def random(m, n, density=0.01, format='coo', dtype=None, + random_state=None, data_rvs=None): + """Generates a random sparse matrix. + + This function generates a random sparse matrix. First it selects non-zero + elements with given density ``density`` from ``(m, n)`` elements. + So the number of non-zero elements ``k`` is ``k = m * n * density``. + Value of each element is selected with ``data_rvs`` function. + + Args: + m (int): Number of rows. 
+ n (int): Number of cols. + density (float): Ratio of non-zero entries. + format (str): Matrix format. + dtype (~cupy.dtype): Type of the returned matrix values. + random_state (cupy.random.RandomState or int): + State of random number generator. + If an integer is given, the method makes a new state for random + number generator and uses it. + If it is not given, the default state is used. + This state is used to generate random indexes for nonzero entries. + data_rvs (callable): A function to generate data for a random matrix. + If it is not given, `random_state.rand` is used. + + Returns: + cupyx.scipy.sparse.spmatrix: Generated matrix. + + .. seealso:: :func:`scipy.sparse.random` + + """ + if density < 0 or density > 1: + raise ValueError('density expected to be 0 <= density <= 1') + dtype = cupy.dtype(dtype) + if dtype.char not in 'fd': + raise NotImplementedError('type %s not supported' % dtype) + + mn = m * n + + k = int(density * m * n) + + if random_state is None: + random_state = cupy.random + elif isinstance(random_state, (int, cupy.integer)): + random_state = cupy.random.RandomState(random_state) + + if data_rvs is None: + data_rvs = random_state.rand + + ind = random_state.choice(mn, size=k, replace=False) + j = ind // m + i = ind - j * m + vals = data_rvs(k).astype(dtype) + return _coo.coo_matrix( + (vals, (i, j)), shape=(m, n)).asformat(format) + + +def rand(m, n, density=0.01, format='coo', dtype=None, random_state=None): + """Generates a random sparse matrix. + + See :func:`cupyx.scipy.sparse.random` for detail. + + Args: + m (int): Number of rows. + n (int): Number of cols. + density (float): Ratio of non-zero entries. + format (str): Matrix format. + dtype (~cupy.dtype): Type of the returned matrix values. + random_state (cupy.random.RandomState or int): + State of random number generator. + If an integer is given, the method makes a new state for random + number generator and uses it. + If it is not given, the default state is used. 
+ This state is used to generate random indexes for nonzero entries. + + Returns: + cupyx.scipy.sparse.spmatrix: Generated matrix. + + .. seealso:: :func:`scipy.sparse.rand` + .. seealso:: :func:`cupyx.scipy.sparse.random` + + """ + return random(m, n, density, format, dtype, random_state) + + +def diags(diagonals, offsets=0, shape=None, format=None, dtype=None): + """Construct a sparse matrix from diagonals. + + Args: + diagonals (sequence of array_like): + Sequence of arrays containing the matrix diagonals, corresponding + to `offsets`. + offsets (sequence of int or an int): + Diagonals to set: + - k = 0 the main diagonal (default) + - k > 0 the k-th upper diagonal + - k < 0 the k-th lower diagonal + shape (tuple of int): + Shape of the result. If omitted, a square matrix large enough + to contain the diagonals is returned. + format ({"dia", "csr", "csc", "lil", ...}): + Matrix format of the result. By default (format=None) an + appropriate sparse matrix format is returned. This choice is + subject to change. + dtype (dtype): Data type of the matrix. + + Returns: + cupyx.scipy.sparse.spmatrix: Generated matrix. + + Notes: + This function differs from `spdiags` in the way it handles + off-diagonals. + + The result from `diags` is the sparse equivalent of:: + + cupy.diag(diagonals[0], offsets[0]) + + ... + + cupy.diag(diagonals[k], offsets[k]) + + Repeated diagonal offsets are disallowed. 
+ """ + # if offsets is not a sequence, assume that there's only one diagonal + if _sputils.isscalarlike(offsets): + # now check that there's actually only one diagonal + if len(diagonals) == 0 or _sputils.isscalarlike(diagonals[0]): + diagonals = [cupy.atleast_1d(diagonals)] + else: + raise ValueError('Different number of diagonals and offsets.') + else: + diagonals = list(map(cupy.atleast_1d, diagonals)) + + if isinstance(offsets, cupy.ndarray): + offsets = offsets.get() + offsets = numpy.atleast_1d(offsets) + + # Basic check + if len(diagonals) != len(offsets): + raise ValueError('Different number of diagonals and offsets.') + + # Determine shape, if omitted + if shape is None: + m = len(diagonals[0]) + abs(int(offsets[0])) + shape = (m, m) + + # Determine data type, if omitted + if dtype is None: + dtype = cupy.common_type(*diagonals) + + # Construct data array + m, n = shape + + M = max([min(m + offset, n - offset) + max(0, offset) + for offset in offsets]) + M = max(0, M) + data_arr = cupy.zeros((len(offsets), M), dtype=dtype) + + K = min(m, n) + + for j, diagonal in enumerate(diagonals): + offset = offsets[j] + k = max(0, offset) + length = min(m + offset, n - offset, K) + if length < 0: + raise ValueError( + 'Offset %d (index %d) out of bounds' % (offset, j)) + try: + data_arr[j, k:k+length] = diagonal[..., :length] + except ValueError: + if len(diagonal) != length and len(diagonal) != 1: + raise ValueError( + 'Diagonal length (index %d: %d at offset %d) does not ' + 'agree with matrix size (%d, %d).' % ( + j, len(diagonal), offset, m, n)) + raise + + return _dia.dia_matrix((data_arr, offsets), shape=(m, n)).asformat(format) + + +def kron(A, B, format=None): + """Kronecker product of sparse matrices A and B. + + Args: + A (cupyx.scipy.sparse.spmatrix): a sparse matrix. + B (cupyx.scipy.sparse.spmatrix): a sparse matrix. + format (str): the format of the returned sparse matrix. 
+ + Returns: + cupyx.scipy.sparse.spmatrix: + Generated sparse matrix with the specified ``format``. + + .. seealso:: :func:`scipy.sparse.kron` + + """ + # TODO(leofang): support BSR format when it's added to CuPy + # TODO(leofang): investigate if possible to optimize performance by + # starting with CSR instead of COO matrices + + A = _coo.coo_matrix(A) + B = _coo.coo_matrix(B) + out_shape = (A.shape[0] * B.shape[0], A.shape[1] * B.shape[1]) + + if A.nnz == 0 or B.nnz == 0: + # kronecker product is the zero matrix + return _coo.coo_matrix(out_shape).asformat(format) + + if max(out_shape[0], out_shape[1]) > cupy.iinfo('int32').max: + dtype = cupy.int64 + else: + dtype = cupy.int32 + + # expand entries of A into blocks + row = A.row.astype(dtype, copy=True) * B.shape[0] + row = row.repeat(B.nnz) + col = A.col.astype(dtype, copy=True) * B.shape[1] + col = col.repeat(B.nnz) + data = A.data.repeat(B.nnz) # data's dtype follows that of A in SciPy + + # increment block indices + row, col = row.reshape(-1, B.nnz), col.reshape(-1, B.nnz) + row += B.row + col += B.col + row, col = row.ravel(), col.ravel() + + # compute block entries + data = data.reshape(-1, B.nnz) * B.data + data = data.ravel() + + return _coo.coo_matrix( + (data, (row, col)), shape=out_shape).asformat(format) + + +def kronsum(A, B, format=None): + """Kronecker sum of sparse matrices A and B. + + Kronecker sum is the sum of two Kronecker products + ``kron(I_n, A) + kron(B, I_m)``, where ``I_n`` and ``I_m`` are identity + matrices. + + Args: + A (cupyx.scipy.sparse.spmatrix): a sparse matrix. + B (cupyx.scipy.sparse.spmatrix): a sparse matrix. + format (str): the format of the returned sparse matrix. + + Returns: + cupyx.scipy.sparse.spmatrix: + Generated sparse matrix with the specified ``format``. + + .. 
seealso:: :func:`scipy.sparse.kronsum` + + """ + A = _coo.coo_matrix(A) + B = _coo.coo_matrix(B) + + if A.shape[0] != A.shape[1]: + raise ValueError('A is not square matrix') + + if B.shape[0] != B.shape[1]: + raise ValueError('B is not square matrix') + + dtype = _sputils.upcast(A.dtype, B.dtype) + + L = kron(eye(B.shape[0], dtype=dtype), A, format=format) + R = kron(B, eye(A.shape[0], dtype=dtype), format=format) + + return (L + R).asformat(format) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_coo.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_coo.py new file mode 100644 index 0000000000000000000000000000000000000000..6b3d16154c61c0961a4248d7155ed3ca467ab877 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_coo.py @@ -0,0 +1,568 @@ +import numpy +try: + import scipy.sparse + _scipy_available = True +except ImportError: + _scipy_available = False + +import cupy +from cupy import _core +from cupyx.scipy.sparse import _base +from cupyx.scipy.sparse import _csc +from cupyx.scipy.sparse import _csr +from cupyx.scipy.sparse import _data as sparse_data +from cupyx.scipy.sparse import _util +from cupyx.scipy.sparse import _sputils + + +class coo_matrix(sparse_data._data_matrix): + + """COOrdinate format sparse matrix. + + This can be instantiated in several ways. + + ``coo_matrix(D)`` + ``D`` is a rank-2 :class:`cupy.ndarray`. + + ``coo_matrix(S)`` + ``S`` is another sparse matrix. It is equivalent to ``S.tocoo()``. + + ``coo_matrix((M, N), [dtype])`` + It constructs an empty matrix whose shape is ``(M, N)``. Default dtype + is float64. + + ``coo_matrix((data, (row, col)))`` + All ``data``, ``row`` and ``col`` are one-dimenaional + :class:`cupy.ndarray`. + + Args: + arg1: Arguments for the initializer. + shape (tuple): Shape of a matrix. Its length must be two. + dtype: Data type. It must be an argument of :class:`numpy.dtype`. + copy (bool): If ``True``, copies of given data are always used. + + .. 
seealso:: + :class:`scipy.sparse.coo_matrix` + + """ + + format = 'coo' + + _sum_duplicates_diff = _core.ElementwiseKernel( + 'raw T row, raw T col', + 'T diff', + ''' + T diff_out = 1; + if (i == 0 || row[i - 1] == row[i] && col[i - 1] == col[i]) { + diff_out = 0; + } + diff = diff_out; + ''', 'cupyx_scipy_sparse_coo_sum_duplicates_diff') + + def __init__(self, arg1, shape=None, dtype=None, copy=False): + if shape is not None and len(shape) != 2: + raise ValueError( + 'Only two-dimensional sparse arrays are supported.') + + if _base.issparse(arg1): + x = arg1.asformat(self.format) + data = x.data + row = x.row + col = x.col + + if arg1.format != self.format: + # When formats are different, all arrays are already copied + copy = False + + if shape is None: + shape = arg1.shape + + self.has_canonical_format = x.has_canonical_format + + elif _util.isshape(arg1): + m, n = arg1 + m, n = int(m), int(n) + data = cupy.zeros(0, dtype if dtype else 'd') + row = cupy.zeros(0, dtype='i') + col = cupy.zeros(0, dtype='i') + # shape and copy argument is ignored + shape = (m, n) + copy = False + + self.has_canonical_format = True + + elif _scipy_available and scipy.sparse.issparse(arg1): + # Convert scipy.sparse to cupyx.scipy.sparse + x = arg1.tocoo() + data = cupy.array(x.data) + row = cupy.array(x.row, dtype='i') + col = cupy.array(x.col, dtype='i') + copy = False + if shape is None: + shape = arg1.shape + + self.has_canonical_format = x.has_canonical_format + + elif isinstance(arg1, tuple) and len(arg1) == 2: + try: + data, (row, col) = arg1 + except (TypeError, ValueError): + raise TypeError('invalid input format') + + if not (_base.isdense(data) and data.ndim == 1 and + _base.isdense(row) and row.ndim == 1 and + _base.isdense(col) and col.ndim == 1): + raise ValueError('row, column, and data arrays must be 1-D') + if not (len(data) == len(row) == len(col)): + raise ValueError( + 'row, column, and data array must all be the same length') + + self.has_canonical_format = False 
+ + elif _base.isdense(arg1): + if arg1.ndim > 2: + raise TypeError('expected dimension <= 2 array or matrix') + dense = cupy.atleast_2d(arg1) + row, col = dense.nonzero() + data = dense[row, col] + shape = dense.shape + + self.has_canonical_format = True + + else: + raise TypeError('invalid input format') + + if dtype is None: + dtype = data.dtype + else: + dtype = numpy.dtype(dtype) + + if dtype not in (numpy.bool_, numpy.float32, numpy.float64, + numpy.complex64, numpy.complex128): + raise ValueError( + 'Only bool, float32, float64, complex64 and complex128' + ' are supported') + + data = data.astype(dtype, copy=copy) + row = row.astype('i', copy=copy) + col = col.astype('i', copy=copy) + + if shape is None: + if len(row) == 0 or len(col) == 0: + raise ValueError( + 'cannot infer dimensions from zero sized index arrays') + shape = (int(row.max()) + 1, int(col.max()) + 1) + + if len(data) > 0: + if row.max() >= shape[0]: + raise ValueError('row index exceeds matrix dimensions') + if col.max() >= shape[1]: + raise ValueError('column index exceeds matrix dimensions') + if row.min() < 0: + raise ValueError('negative row index found') + if col.min() < 0: + raise ValueError('negative column index found') + + sparse_data._data_matrix.__init__(self, data) + self.row = row + self.col = col + if not _util.isshape(shape): + raise ValueError('invalid shape (must be a 2-tuple of int)') + self._shape = int(shape[0]), int(shape[1]) + + def _with_data(self, data, copy=True): + """Returns a matrix with the same sparsity structure as self, + but with different data. By default the index arrays + (i.e. .row and .col) are copied. + """ + if copy: + return coo_matrix( + (data, (self.row.copy(), self.col.copy())), + shape=self.shape, dtype=data.dtype) + else: + return coo_matrix( + (data, (self.row, self.col)), shape=self.shape, + dtype=data.dtype) + + def diagonal(self, k=0): + """Returns the k-th diagonal of the matrix. 
+ + Args: + k (int, optional): Which diagonal to get, corresponding to elements + a[i, i+k]. Default: 0 (the main diagonal). + + Returns: + cupy.ndarray : The k-th diagonal. + """ + rows, cols = self.shape + if k <= -rows or k >= cols: + return cupy.empty(0, dtype=self.data.dtype) + diag = cupy.zeros(min(rows + min(k, 0), cols - max(k, 0)), + dtype=self.dtype) + diag_mask = (self.row + k) == self.col + + if self.has_canonical_format: + row = self.row[diag_mask] + data = self.data[diag_mask] + else: + diag_coo = coo_matrix((self.data[diag_mask], + (self.row[diag_mask], self.col[diag_mask])), + shape=self.shape) + diag_coo.sum_duplicates() + row = diag_coo.row + data = diag_coo.data + diag[row + min(k, 0)] = data + + return diag + + def setdiag(self, values, k=0): + """Set diagonal or off-diagonal elements of the array. + + Args: + values (ndarray): New values of the diagonal elements. Values may + have any length. If the diagonal is longer than values, then + the remaining diagonal entries will not be set. If values are + longer than the diagonal, then the remaining values are + ignored. If a scalar value is given, all of the diagonal is set + to it. + k (int, optional): Which off-diagonal to set, corresponding to + elements a[i,i+k]. Default: 0 (the main diagonal). + + """ + M, N = self.shape + if (k > 0 and k >= N) or (k < 0 and -k >= M): + raise ValueError("k exceeds matrix dimensions") + if values.ndim and not len(values): + return + idx_dtype = self.row.dtype + + # Determine which triples to keep and where to put the new ones. 
+ full_keep = self.col - self.row != k + if k < 0: + max_index = min(M + k, N) + if values.ndim: + max_index = min(max_index, len(values)) + keep = cupy.logical_or(full_keep, self.col >= max_index) + new_row = cupy.arange(-k, -k + max_index, dtype=idx_dtype) + new_col = cupy.arange(max_index, dtype=idx_dtype) + else: + max_index = min(M, N - k) + if values.ndim: + max_index = min(max_index, len(values)) + keep = cupy.logical_or(full_keep, self.row >= max_index) + new_row = cupy.arange(max_index, dtype=idx_dtype) + new_col = cupy.arange(k, k + max_index, dtype=idx_dtype) + + # Define the array of data consisting of the entries to be added. + if values.ndim: + new_data = values[:max_index] + else: + new_data = cupy.full(max_index, values, dtype=self.dtype) + + # Update the internal structure. + self.row = cupy.concatenate((self.row[keep], new_row)) + self.col = cupy.concatenate((self.col[keep], new_col)) + self.data = cupy.concatenate((self.data[keep], new_data)) + self.has_canonical_format = False + + def eliminate_zeros(self): + """Removes zero entories in place.""" + ind = self.data != 0 + self.data = self.data[ind] + self.row = self.row[ind] + self.col = self.col[ind] + + def get_shape(self): + """Returns the shape of the matrix. + + Returns: + tuple: Shape of the matrix. + """ + return self._shape + + def getnnz(self, axis=None): + """Returns the number of stored values, including explicit zeros.""" + if axis is None: + return self.data.size + else: + raise ValueError + + def get(self, stream=None): + """Returns a copy of the array on host memory. + + Args: + stream (cupy.cuda.Stream): CUDA stream object. If it is given, the + copy runs asynchronously. Otherwise, the copy is synchronous. + + Returns: + scipy.sparse.coo_matrix: Copy of the array on host memory. 
+ + """ + if not _scipy_available: + raise RuntimeError('scipy is not available') + + data = self.data.get(stream) + row = self.row.get(stream) + col = self.col.get(stream) + return scipy.sparse.coo_matrix( + (data, (row, col)), shape=self.shape) + + def reshape(self, *shape, order='C'): + """Gives a new shape to a sparse matrix without changing its data. + + Args: + shape (tuple): + The new shape should be compatible with the original shape. + order: {'C', 'F'} (optional) + Read the elements using this index order. 'C' means to read and + write the elements using C-like index order. 'F' means to read + and write the elements using Fortran-like index order. Default: + C. + + Returns: + cupyx.scipy.sparse.coo_matrix: sparse matrix + + """ + + shape = _sputils.check_shape(shape, self.shape) + + if shape == self.shape: + return self + + nrows, ncols = self.shape + + if order == 'C': # C to represent matrix in row major format + dtype = _sputils.get_index_dtype( + maxval=(ncols * max(0, nrows - 1) + max(0, ncols - 1))) + flat_indices = cupy.multiply(ncols, self.row, + dtype=dtype) + self.col + new_row, new_col = divmod(flat_indices, shape[1]) + elif order == 'F': + dtype = _sputils.get_index_dtype( + maxval=(ncols * max(0, nrows - 1) + max(0, ncols - 1))) + flat_indices = cupy.multiply(ncols, self.row, + dtype=dtype) + self.row + new_col, new_row = divmod(flat_indices, shape[0]) + else: + raise ValueError("'order' must be 'C' or 'F'") + + new_data = self.data + + return coo_matrix((new_data, (new_row, new_col)), shape=shape, + copy=False) + + def sum_duplicates(self): + """Eliminate duplicate matrix entries by adding them together. + + .. warning:: + When sorting the indices, CuPy follows the convention of cuSPARSE, + which is different from that of SciPy. Therefore, the order of the + output indices may differ: + + .. 
code-block:: python + + >>> # 1 0 0 + >>> # A = 1 1 0 + >>> # 1 1 1 + >>> data = cupy.array([1, 1, 1, 1, 1, 1], 'f') + >>> row = cupy.array([0, 1, 1, 2, 2, 2], 'i') + >>> col = cupy.array([0, 0, 1, 0, 1, 2], 'i') + >>> A = cupyx.scipy.sparse.coo_matrix((data, (row, col)), + ... shape=(3, 3)) + >>> a = A.get() + >>> A.sum_duplicates() + >>> a.sum_duplicates() # a is scipy.sparse.coo_matrix + >>> A.row + array([0, 1, 1, 2, 2, 2], dtype=int32) + >>> a.row + array([0, 1, 2, 1, 2, 2], dtype=int32) + >>> A.col + array([0, 0, 1, 0, 1, 2], dtype=int32) + >>> a.col + array([0, 0, 0, 1, 1, 2], dtype=int32) + + .. warning:: + Calling this function might synchronize the device. + + .. seealso:: + :meth:`scipy.sparse.coo_matrix.sum_duplicates` + + """ + if self.has_canonical_format: + return + # Note: The sorting order below follows the cuSPARSE convention (first + # row then col, so-called row-major) and differs from that of SciPy, as + # the cuSPARSE functions such as cusparseSpMV() assume this sorting + # order. + # See https://docs.nvidia.com/cuda/cusparse/index.html#coo-format + keys = cupy.stack([self.col, self.row]) + order = cupy.lexsort(keys) + src_data = self.data[order] + src_row = self.row[order] + src_col = self.col[order] + diff = self._sum_duplicates_diff(src_row, src_col, size=self.row.size) + + if diff[1:].all(): + # All elements have different indices. 
+ data = src_data + row = src_row + col = src_col + else: + # TODO(leofang): move the kernels outside this method + index = cupy.cumsum(diff, dtype='i') + size = int(index[-1]) + 1 + data = cupy.zeros(size, dtype=self.data.dtype) + row = cupy.empty(size, dtype='i') + col = cupy.empty(size, dtype='i') + if self.data.dtype.kind == 'b': + cupy.ElementwiseKernel( + 'T src_data, int32 src_row, int32 src_col, int32 index', + 'raw T data, raw int32 row, raw int32 col', + ''' + if (src_data) data[index] = true; + row[index] = src_row; + col[index] = src_col; + ''', + 'cupyx_scipy_sparse_coo_sum_duplicates_assign' + )(src_data, src_row, src_col, index, data, row, col) + elif self.data.dtype.kind == 'f': + cupy.ElementwiseKernel( + 'T src_data, int32 src_row, int32 src_col, int32 index', + 'raw T data, raw int32 row, raw int32 col', + ''' + atomicAdd(&data[index], src_data); + row[index] = src_row; + col[index] = src_col; + ''', + 'cupyx_scipy_sparse_coo_sum_duplicates_assign' + )(src_data, src_row, src_col, index, data, row, col) + elif self.data.dtype.kind == 'c': + cupy.ElementwiseKernel( + 'T src_real, T src_imag, int32 src_row, int32 src_col, ' + 'int32 index', + 'raw T real, raw T imag, raw int32 row, raw int32 col', + ''' + atomicAdd(&real[index], src_real); + atomicAdd(&imag[index], src_imag); + row[index] = src_row; + col[index] = src_col; + ''', + 'cupyx_scipy_sparse_coo_sum_duplicates_assign_complex' + )(src_data.real, src_data.imag, src_row, src_col, index, + data.real, data.imag, row, col) + + self.data = data + self.row = row + self.col = col + self.has_canonical_format = True + + def toarray(self, order=None, out=None): + """Returns a dense matrix representing the same value. + + Args: + order (str): Not supported. + out: Not supported. + + Returns: + cupy.ndarray: Dense array representing the same value. + + .. 
seealso:: :meth:`scipy.sparse.coo_matrix.toarray` + + """ + return self.tocsr().toarray(order=order, out=out) + + def tocoo(self, copy=False): + """Converts the matrix to COOrdinate format. + + Args: + copy (bool): If ``False``, it shares data arrays as much as + possible. + + Returns: + cupyx.scipy.sparse.coo_matrix: Converted matrix. + + """ + if copy: + return self.copy() + else: + return self + + def tocsc(self, copy=False): + """Converts the matrix to Compressed Sparse Column format. + + Args: + copy (bool): If ``False``, it shares data arrays as much as + possible. Actually this option is ignored because all + arrays in a matrix cannot be shared in coo to csc conversion. + + Returns: + cupyx.scipy.sparse.csc_matrix: Converted matrix. + + """ + from cupyx import cusparse + + if self.nnz == 0: + return _csc.csc_matrix(self.shape, dtype=self.dtype) + # copy is silently ignored (in line with SciPy) because both + # sum_duplicates and coosort change the underlying data + x = self.copy() + x.sum_duplicates() + cusparse.coosort(x, 'c') + x = cusparse.coo2csc(x) + x.has_canonical_format = True + return x + + def tocsr(self, copy=False): + """Converts the matrix to Compressed Sparse Row format. + + Args: + copy (bool): If ``False``, it shares data arrays as much as + possible. Actually this option is ignored because all + arrays in a matrix cannot be shared in coo to csr conversion. + + Returns: + cupyx.scipy.sparse.csr_matrix: Converted matrix. + + """ + from cupyx import cusparse + + if self.nnz == 0: + return _csr.csr_matrix(self.shape, dtype=self.dtype) + # copy is silently ignored (in line with SciPy) because both + # sum_duplicates and coosort change the underlying data + x = self.copy() + x.sum_duplicates() + cusparse.coosort(x, 'r') + x = cusparse.coo2csr(x) + x.has_canonical_format = True + return x + + def transpose(self, axes=None, copy=False): + """Returns a transpose matrix. + + Args: + axes: This option is not supported. 
+ copy (bool): If ``True``, a returned matrix shares no data. + Otherwise, it shared data arrays as much as possible. + + Returns: + cupyx.scipy.sparse.spmatrix: Transpose matrix. + + """ + if axes is not None: + raise ValueError( + 'Sparse matrices do not support an \'axes\' parameter because ' + 'swapping dimensions is the only logical permutation.') + shape = self.shape[1], self.shape[0] + return coo_matrix( + (self.data, (self.col, self.row)), shape=shape, copy=copy) + + +def isspmatrix_coo(x): + """Checks if a given matrix is of COO format. + + Returns: + bool: Returns if ``x`` is :class:`cupyx.scipy.sparse.coo_matrix`. + + """ + return isinstance(x, coo_matrix) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_csc.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_csc.py new file mode 100644 index 0000000000000000000000000000000000000000..e5a62f83183fdfb70ca8087af65f607b13cecbd8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_csc.py @@ -0,0 +1,413 @@ +try: + import scipy.sparse + _scipy_available = True +except ImportError: + _scipy_available = False + +import cupy +from cupy_backends.cuda.api import driver +from cupy_backends.cuda.api import runtime +import cupyx.scipy.sparse +from cupyx.scipy.sparse import _base +from cupyx.scipy.sparse import _compressed + + +class csc_matrix(_compressed._compressed_sparse_matrix): + + """Compressed Sparse Column matrix. + + This can be instantiated in several ways. + + ``csc_matrix(D)`` + ``D`` is a rank-2 :class:`cupy.ndarray`. + ``csc_matrix(S)`` + ``S`` is another sparse matrix. It is equivalent to ``S.tocsc()``. + ``csc_matrix((M, N), [dtype])`` + It constructs an empty matrix whose shape is ``(M, N)``. Default dtype + is float64. + ``csc_matrix((data, (row, col)))`` + All ``data``, ``row`` and ``col`` are one-dimenaional + :class:`cupy.ndarray`. 
    ``csc_matrix((data, indices, indptr))``
        All ``data``, ``indices`` and ``indptr`` are one-dimensional
        :class:`cupy.ndarray`.

    Args:
        arg1: Arguments for the initializer.
        shape (tuple): Shape of a matrix. Its length must be two.
        dtype: Data type. It must be an argument of :class:`numpy.dtype`.
        copy (bool): If ``True``, copies of given arrays are always used.

    .. seealso::
       :class:`scipy.sparse.csc_matrix`

    """

    format = 'csc'

    def get(self, stream=None):
        """Returns a copy of the array on host memory.

        .. warning::
           You need to install SciPy to use this method.

        Args:
            stream (cupy.cuda.Stream): CUDA stream object. If it is given, the
                copy runs asynchronously. Otherwise, the copy is synchronous.

        Returns:
            scipy.sparse.csc_matrix: Copy of the array on host memory.

        """
        if not _scipy_available:
            raise RuntimeError('scipy is not available')
        data = self.data.get(stream)
        indices = self.indices.get(stream)
        indptr = self.indptr.get(stream)
        return scipy.sparse.csc_matrix(
            (data, indices, indptr), shape=self._shape)

    def _convert_dense(self, x):
        # Convert a dense array to CSC arrays, preferring the generic
        # denseToSparse entry point when this cuSPARSE build provides it.
        from cupyx import cusparse

        if cusparse.check_availability('denseToSparse'):
            m = cusparse.denseToSparse(x, format='csc')
        else:
            m = cusparse.dense2csc(x)
        return m.data, m.indices, m.indptr

    def _swap(self, x, y):
        # CSC is column-major: the "major" axis is columns, so swap the pair
        # relative to the row-major base-class convention.
        return (y, x)

    def __mul__(self, other):
        # Matrix product (or scalar scaling).  Dispatches to whichever
        # cuSPARSE GEMM/SpMV/SpMM routine this build provides; most branches
        # go through a CSR view of self because cuSPARSE operates on CSR.
        from cupyx import cusparse

        if cupy.isscalar(other):
            self.sum_duplicates()
            return self._with_data(self.data * other)
        elif cupyx.scipy.sparse.isspmatrix_csr(other):
            self.sum_duplicates()
            other.sum_duplicates()
            if cusparse.check_availability('spgemm'):
                a = self.tocsr()
                a.sum_duplicates()
                return cusparse.spgemm(a, other)
            elif cusparse.check_availability('csrgemm') and not runtime.is_hip:
                # trans=True is still buggy as of ROCm 4.2.0
                a = self.T
                return cusparse.csrgemm(a, other, transa=True)
            elif cusparse.check_availability('csrgemm2'):
                a = self.tocsr()
                a.sum_duplicates()
                return cusparse.csrgemm2(a, other)
            else:
                raise AssertionError
        elif isspmatrix_csc(other):
            self.sum_duplicates()
            other.sum_duplicates()
            if cusparse.check_availability('csrgemm') and not runtime.is_hip:
                # trans=True is still buggy as of ROCm 4.2.0
                a = self.T
                b = other.T
                return cusparse.csrgemm(a, b, transa=True, transb=True)
            elif cusparse.check_availability('csrgemm2'):
                a = self.tocsr()
                b = other.tocsr()
                a.sum_duplicates()
                b.sum_duplicates()
                return cusparse.csrgemm2(a, b)
            elif cusparse.check_availability('spgemm'):
                a = self.tocsr()
                b = other.tocsr()
                a.sum_duplicates()
                b.sum_duplicates()
                return cusparse.spgemm(a, b)
            else:
                raise AssertionError
        elif cupyx.scipy.sparse.isspmatrix(other):
            return self * other.tocsr()
        elif _base.isdense(other):
            if other.ndim == 0:
                self.sum_duplicates()
                return self._with_data(self.data * other)
            elif other.ndim == 1:
                # Matrix-vector: treat self.T as CSR and multiply transposed.
                self.sum_duplicates()
                if (
                    cusparse.check_availability('csrmv')
                    and (
                        not runtime.is_hip
                        or driver.get_build_version() >= 5_00_00000
                    )
                ):
                    # trans=True is buggy as of ROCm 4.2.0
                    csrmv = cusparse.csrmv
                elif (cusparse.check_availability('spmv')
                        and not runtime.is_hip):
                    # trans=True is buggy as of ROCm 4.2.0
                    # (I got HIPSPARSE_STATUS_INTERNAL_ERROR...)
                    csrmv = cusparse.spmv
                else:
                    raise AssertionError
                return csrmv(self.T, cupy.asfortranarray(other), transa=True)
            elif other.ndim == 2:
                # Matrix-matrix with a dense right-hand side.
                self.sum_duplicates()
                if (
                    cusparse.check_availability('csrmm2')
                    and (
                        not runtime.is_hip
                        or driver.get_build_version() >= 5_00_00000
                    )
                ):
                    # trans=True is buggy as of ROCm 4.2.0
                    csrmm = cusparse.csrmm2
                elif cusparse.check_availability('spmm'):
                    csrmm = cusparse.spmm
                else:
                    raise AssertionError
                return csrmm(self.T, cupy.asfortranarray(other), transa=True)
            else:
                raise ValueError('could not interpret dimensions')
        else:
            return NotImplemented

    # TODO(unno): Implement check_format
    # TODO(unno): Implement diagonal

    def eliminate_zeros(self):
        """Removes zero entries in place."""
        # Round-trip through the transpose (a CSR matrix), which implements
        # the actual removal, then convert back.
        t = self.T
        t.eliminate_zeros()
        compress = t.T
        self.data = compress.data
        self.indices = compress.indices
        self.indptr = compress.indptr

    # TODO(unno): Implement maximum
    # TODO(unno): Implement minimum
    # TODO(unno): Implement multiply
    # TODO(unno): Implement prune

    def sort_indices(self):
        """Sorts the indices of this matrix *in place*.

        .. warning::
            Calling this function might synchronize the device.

        """
        from cupyx import cusparse

        if not self.has_sorted_indices:
            cusparse.cscsort(self)
            self.has_sorted_indices = True

    def toarray(self, order=None, out=None):
        """Returns a dense matrix representing the same value.

        Args:
            order ({'C', 'F', None}): Whether to store data in C (row-major)
                order or F (column-major) order. Default is C-order.
            out: Not supported.

        Returns:
            cupy.ndarray: Dense array representing the same matrix.

        .. seealso:: :meth:`scipy.sparse.csc_matrix.toarray`

        """
        from cupyx import cusparse

        if order is None:
            order = 'C'
        order = order.upper()
        if self.nnz == 0:
            return cupy.zeros(shape=self.shape, dtype=self.dtype, order=order)

        x = self.copy()
        x.has_canonical_format = False  # need to enforce sum_duplicates
        x.sum_duplicates()
        if (cusparse.check_availability('sparseToDense')
                and (not runtime.is_hip or x.nnz > 0)):
            # On HIP, nnz=0 is problematic as of ROCm 4.2.0
            y = cusparse.sparseToDense(x)
            if order == 'F':
                return y
            elif order == 'C':
                return cupy.ascontiguousarray(y)
            else:
                raise ValueError('order not understood')
        else:
            # csc2dense and csr2dense returns F-contiguous array.
            if order == 'C':
                # To return C-contiguous array, it uses transpose.
                return cusparse.csr2dense(x.T).T
            elif order == 'F':
                return cusparse.csc2dense(x)
            else:
                raise ValueError('order not understood')

    def _add_sparse(self, other, alpha, beta):
        # Computes alpha * self + beta * other via csrgeam on the transposes
        # (cuSPARSE geam works on CSR), transposing the result back.
        from cupyx import cusparse

        self.sum_duplicates()
        other = other.tocsc().T
        other.sum_duplicates()
        if cusparse.check_availability('csrgeam2'):
            csrgeam = cusparse.csrgeam2
        elif cusparse.check_availability('csrgeam'):
            csrgeam = cusparse.csrgeam
        else:
            raise NotImplementedError
        return csrgeam(self.T, other, alpha, beta).T

    # TODO(unno): Implement tobsr

    def tocoo(self, copy=False):
        """Converts the matrix to COOrdinate format.

        Args:
            copy (bool): If ``False``, it shares data arrays as much as
                possible.

        Returns:
            cupyx.scipy.sparse.coo_matrix: Converted matrix.

        """
        from cupyx import cusparse

        if copy:
            data = self.data.copy()
            indices = self.indices.copy()
        else:
            data = self.data
            indices = self.indices

        return cusparse.csc2coo(self, data, indices)

    def tocsc(self, copy=None):
        """Converts the matrix to Compressed Sparse Column format.

        Args:
            copy (bool): If ``False``, the method returns itself.
                Otherwise it makes a copy of the matrix.

        Returns:
            cupyx.scipy.sparse.csc_matrix: Converted matrix.

        """
        if copy:
            return self.copy()
        else:
            return self

    def tocsr(self, copy=False):
        """Converts the matrix to Compressed Sparse Row format.

        Args:
            copy (bool): If ``False``, it shares data arrays as much as
                possible. Actually this option is ignored because all
                arrays in a matrix cannot be shared in csr to csc conversion.

        Returns:
            cupyx.scipy.sparse.csr_matrix: Converted matrix.

        """
        from cupyx import cusparse

        # copy is ignored
        if cusparse.check_availability('csc2csr'):
            csc2csr = cusparse.csc2csr
        elif cusparse.check_availability('csc2csrEx2'):
            csc2csr = cusparse.csc2csrEx2
        else:
            raise NotImplementedError
        # don't touch has_sorted_indices, as cuSPARSE made no guarantee
        return csc2csr(self)

    def _tocsx(self):
        """Inverts the format.
        """
        return self.tocsr()

    # TODO(unno): Implement todia
    # TODO(unno): Implement todok
    # TODO(unno): Implement tolil

    def transpose(self, axes=None, copy=False):
        """Returns a transpose matrix.

        Args:
            axes: This option is not supported.
            copy (bool): If ``True``, a returned matrix shares no data.
                Otherwise, it shares data arrays as much as possible.

        Returns:
            cupyx.scipy.sparse.csr_matrix: `self` with the dimensions reversed.

        """
        if axes is not None:
            raise ValueError(
                'Sparse matrices do not support an \'axes\' parameter because '
                'swapping dimensions is the only logical permutation.')

        # A CSC matrix reinterpreted with swapped shape IS its transpose in
        # CSR form; no data movement is needed.
        shape = self.shape[1], self.shape[0]
        trans = cupyx.scipy.sparse.csr_matrix(
            (self.data, self.indices, self.indptr), shape=shape, copy=copy)
        trans.has_canonical_format = self.has_canonical_format
        return trans

    def getrow(self, i):
        """Returns a copy of row i of the matrix, as a (1 x n)
        CSR matrix (row vector).

        Args:
            i (integer): Row

        Returns:
            cupyx.scipy.sparse.csc_matrix: Sparse matrix with single row
        """
        return self._minor_slice(slice(i, i + 1), copy=True).tocsr()

    def getcol(self, i):
        """Returns a copy of column i of the matrix, as a (m x 1)
        CSC matrix (column vector).

        Args:
            i (integer): Column

        Returns:
            cupyx.scipy.sparse.csc_matrix: Sparse matrix with single column
        """
        return self._major_slice(slice(i, i + 1), copy=True)

    # Indexing helpers: in CSC the major axis is columns, so the column
    # selector goes through _major_* and the row selector through _minor_*.

    def _get_intXarray(self, row, col):
        row = slice(row, row + 1)
        return self._major_index_fancy(col)._minor_slice(row)

    def _get_intXslice(self, row, col):
        row = slice(row, row + 1)
        copy = col.step in (1, None)
        return self._major_slice(col)._minor_slice(row, copy=copy)

    def _get_sliceXint(self, row, col):
        col = slice(col, col + 1)
        return self._major_slice(col)._minor_slice(row, copy=True)

    def _get_sliceXarray(self, row, col):
        return self._major_index_fancy(col)._minor_slice(row)

    def _get_arrayXint(self, row, col):
        col = slice(col, col + 1)
        return self._major_slice(col)._minor_index_fancy(row)

    def _get_arrayXslice(self, row, col):
        return self._major_slice(col)._minor_index_fancy(row)


def isspmatrix_csc(x):
    """Checks if a given matrix is of CSC format.

    Returns:
        bool: Returns if ``x`` is :class:`cupyx.scipy.sparse.csc_matrix`.
    """
    return isinstance(x, csc_matrix)
diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_csr.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_csr.py
new file mode 100644
index 0000000000000000000000000000000000000000..5dd270318d4c06a5f312307c8a77bb2749ae2148
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_csr.py
@@ -0,0 +1,1242 @@
import operator
import warnings

import numpy

try:
    import scipy.sparse
    _scipy_available = True
except ImportError:
    _scipy_available = False

import cupy
from cupy._core import _accelerator
from cupy.cuda import cub
from cupy.cuda import runtime
from cupyx.scipy.sparse import _base
from cupyx.scipy.sparse import _compressed
from cupyx.scipy.sparse import _csc
from cupyx.scipy.sparse import SparseEfficiencyWarning
from cupyx.scipy.sparse import _util


class csr_matrix(_compressed._compressed_sparse_matrix):

    """Compressed Sparse Row matrix.

    This can be instantiated in several ways.

    ``csr_matrix(D)``
        ``D`` is a rank-2 :class:`cupy.ndarray`.
    ``csr_matrix(S)``
        ``S`` is another sparse matrix. It is equivalent to ``S.tocsr()``.
    ``csr_matrix((M, N), [dtype])``
        It constructs an empty matrix whose shape is ``(M, N)``. Default dtype
        is float64.
    ``csr_matrix((data, (row, col)))``
        All ``data``, ``row`` and ``col`` are one-dimensional
        :class:`cupy.ndarray`.
    ``csr_matrix((data, indices, indptr))``
        All ``data``, ``indices`` and ``indptr`` are one-dimensional
        :class:`cupy.ndarray`.

    Args:
        arg1: Arguments for the initializer.
        shape (tuple): Shape of a matrix. Its length must be two.
        dtype: Data type. It must be an argument of :class:`numpy.dtype`.
        copy (bool): If ``True``, copies of given arrays are always used.

    .. seealso::
       :class:`scipy.sparse.csr_matrix`

    """

    format = 'csr'

    def get(self, stream=None):
        """Returns a copy of the array on host memory.

        Args:
            stream (cupy.cuda.Stream): CUDA stream object. If it is given, the
                copy runs asynchronously. Otherwise, the copy is synchronous.

        Returns:
            scipy.sparse.csr_matrix: Copy of the array on host memory.

        """
        if not _scipy_available:
            raise RuntimeError('scipy is not available')
        data = self.data.get(stream)
        indices = self.indices.get(stream)
        indptr = self.indptr.get(stream)
        return scipy.sparse.csr_matrix(
            (data, indices, indptr), shape=self._shape)

    def _convert_dense(self, x):
        # NOTE(review): `dense2csr` is unqualified here (compare
        # _csc._convert_dense, which calls cusparse.dense2csc) — presumably a
        # module-level helper defined later in this file; verify it resolves.
        m = dense2csr(x)
        return m.data, m.indices, m.indptr

    def _swap(self, x, y):
        # CSR is row-major: the major axis is rows, so keep the pair order.
        return (x, y)

    def _add_sparse(self, other, alpha, beta):
        # Computes alpha * self + beta * other with whichever csrgeam
        # generation this cuSPARSE build provides.
        from cupyx import cusparse

        self.sum_duplicates()
        other = other.tocsr()
        other.sum_duplicates()
        if cusparse.check_availability('csrgeam2'):
            csrgeam = cusparse.csrgeam2
        elif cusparse.check_availability('csrgeam'):
            csrgeam = cusparse.csrgeam
        else:
            raise NotImplementedError
        return csrgeam(self, other, alpha, beta)

    def _comparison(self, other, op, op_name):
        # Shared implementation behind __eq__/__ne__/__lt__/__gt__/__le__/
        # __ge__.  `op_name` selects the elementwise kernel; ==/<=/>= are
        # computed as the logical negation of their opposites, which requires
        # densifying (hence the efficiency warning).
        # NOTE(review): `binopt_csr` is presumably a module-level helper
        # defined later in this file — not visible in this chunk.
        if _util.isscalarlike(other):
            data = cupy.asarray(other, dtype=self.dtype).reshape(1)
            if numpy.isnan(data[0]):
                # NaN compares false to everything, so != is all-True and the
                # other comparisons are all-False.
                if op_name == '_ne_':
                    return csr_matrix(cupy.ones(self.shape, dtype=numpy.bool_))
                else:
                    return csr_matrix(self.shape, dtype=numpy.bool_)
            indices = cupy.zeros((1,), dtype=numpy.int32)
            indptr = cupy.arange(2, dtype=numpy.int32)
            other = csr_matrix((data, indices, indptr), shape=(1, 1))
            return binopt_csr(self, other, op_name)
        elif _util.isdense(other):
            return op(self.todense(), other)
        elif isspmatrix_csr(other):
            self.sum_duplicates()
            other.sum_duplicates()
            if op_name in ('_ne_', '_lt_', '_gt_'):
                return binopt_csr(self, other, op_name)

            warnings.warn(
                "Comparing sparse matrices using ==, <=, and >= is "
                "inefficient, try using !=, <, or > instead.",
                SparseEfficiencyWarning)
            if op_name == '_eq_':
                opposite_op_name = '_ne_'
            elif op_name == '_le_':
                opposite_op_name = '_gt_'
            elif op_name == '_ge_':
                opposite_op_name = '_lt_'
            res = binopt_csr(self, other, opposite_op_name)
            out = cupy.logical_not(res.toarray())
            return csr_matrix(out)
        raise NotImplementedError

    def __eq__(self, other):
        return self._comparison(other, operator.eq, '_eq_')

    def __ne__(self, other):
        return self._comparison(other, operator.ne, '_ne_')

    def __lt__(self, other):
        return self._comparison(other, operator.lt, '_lt_')

    def __gt__(self, other):
        return self._comparison(other, operator.gt, '_gt_')

    def __le__(self, other):
        return self._comparison(other, operator.le, '_le_')

    def __ge__(self, other):
        return self._comparison(other, operator.ge, '_ge_')

    def __mul__(self, other):
        # Matrix product (or scalar scaling), dispatching across cuSPARSE /
        # CUB backends depending on availability, HIP, and operand kind.
        from cupyx import cusparse

        if cupy.isscalar(other):
            self.sum_duplicates()
            return self._with_data(self.data * other)
        elif isspmatrix_csr(other):
            self.sum_duplicates()
            other.sum_duplicates()
            if cusparse.check_availability('spgemm'):
                return cusparse.spgemm(self, other)
            elif cusparse.check_availability('csrgemm2'):
                return cusparse.csrgemm2(self, other)
            elif cusparse.check_availability('csrgemm'):
                return cusparse.csrgemm(self, other)
            else:
                raise AssertionError
        elif _csc.isspmatrix_csc(other):
            self.sum_duplicates()
            other.sum_duplicates()
            if cusparse.check_availability('csrgemm') and not runtime.is_hip:
                # trans=True is still buggy as of ROCm 4.2.0
                return cusparse.csrgemm(self, other.T, transb=True)
            elif cusparse.check_availability('spgemm'):
                b = other.tocsr()
                b.sum_duplicates()
                return cusparse.spgemm(self, b)
            elif cusparse.check_availability('csrgemm2'):
                b = other.tocsr()
                b.sum_duplicates()
                return cusparse.csrgemm2(self, b)
            else:
                raise AssertionError
        elif _base.isspmatrix(other):
            return self * other.tocsr()
        elif _base.isdense(other):
            if other.ndim == 0:
                self.sum_duplicates()
                return self._with_data(self.data * other)
            elif other.ndim == 1:
                self.sum_duplicates()
                other = cupy.asfortranarray(other)
                # need extra padding to ensure not stepping on the CUB bug,
                # see cupy/cupy#3679 for discussion
                is_cub_safe = (self.indptr.data.mem.size
                               > self.indptr.size * self.indptr.dtype.itemsize)
                # CUB spmv is buggy since CUDA 11.0, see
                # https://github.com/cupy/cupy/issues/3822#issuecomment-782607637
                is_cub_safe &= (cub._get_cuda_build_version() < 11000)
                for accelerator in _accelerator.get_routine_accelerators():
                    if (accelerator == _accelerator.ACCELERATOR_CUB
                            and not runtime.is_hip
                            and is_cub_safe and other.flags.c_contiguous):
                        return cub.device_csrmv(
                            self.shape[0], self.shape[1], self.nnz,
                            self.data, self.indptr, self.indices, other)
                if (cusparse.check_availability('csrmvEx') and self.nnz > 0 and
                        cusparse.csrmvExIsAligned(self, other)):
                    # csrmvEx does not work if nnz == 0
                    csrmv = cusparse.csrmvEx
                elif cusparse.check_availability('csrmv'):
                    csrmv = cusparse.csrmv
                elif cusparse.check_availability('spmv'):
                    csrmv = cusparse.spmv
                else:
                    raise AssertionError
                return csrmv(self, other)
            elif other.ndim == 2:
                self.sum_duplicates()
                if cusparse.check_availability('csrmm2'):
                    csrmm = cusparse.csrmm2
                elif cusparse.check_availability('spmm'):
                    csrmm = cusparse.spmm
                else:
                    raise AssertionError
                return csrmm(self, cupy.asfortranarray(other))
            else:
                raise ValueError('could not interpret dimensions')
        else:
            return NotImplemented

    def __div__(self, other):
        raise NotImplementedError

    def __rdiv__(self, other):
        raise NotImplementedError

    def __truediv__(self, other):
        """Point-wise division by another matrix, vector or scalar"""
        # NOTE(review): `multiply_by_scalar`, `check_shape_for_pointwise_op`
        # and `_cupy_divide_by_dense` are presumably module-level helpers
        # defined later in this file — not visible in this chunk.
        if _util.isscalarlike(other):
            dtype = self.dtype
            if dtype == numpy.float32:
                # Note: This is a work-around to make the output dtype the same
                # as SciPy. It might be SciPy version dependent.
                dtype = numpy.float64
            dtype = cupy.result_type(dtype, other)
            d = cupy.reciprocal(other, dtype=dtype)
            return multiply_by_scalar(self, d)
        elif _util.isdense(other):
            other = cupy.atleast_2d(other)
            other = cupy.broadcast_to(other, self.shape)
            check_shape_for_pointwise_op(self.shape, other.shape)
            ret = self.tocoo()
            ret.data = _cupy_divide_by_dense()(
                ret.data, ret.row, ret.col, ret.shape[1], other)
            return ret
        elif _base.isspmatrix(other):
            # Note: If broadcasting is needed, an exception is raised here for
            # compatibility with SciPy, as SciPy does not support broadcasting
            # in the "sparse / sparse" case.
            check_shape_for_pointwise_op(self.shape, other.shape,
                                         allow_broadcasting=False)
            dtype = numpy.promote_types(self.dtype, other.dtype)
            if dtype.char not in 'FD':
                dtype = numpy.promote_types(numpy.float64, dtype)
            # Note: The following implementation converts two sparse matrices
            # into dense matrices and then performs a point-wise division,
            # which can use lots of memory.
            self_dense = self.todense().astype(dtype, copy=False)
            return self_dense / other.todense()
        return NotImplemented

    def __rtruediv__(self, other):
        return NotImplemented

    # TODO(unno): Implement check_format

    def diagonal(self, k=0):
        # Returns the k-th diagonal as a dense vector; the extraction itself
        # runs in the _cupy_csr_diagonal kernel (defined elsewhere in file).
        rows, cols = self.shape
        ylen = min(rows + min(k, 0), cols - max(k, 0))
        if ylen <= 0:
            return cupy.empty(0, dtype=self.dtype)
        self.sum_duplicates()
        y = cupy.empty(ylen, dtype=self.dtype)
        _cupy_csr_diagonal()(k, rows, cols, self.data, self.indptr,
                             self.indices, y)
        return y

    def eliminate_zeros(self):
        """Removes zero entries in place."""
        from cupyx import cusparse

        compress = cusparse.csr2csr_compress(self, 0)
        self.data = compress.data
        self.indices = compress.indices
        self.indptr = compress.indptr

    def _maximum_minimum(self, other, cupy_op, op_name, dense_check):
        # Shared implementation behind maximum()/minimum().  `dense_check`
        # decides whether a scalar operand forces densification (e.g. a
        # positive scalar makes every implicit zero become that scalar under
        # maximum, so the result is dense).
        if _util.isscalarlike(other):
            other = cupy.asarray(other, dtype=self.dtype)
            if dense_check(other):
                dtype = self.dtype
                # Note: This is a work-around to make the output dtype the same
                # as SciPy. It might be SciPy version dependent.
                if dtype == numpy.float32:
                    dtype = numpy.float64
                elif dtype == numpy.complex64:
                    dtype = numpy.complex128
                dtype = cupy.result_type(dtype, other)
                other = other.astype(dtype, copy=False)
                # Note: The computation steps below are different from SciPy.
                new_array = cupy_op(self.todense(), other)
                return csr_matrix(new_array)
            else:
                self.sum_duplicates()
                new_data = cupy_op(self.data, other)
                return csr_matrix((new_data, self.indices, self.indptr),
                                  shape=self.shape, dtype=self.dtype)
        elif _util.isdense(other):
            self.sum_duplicates()
            other = cupy.atleast_2d(other)
            return cupy_op(self.todense(), other)
        elif isspmatrix_csr(other):
            self.sum_duplicates()
            other.sum_duplicates()
            return binopt_csr(self, other, op_name)
        raise NotImplementedError

    def maximum(self, other):
        return self._maximum_minimum(other, cupy.maximum, '_maximum_',
                                     lambda x: x > 0)

    def minimum(self, other):
        return self._maximum_minimum(other, cupy.minimum, '_minimum_',
                                     lambda x: x < 0)

    def multiply(self, other):
        """Point-wise multiplication by another matrix, vector or scalar"""
        # NOTE(review): `multiply_by_dense` / `multiply_by_csr` are presumably
        # module-level helpers defined later in this file.
        if cupy.isscalar(other):
            return multiply_by_scalar(self, other)
        elif _util.isdense(other):
            self.sum_duplicates()
            other = cupy.atleast_2d(other)
            return multiply_by_dense(self, other)
        elif isspmatrix_csr(other):
            self.sum_duplicates()
            other.sum_duplicates()
            return multiply_by_csr(self, other)
        else:
            msg = 'expected scalar, dense matrix/vector or csr matrix'
            raise TypeError(msg)

    # TODO(unno): Implement prune

    def setdiag(self, values, k=0):
        """Set diagonal or off-diagonal elements of the array."""
        # Builds a single-diagonal CSR delta matrix holding
        # (new_values - current_diagonal) and adds it to self, so existing
        # off-target entries are preserved.
        rows, cols = self.shape
        row_st, col_st = max(0, -k), max(0, k)
        x_len = min(rows - row_st, cols - col_st)
        if x_len <= 0:
            raise ValueError('k exceeds matrix dimensions')
        values = values.astype(self.dtype)
        if values.ndim == 0:
            # broadcast
            x_data = cupy.full((x_len,), values, dtype=self.dtype)
        else:
            x_len = min(x_len, values.size)
            x_data = values[:x_len]
        x_indices = cupy.arange(col_st, col_st + x_len, dtype='i')
        x_indptr = cupy.zeros((rows + 1,), dtype='i')
        x_indptr[row_st:row_st+x_len+1] = cupy.arange(x_len+1, dtype='i')
        x_indptr[row_st+x_len+1:] = x_len
        x_data -= self.diagonal(k=k)[:x_len]
        y = self + csr_matrix((x_data, x_indices, x_indptr), shape=self.shape)
        self.data = y.data
        self.indices = y.indices
        self.indptr = y.indptr

    def sort_indices(self):
        """Sorts the indices of this matrix *in place*.

        .. warning::
            Calling this function might synchronize the device.

        """
        from cupyx import cusparse

        if not self.has_sorted_indices:
            cusparse.csrsort(self)
            self.has_sorted_indices = True

    def toarray(self, order=None, out=None):
        """Returns a dense matrix representing the same value.

        Args:
            order ({'C', 'F', None}): Whether to store data in C (row-major)
                order or F (column-major) order. Default is C-order.
            out: Not supported.

        Returns:
            cupy.ndarray: Dense array representing the same matrix.

        .. seealso:: :meth:`scipy.sparse.csr_matrix.toarray`

        """
        from cupyx import cusparse

        order = 'C' if order is None else order.upper()
        if self.nnz == 0:
            return cupy.zeros(shape=self.shape, dtype=self.dtype, order=order)

        if self.dtype.char not in 'fdFD':
            # NOTE(review): `csr2dense` is unqualified here — presumably a
            # module-level fallback (for dtypes cuSPARSE does not handle)
            # defined later in this file; verify it resolves.
            return csr2dense(self, order)

        x = self.copy()
        x.has_canonical_format = False  # need to enforce sum_duplicates
        x.sum_duplicates()
        if (cusparse.check_availability('sparseToDense')
                and (not runtime.is_hip or (x.nnz > 0))):
            # On HIP, nnz=0 is problematic as of ROCm 4.2.0
            y = cusparse.sparseToDense(x)
            if order == 'F':
                return y
            elif order == 'C':
                return cupy.ascontiguousarray(y)
            else:
                raise ValueError('order not understood')
        else:
            # csr2dense returns F-contiguous array.
            if order == 'C':
                # To return C-contiguous array, it uses transpose.
                return cusparse.csc2dense(x.T).T
            elif order == 'F':
                return cusparse.csr2dense(x)
            else:
                raise ValueError('order not understood')

    def tobsr(self, blocksize=None, copy=False):
        # TODO(unno): Implement tobsr
        raise NotImplementedError

    def tocoo(self, copy=False):
        """Converts the matrix to COOrdinate format.
+ + Args: + copy (bool): If ``False``, it shares data arrays as much as + possible. + + Returns: + cupyx.scipy.sparse.coo_matrix: Converted matrix. + + """ + from cupyx import cusparse + + if copy: + data = self.data.copy() + indices = self.indices.copy() + else: + data = self.data + indices = self.indices + + return cusparse.csr2coo(self, data, indices) + + def tocsc(self, copy=False): + """Converts the matrix to Compressed Sparse Column format. + + Args: + copy (bool): If ``False``, it shares data arrays as much as + possible. Actually this option is ignored because all + arrays in a matrix cannot be shared in csr to csc conversion. + + Returns: + cupyx.scipy.sparse.csc_matrix: Converted matrix. + + """ + from cupyx import cusparse + + # copy is ignored + if cusparse.check_availability('csr2csc'): + csr2csc = cusparse.csr2csc + elif cusparse.check_availability('csr2cscEx2'): + csr2csc = cusparse.csr2cscEx2 + else: + raise NotImplementedError + # don't touch has_sorted_indices, as cuSPARSE made no guarantee + return csr2csc(self) + + def tocsr(self, copy=False): + """Converts the matrix to Compressed Sparse Row format. + + Args: + copy (bool): If ``False``, the method returns itself. + Otherwise it makes a copy of the matrix. + + Returns: + cupyx.scipy.sparse.csr_matrix: Converted matrix. + + """ + if copy: + return self.copy() + else: + return self + + def _tocsx(self): + """Inverts the format. + """ + return self.tocsc() + + def todia(self, copy=False): + # TODO(unno): Implement todia + raise NotImplementedError + + def todok(self, copy=False): + # TODO(unno): Implement todok + raise NotImplementedError + + def tolil(self, copy=False): + # TODO(unno): Implement tolil + raise NotImplementedError + + def transpose(self, axes=None, copy=False): + """Returns a transpose matrix. + + Args: + axes: This option is not supported. + copy (bool): If ``True``, a returned matrix shares no data. + Otherwise, it shared data arrays as much as possible. 
+ + Returns: + cupyx.scipy.sparse.csc_matrix: `self` with the dimensions reversed. + + """ + if axes is not None: + raise ValueError( + 'Sparse matrices do not support an \'axes\' parameter because ' + 'swapping dimensions is the only logical permutation.') + + shape = self.shape[1], self.shape[0] + trans = _csc.csc_matrix( + (self.data, self.indices, self.indptr), shape=shape, copy=copy) + trans.has_canonical_format = self.has_canonical_format + return trans + + def getrow(self, i): + """Returns a copy of row i of the matrix, as a (1 x n) + CSR matrix (row vector). + + Args: + i (integer): Row + + Returns: + cupyx.scipy.sparse.csr_matrix: Sparse matrix with single row + """ + return self._major_slice(slice(i, i + 1), copy=True) + + def getcol(self, i): + """Returns a copy of column i of the matrix, as a (m x 1) + CSR matrix (column vector). + + Args: + i (integer): Column + + Returns: + cupyx.scipy.sparse.csr_matrix: Sparse matrix with single column + """ + return self._minor_slice(slice(i, i + 1), copy=True) + + def _get_intXarray(self, row, col): + row = slice(row, row + 1) + return self._major_slice(row)._minor_index_fancy(col) + + def _get_intXslice(self, row, col): + row = slice(row, row + 1) + return self._major_slice(row)._minor_slice(col, copy=True) + + def _get_sliceXint(self, row, col): + col = slice(col, col + 1) + copy = row.step in (1, None) + return self._major_slice(row)._minor_slice(col, copy=copy) + + def _get_sliceXarray(self, row, col): + return self._major_slice(row)._minor_index_fancy(col) + + def _get_arrayXint(self, row, col): + col = slice(col, col + 1) + return self._major_index_fancy(row)._minor_slice(col) + + def _get_arrayXslice(self, row, col): + if col.step not in (1, None): + start, stop, step = col.indices(self.shape[1]) + cols = cupy.arange(start, stop, step, self.indices.dtype) + return self._get_arrayXarray(row, cols) + return self._major_index_fancy(row)._minor_slice(col) + + +def isspmatrix_csr(x): + """Checks if a given matrix 
def check_shape_for_pointwise_op(a_shape, b_shape, allow_broadcasting=True):
    """Validates shapes for a point-wise binary operation.

    Raises ``ValueError`` unless the two 2-D shapes match, or (when
    ``allow_broadcasting``) differ only where one side is 1.
    """
    if not allow_broadcasting:
        if a_shape != b_shape:
            raise ValueError('inconsistent shape')
        return
    a_m, a_n = a_shape
    b_m, b_n = b_shape
    if not (a_m == b_m or a_m == 1 or b_m == 1):
        raise ValueError('inconsistent shape')
    if not (a_n == b_n or a_n == 1 or b_n == 1):
        raise ValueError('inconsistent shape')


def multiply_by_scalar(sp, a):
    """Multiplies CSR matrix ``sp`` by scalar ``a`` (sparsity unchanged)."""
    return csr_matrix((sp.data * a, sp.indices.copy(), sp.indptr.copy()),
                      shape=sp.shape)


def multiply_by_dense(sp, dn):
    """Point-wise product of CSR matrix ``sp`` and dense ``dn``.

    Supports broadcasting along either axis; the result is CSR with the
    broadcast shape and ``sp``'s (replicated) sparsity pattern.
    """
    check_shape_for_pointwise_op(sp.shape, dn.shape)
    sp_m, sp_n = sp.shape
    dn_m, dn_n = dn.shape
    m, n = max(sp_m, dn_m), max(sp_n, dn_n)
    # Each broadcast axis replicates every stored element.
    nnz = sp.nnz * (m // sp_m) * (n // sp_n)
    dtype = numpy.promote_types(sp.dtype, dn.dtype)
    data = cupy.empty(nnz, dtype=dtype)
    indices = cupy.empty(nnz, dtype=sp.indices.dtype)
    if m > sp_m:
        if n > sp_n:
            indptr = cupy.arange(0, nnz + 1, n, dtype=sp.indptr.dtype)
        else:
            indptr = cupy.arange(0, nnz + 1, sp.nnz, dtype=sp.indptr.dtype)
    else:
        indptr = sp.indptr.copy()
        if n > sp_n:
            indptr *= n

    # out = sp * dn
    cupy_multiply_by_dense()(sp.data, sp.indptr, sp.indices, sp_m, sp_n,
                             dn, dn_m, dn_n, indptr, m, n, data, indices)

    return csr_matrix((data, indices, indptr), shape=(m, n))


# Binary-searches INDPTR for the row that owns flat element i.
_GET_ROW_ID_ = '''
__device__ inline int get_row_id(int i, int min, int max, const int *indptr) {
    int row = (min + max) / 2;
    while (min < max) {
        if (i < indptr[row]) {
            max = row - 1;
        } else if (i >= indptr[row + 1]) {
            min = row + 1;
        } else {
            break;
        }
        row = (min + max) / 2;
    }
    return row;
}
'''

# Binary-searches a (sorted) row of a CSR matrix for a column; -1 if absent.
_FIND_INDEX_HOLDING_COL_IN_ROW_ = '''
__device__ inline int find_index_holding_col_in_row(
        int row, int col, const int *indptr, const int *indices) {
    int j_min = indptr[row];
    int j_max = indptr[row+1] - 1;
    while (j_min <= j_max) {
        int j = (j_min + j_max) / 2;
        int j_col = indices[j];
        if (j_col == col) {
            return j;
        } else if (j_col < col) {
            j_min = j + 1;
        } else {
            j_max = j - 1;
        }
    }
    return -1;
}
'''


@cupy._util.memoize(for_each_device=True)
def cupy_multiply_by_dense():
    return cupy.ElementwiseKernel(
        '''
        raw S SP_DATA, raw I SP_INDPTR, raw I SP_INDICES,
        int32 SP_M, int32 SP_N,
        raw D DN_DATA, int32 DN_M, int32 DN_N,
        raw I OUT_INDPTR, int32 OUT_M, int32 OUT_N
        ''',
        'O OUT_DATA, I OUT_INDICES',
        '''
        int i_out = i;
        int m_out = get_row_id(i_out, 0, OUT_M - 1, &(OUT_INDPTR[0]));
        int i_sp = i_out;
        if (OUT_M > SP_M && SP_M == 1) {
            i_sp -= OUT_INDPTR[m_out];
        }
        if (OUT_N > SP_N && SP_N == 1) {
            i_sp /= OUT_N;
        }
        int n_out = SP_INDICES[i_sp];
        if (OUT_N > SP_N && SP_N == 1) {
            n_out = i_out - OUT_INDPTR[m_out];
        }
        int m_dn = m_out;
        if (OUT_M > DN_M && DN_M == 1) {
            m_dn = 0;
        }
        int n_dn = n_out;
        if (OUT_N > DN_N && DN_N == 1) {
            n_dn = 0;
        }
        OUT_DATA = (O)(SP_DATA[i_sp] * DN_DATA[n_dn + (DN_N * m_dn)]);
        OUT_INDICES = n_out;
        ''',
        'cupyx_scipy_sparse_csr_multiply_by_dense',
        preamble=_GET_ROW_ID_
    )


@cupy._util.memoize(for_each_device=True)
def _cupy_divide_by_dense():
    return cupy.ElementwiseKernel(
        'T data, I row, I col, I width, raw T other',
        'T res',
        '''
        res = data / other[row * width + col]
        ''',
        'cupyx_scipy_sparse_coo_divide_dense',
    )


def multiply_by_csr(a, b):
    """Point-wise product of two CSR matrices, with broadcasting.

    Computes on the smaller operand's pattern (step 1), then compacts away
    entries whose partner is an implicit zero (step 2).
    """
    check_shape_for_pointwise_op(a.shape, b.shape)
    a_m, a_n = a.shape
    b_m, b_n = b.shape
    m, n = max(a_m, b_m), max(a_n, b_n)
    a_nnz = a.nnz * (m // a_m) * (n // a_n)
    b_nnz = b.nnz * (m // b_m) * (n // b_n)
    if a_nnz > b_nnz:
        # Iterate over the operand with fewer stored entries.
        return multiply_by_csr(b, a)
    c_nnz = a_nnz
    dtype = numpy.promote_types(a.dtype, b.dtype)
    c_data = cupy.empty(c_nnz, dtype=dtype)
    c_indices = cupy.empty(c_nnz, dtype=a.indices.dtype)
    if m > a_m:
        if n > a_n:
            c_indptr = cupy.arange(0, c_nnz + 1, n, dtype=a.indptr.dtype)
        else:
            c_indptr = cupy.arange(0, c_nnz + 1, a.nnz,
                                   dtype=a.indptr.dtype)
    else:
        c_indptr = a.indptr.copy()
        if n > a_n:
            c_indptr *= n
    flags = cupy.zeros(c_nnz + 1, dtype=a.indices.dtype)
    nnz_each_row = cupy.zeros(m + 1, dtype=a.indptr.dtype)

    # compute c = a * b where necessary and get sparsity pattern of matrix d
    cupy_multiply_by_csr_step1()(
        a.data, a.indptr, a.indices, a_m, a_n,
        b.data, b.indptr, b.indices, b_m, b_n,
        c_indptr, m, n, c_data, c_indices, flags, nnz_each_row)

    flags = cupy.cumsum(flags, dtype=a.indptr.dtype)
    d_indptr = cupy.cumsum(nnz_each_row, dtype=a.indptr.dtype)
    d_nnz = int(d_indptr[-1])
    d_data = cupy.empty(d_nnz, dtype=dtype)
    d_indices = cupy.empty(d_nnz, dtype=a.indices.dtype)

    # remove zero elements in matrix c
    cupy_multiply_by_csr_step2()(c_data, c_indices, flags, d_data, d_indices)

    return csr_matrix((d_data, d_indices, d_indptr), shape=(m, n))


@cupy._util.memoize(for_each_device=True)
def cupy_multiply_by_csr_step1():
    return cupy.ElementwiseKernel(
        '''
        raw A A_DATA, raw I A_INDPTR, raw I A_INDICES, int32 A_M, int32 A_N,
        raw B B_DATA, raw I B_INDPTR, raw I B_INDICES, int32 B_M, int32 B_N,
        raw I C_INDPTR, int32 C_M, int32 C_N
        ''',
        'C C_DATA, I C_INDICES, raw I FLAGS, raw I NNZ_EACH_ROW',
        '''
        int i_c = i;
        int m_c = get_row_id(i_c, 0, C_M - 1, &(C_INDPTR[0]));

        int i_a = i;
        if (C_M > A_M && A_M == 1) {
            i_a -= C_INDPTR[m_c];
        }
        if (C_N > A_N && A_N == 1) {
            i_a /= C_N;
        }
        int n_c = A_INDICES[i_a];
        if (C_N > A_N && A_N == 1) {
            n_c = i % C_N;
        }
        int m_b = m_c;
        if (C_M > B_M && B_M == 1) {
            m_b = 0;
        }
        int n_b = n_c;
        if (C_N > B_N && B_N == 1) {
            n_b = 0;
        }
        int i_b = find_index_holding_col_in_row(m_b, n_b,
            &(B_INDPTR[0]), &(B_INDICES[0]));
        if (i_b >= 0) {
            atomicAdd(&(NNZ_EACH_ROW[m_c+1]), 1);
            FLAGS[i+1] = 1;
            C_DATA = (C)(A_DATA[i_a] * B_DATA[i_b]);
            C_INDICES = n_c;
        }
        ''',
        'cupyx_scipy_sparse_csr_multiply_by_csr_step1',
        preamble=_GET_ROW_ID_ + _FIND_INDEX_HOLDING_COL_IN_ROW_
    )
@cupy._util.memoize(for_each_device=True)
def cupy_multiply_by_csr_step2():
    """Compaction kernel: scatter surviving products into the output CSR.

    ``FLAGS`` is the exclusive prefix sum of the step-1 survival flags, so
    ``FLAGS[i] < FLAGS[i+1]`` identifies entries to keep and gives their
    destination index in one comparison.
    """
    return cupy.ElementwiseKernel(
        'T C_DATA, I C_INDICES, raw I FLAGS',
        'raw D D_DATA, raw I D_INDICES',
        '''
        int j = FLAGS[i];
        if (j < FLAGS[i+1]) {
            D_DATA[j] = (D)(C_DATA);
            D_INDICES[j] = C_INDICES;
        }
        ''',
        'cupyx_scipy_sparse_csr_multiply_by_csr_step2'
    )


# Device-side binary operators selected by ``op_name`` in ``binopt_csr``.
_BINOPT_MAX_ = '''
__device__ inline O binopt(T in1, T in2) {
    return max(in1, in2);
}
'''
_BINOPT_MIN_ = '''
__device__ inline O binopt(T in1, T in2) {
    return min(in1, in2);
}
'''
_BINOPT_EQ_ = '''
__device__ inline O binopt(T in1, T in2) {
    return (in1 == in2);
}
'''
_BINOPT_NE_ = '''
__device__ inline O binopt(T in1, T in2) {
    return (in1 != in2);
}
'''
_BINOPT_LT_ = '''
__device__ inline O binopt(T in1, T in2) {
    return (in1 < in2);
}
'''
_BINOPT_GT_ = '''
__device__ inline O binopt(T in1, T in2) {
    return (in1 > in2);
}
'''
_BINOPT_LE_ = '''
__device__ inline O binopt(T in1, T in2) {
    return (in1 <= in2);
}
'''
_BINOPT_GE_ = '''
__device__ inline O binopt(T in1, T in2) {
    return (in1 >= in2);
}
'''


def binopt_csr(a, b, op_name):
    """Applies a sparse-aware binary operator to two CSR matrices.

    Args:
        a, b (csr_matrix): Operands; shapes must match (broadcast along an
            axis of size 1 is supported).
        op_name (str): One of ``'_maximum_'``, ``'_minimum_'``, ``'_eq_'``,
            ``'_ne_'``, ``'_lt_'``, ``'_gt_'``, ``'_le_'``, ``'_ge_'``.

    Returns:
        csr_matrix: Result holding only entries where ``binopt`` is
        non-zero for at least one stored operand element.

    Raises:
        ValueError: If ``op_name`` is not recognized.
    """
    check_shape_for_pointwise_op(a.shape, b.shape)
    a_m, a_n = a.shape
    b_m, b_n = b.shape
    m, n = max(a_m, b_m), max(a_n, b_n)
    a_nnz = a.nnz * (m // a_m) * (n // a_n)
    b_nnz = b.nnz * (m // b_m) * (n // b_n)

    a_info = cupy.zeros(a_nnz + 1, dtype=a.indices.dtype)
    b_info = cupy.zeros(b_nnz + 1, dtype=b.indices.dtype)
    a_valid = cupy.zeros(a_nnz, dtype=numpy.int8)
    b_valid = cupy.zeros(b_nnz, dtype=numpy.int8)
    c_indptr = cupy.zeros(m + 1, dtype=a.indptr.dtype)
    in_dtype = numpy.promote_types(a.dtype, b.dtype)
    a_data = a.data.astype(in_dtype, copy=False)
    b_data = b.data.astype(in_dtype, copy=False)
    funcs = _GET_ROW_ID_
    # Comparison ops produce boolean output; max/min keep the value dtype.
    if op_name == '_maximum_':
        funcs += _BINOPT_MAX_
        out_dtype = in_dtype
    elif op_name == '_minimum_':
        funcs += _BINOPT_MIN_
        out_dtype = in_dtype
    elif op_name == '_eq_':
        funcs += _BINOPT_EQ_
        out_dtype = numpy.bool_
    elif op_name == '_ne_':
        funcs += _BINOPT_NE_
        out_dtype = numpy.bool_
    elif op_name == '_lt_':
        funcs += _BINOPT_LT_
        out_dtype = numpy.bool_
    elif op_name == '_gt_':
        funcs += _BINOPT_GT_
        out_dtype = numpy.bool_
    elif op_name == '_le_':
        funcs += _BINOPT_LE_
        out_dtype = numpy.bool_
    elif op_name == '_ge_':
        funcs += _BINOPT_GE_
        out_dtype = numpy.bool_
    else:
        raise ValueError('invalid op_name: {}'.format(op_name))
    a_tmp_data = cupy.empty(a_nnz, dtype=out_dtype)
    b_tmp_data = cupy.empty(b_nnz, dtype=out_dtype)
    a_tmp_indices = cupy.empty(a_nnz, dtype=a.indices.dtype)
    b_tmp_indices = cupy.empty(b_nnz, dtype=b.indices.dtype)
    _size = a_nnz + b_nnz
    cupy_binopt_csr_step1(op_name, preamble=funcs)(
        m, n,
        a.indptr, a.indices, a_data, a_m, a_n, a.nnz, a_nnz,
        b.indptr, b.indices, b_data, b_m, b_n, b.nnz, b_nnz,
        a_info, a_valid, a_tmp_indices, a_tmp_data,
        b_info, b_valid, b_tmp_indices, b_tmp_data,
        c_indptr, size=_size)
    a_info = cupy.cumsum(a_info, dtype=a_info.dtype)
    b_info = cupy.cumsum(b_info, dtype=b_info.dtype)
    c_indptr = cupy.cumsum(c_indptr, dtype=c_indptr.dtype)
    c_nnz = int(c_indptr[-1])
    c_indices = cupy.empty(c_nnz, dtype=a.indices.dtype)
    c_data = cupy.empty(c_nnz, dtype=out_dtype)
    cupy_binopt_csr_step2(op_name)(
        a_info, a_valid, a_tmp_indices, a_tmp_data, a_nnz,
        b_info, b_valid, b_tmp_indices, b_tmp_data, b_nnz,
        c_indices, c_data, size=_size)
    return csr_matrix((c_data, c_indices, c_indptr), shape=(m, n))


@cupy._util.memoize(for_each_device=True)
def cupy_binopt_csr_step1(op_name, preamble=''):
    """Kernel: one thread per stored element of A or B.

    Each thread evaluates ``binopt`` against the matching element of the
    other operand (implicit zero if absent) and records survival flags and
    per-row counts used to assemble the output in step 2.
    """
    name = 'cupyx_scipy_sparse_csr_binopt_' + op_name + 'step1'
    return cupy.ElementwiseKernel(
        '''
        int32 M, int32 N,
        raw I A_INDPTR, raw I A_INDICES, raw T A_DATA,
        int32 A_M, int32 A_N, int32 A_NNZ_ACT, int32 A_NNZ,
        raw I B_INDPTR, raw I B_INDICES, raw T B_DATA,
        int32 B_M, int32 B_N, int32 B_NNZ_ACT, int32 B_NNZ
        ''',
        '''
        raw I A_INFO, raw B A_VALID, raw I A_TMP_INDICES, raw O A_TMP_DATA,
        raw I B_INFO, raw B B_VALID, raw I B_TMP_INDICES, raw O B_TMP_DATA,
        raw I C_INFO
        ''',
        '''
        if (i >= A_NNZ + B_NNZ) return;

        const int *MY_INDPTR, *MY_INDICES; int *MY_INFO; const T *MY_DATA;
        const int *OP_INDPTR, *OP_INDICES; int *OP_INFO; const T *OP_DATA;
        int MY_M, MY_N, MY_NNZ_ACT, MY_NNZ;
        int OP_M, OP_N, OP_NNZ_ACT, OP_NNZ;
        signed char *MY_VALID; I *MY_TMP_INDICES; O *MY_TMP_DATA;

        int my_j;
        if (i < A_NNZ) {
            // in charge of one of non-zero element of sparse matrix A
            my_j = i;
            MY_INDPTR  = &(A_INDPTR[0]);  OP_INDPTR  = &(B_INDPTR[0]);
            MY_INDICES = &(A_INDICES[0]); OP_INDICES = &(B_INDICES[0]);
            MY_INFO    = &(A_INFO[0]);    OP_INFO    = &(B_INFO[0]);
            MY_DATA    = &(A_DATA[0]);    OP_DATA    = &(B_DATA[0]);
            MY_M = A_M; OP_M = B_M;
            MY_N = A_N; OP_N = B_N;
            MY_NNZ_ACT = A_NNZ_ACT; OP_NNZ_ACT = B_NNZ_ACT;
            MY_NNZ = A_NNZ; OP_NNZ = B_NNZ;
            MY_VALID = &(A_VALID[0]);
            MY_TMP_DATA = &(A_TMP_DATA[0]);
            MY_TMP_INDICES = &(A_TMP_INDICES[0]);
        } else {
            // in charge of one of non-zero element of sparse matrix B
            my_j = i - A_NNZ;
            MY_INDPTR  = &(B_INDPTR[0]);  OP_INDPTR  = &(A_INDPTR[0]);
            MY_INDICES = &(B_INDICES[0]); OP_INDICES = &(A_INDICES[0]);
            MY_INFO    = &(B_INFO[0]);    OP_INFO    = &(A_INFO[0]);
            MY_DATA    = &(B_DATA[0]);    OP_DATA    = &(A_DATA[0]);
            MY_M = B_M; OP_M = A_M;
            MY_N = B_N; OP_N = A_N;
            MY_NNZ_ACT = B_NNZ_ACT; OP_NNZ_ACT = A_NNZ_ACT;
            MY_NNZ = B_NNZ; OP_NNZ = A_NNZ;
            MY_VALID = &(B_VALID[0]);
            MY_TMP_DATA = &(B_TMP_DATA[0]);
            MY_TMP_INDICES = &(B_TMP_INDICES[0]);
        }
        int _min, _max, _mid;

        // get column location
        int my_col;
        int my_j_act = my_j;
        if (MY_M == 1 && MY_M < M) {
            if (MY_N == 1 && MY_N < N) my_j_act = 0;
            else my_j_act = my_j % MY_NNZ_ACT;
        } else {
            if (MY_N == 1 && MY_N < N) my_j_act = my_j / N;
        }
        my_col = MY_INDICES[my_j_act];
        if (MY_N == 1 && MY_N < N) {
            my_col = my_j % N;
        }

        // get row location
        int my_row = get_row_id(my_j_act, 0, MY_M - 1, &(MY_INDPTR[0]));
        if (MY_M == 1 && MY_M < M) {
            if (MY_N == 1 && MY_N < N) my_row = my_j / N;
            else my_row = my_j / MY_NNZ_ACT;
        }

        int op_row = my_row;
        int op_row_act = op_row;
        if (OP_M == 1 && OP_M < M) {
            op_row_act = 0;
        }

        int op_col = 0;
        _min = OP_INDPTR[op_row_act];
        _max = OP_INDPTR[op_row_act + 1] - 1;
        int op_j_act = _min;
        bool op_nz = false;
        if (_min <= _max) {
            if (OP_N == 1 && OP_N < N) {
                op_col = my_col;
                op_nz = true;
            }
            else {
                _mid = (_min + _max) / 2;
                op_col = OP_INDICES[_mid];
                while (_min < _max) {
                    if (op_col < my_col) {
                        _min = _mid + 1;
                    } else if (op_col > my_col) {
                        _max = _mid;
                    } else {
                        break;
                    }
                    _mid = (_min + _max) / 2;
                    op_col = OP_INDICES[_mid];
                }
                op_j_act = _mid;
                if (op_col == my_col) {
                    op_nz = true;
                } else if (op_col < my_col) {
                    op_col = N;
                    op_j_act += 1;
                }
            }
        }

        int op_j = op_j_act;
        if (OP_M == 1 && OP_M < M) {
            if (OP_N == 1 && OP_N < N) {
                op_j = (op_col + N * op_row) * OP_NNZ_ACT;
            } else {
                op_j = op_j_act + OP_NNZ_ACT * op_row;
            }
        } else {
            if (OP_N == 1 && OP_N < N) {
                op_j = op_col + N * op_j_act;
            }
        }

        if (i < A_NNZ || !op_nz) {
            T my_data = MY_DATA[my_j_act];
            T op_data = 0;
            if (op_nz) op_data = OP_DATA[op_j_act];
            O out;
            if (i < A_NNZ) out = binopt(my_data, op_data);
            else out = binopt(op_data, my_data);
            // Fix: the original source had "static_cast(0)" (the template
            // argument was lost), which is not valid C++.
            if (out != static_cast<O>(0)) {
                MY_VALID[my_j] = 1;
                MY_TMP_DATA[my_j] = out;
                MY_TMP_INDICES[my_j] = my_col;
                atomicAdd( &(C_INFO[my_row + 1]), 1 );
                atomicAdd( &(MY_INFO[my_j + 1]), 1 );
                atomicAdd( &(OP_INFO[op_j]), 1 );
            }
        }
        ''',
        name, preamble=preamble,
    )


@cupy._util.memoize(for_each_device=True)
def cupy_binopt_csr_step2(op_name):
    """Kernel: gather surviving A/B entries (marked valid in step 1) into
    the output arrays at the offsets given by the prefix-summed INFO."""
    name = 'cupyx_scipy_sparse_csr_binopt' + op_name + 'step2'
    return cupy.ElementwiseKernel(
        '''
        raw I A_INFO, raw B A_VALID, raw I A_TMP_INDICES, raw O A_TMP_DATA,
        int32 A_NNZ,
        raw I B_INFO, raw B B_VALID, raw I B_TMP_INDICES, raw O B_TMP_DATA,
        int32 B_NNZ
        ''',
        'raw I C_INDICES, raw O C_DATA',
        '''
        if (i < A_NNZ) {
            int j = i;
            if (A_VALID[j]) {
                C_INDICES[A_INFO[j]] = A_TMP_INDICES[j];
                C_DATA[A_INFO[j]] = A_TMP_DATA[j];
            }
        } else if (i < A_NNZ + B_NNZ) {
            int j = i - A_NNZ;
            if (B_VALID[j]) {
                C_INDICES[B_INFO[j]] = B_TMP_INDICES[j];
                C_DATA[B_INFO[j]] = B_TMP_DATA[j];
            }
        }
        ''',
        name,
    )
def csr2dense(a, order):
    """Converts CSR matrix ``a`` to a dense array without cuSPARSE.

    Used for dtypes cuSPARSE does not handle (e.g. integers, bool).

    Args:
        a (csr_matrix): Input matrix.
        order (str): ``'C'`` or ``'F'`` memory layout of the result.

    Returns:
        cupy.ndarray: Dense array of shape ``a.shape``.
    """
    out = cupy.zeros(a.shape, dtype=a.dtype, order=order)
    m, n = a.shape
    kern = _cupy_csr2dense(a.dtype)
    kern(m, n, a.indptr, a.indices, a.data, (order == 'C'), out)
    return out


@cupy._util.memoize(for_each_device=True)
def _cupy_csr2dense(dtype):
    """Scatter kernel for :func:`csr2dense` (one thread per stored entry).

    Duplicates are accumulated via ``atomicAdd``; booleans have no
    ``atomicAdd``, so they are OR-ed by plain assignment instead.
    """
    if dtype == '?':
        op = "if (DATA) OUT[index] = true;"
    else:
        op = "atomicAdd(&OUT[index], DATA);"

    return cupy.ElementwiseKernel(
        'int32 M, int32 N, raw I INDPTR, I INDICES, T DATA, bool C_ORDER',
        'raw T OUT',
        '''
        int row = get_row_id(i, 0, M - 1, &(INDPTR[0]));
        int col = INDICES;
        int index = C_ORDER ? col + N * row : row + M * col;
        ''' + op,
        'cupyx_scipy_sparse_csr2dense',
        preamble=_GET_ROW_ID_
    )


def dense2csr(a):
    """Converts a dense matrix to CSR.

    Float/complex dtypes go through cuSPARSE; other dtypes use the
    two-pass kernels below (count per row, then scatter).

    Args:
        a (cupy.ndarray): 2-D dense input.

    Returns:
        csr_matrix: Sparse representation of ``a``.
    """
    from cupyx import cusparse

    if a.dtype.char in 'fdFD':
        if cusparse.check_availability('denseToSparse'):
            return cusparse.denseToSparse(a, format='csr')
        else:
            return cusparse.dense2csr(a)
    m, n = a.shape
    a = cupy.ascontiguousarray(a)
    indptr = cupy.zeros(m + 1, dtype=numpy.int32)
    info = cupy.zeros(m * n + 1, dtype=numpy.int32)
    cupy_dense2csr_step1()(m, n, a, indptr, info)
    indptr = cupy.cumsum(indptr, dtype=numpy.int32)
    info = cupy.cumsum(info, dtype=numpy.int32)
    nnz = int(indptr[-1])
    indices = cupy.empty(nnz, dtype=numpy.int32)
    data = cupy.empty(nnz, dtype=a.dtype)
    cupy_dense2csr_step2()(m, n, a, info, indices, data)
    return csr_matrix((data, indices, indptr), shape=(m, n))


@cupy._util.memoize(for_each_device=True)
def cupy_dense2csr_step1():
    # Counts non-zeros per row (INDPTR) and flags each non-zero (INFO);
    # both are prefix-summed by the caller.
    return cupy.ElementwiseKernel(
        'int32 M, int32 N, T A',
        'raw I INDPTR, raw I INFO',
        '''
        int row = i / N;
        int col = i % N;
        // Fix: the original source had "static_cast(0)" (the template
        // argument was lost), which is not valid C++.
        if (A != static_cast<T>(0)) {
            atomicAdd( &(INDPTR[row + 1]), 1 );
            INFO[i + 1] = 1;
        }
        ''',
        'cupyx_scipy_sparse_dense2csr_step1')


@cupy._util.memoize(for_each_device=True)
def cupy_dense2csr_step2():
    # Scatters each non-zero to its slot (INFO is the prefix-summed flags).
    return cupy.ElementwiseKernel(
        'int32 M, int32 N, T A, raw I INFO',
        'raw I INDICES, raw T DATA',
        '''
        int row = i / N;
        int col = i % N;
        // Fix: restored the lost template argument of static_cast.
        if (A != static_cast<T>(0)) {
            int idx = INFO[i];
            INDICES[idx] = col;
            DATA[idx] = A;
        }
        ''',
        'cupyx_scipy_sparse_dense2csr_step2')


@cupy._util.memoize(for_each_device=True)
def _cupy_csr_diagonal():
    """Kernel extracting the k-th diagonal: one thread per diagonal slot;
    binary-search the row for the wanted column, zero if absent."""
    return cupy.ElementwiseKernel(
        'int32 k, int32 rows, int32 cols, '
        'raw T data, raw I indptr, raw I indices',
        'T y',
        '''
        int row = i;
        int col = i;
        if (k < 0) row -= k;
        if (k > 0) col += k;
        if (row >= rows || col >= cols) return;
        int j = find_index_holding_col_in_row(row, col,
            &(indptr[0]), &(indices[0]));
        if (j >= 0) {
            y = data[j];
        } else {
            // Fix: restored the lost template argument of static_cast.
            y = static_cast<T>(0);
        }
        ''',
        'cupyx_scipy_sparse_csr_diagonal',
        preamble=_FIND_INDEX_HOLDING_COL_IN_ROW_
    )
# ====================================================================
# file boundary: cupyx/scipy/sparse/_data.py (new file in this diff)
# ====================================================================

import cupy
import numpy as np
from cupy._core import internal
from cupy import _util
from cupyx.scipy.sparse import _base
from cupyx.scipy.sparse import _coo
from cupyx.scipy.sparse import _sputils


# Element-wise ufuncs that map zero to zero, hence preserve sparsity and
# can be applied to ``.data`` alone (installed below via _install_ufunc).
_ufuncs = [
    'arcsin', 'arcsinh', 'arctan', 'arctanh', 'ceil', 'deg2rad', 'expm1',
    'floor', 'log1p', 'rad2deg', 'rint', 'sign', 'sin', 'sinh', 'sqrt',
    'tan', 'tanh', 'trunc',
]


class _data_matrix(_base.spmatrix):
    """Base class for sparse formats that store values in a ``data`` array."""

    def __init__(self, data):
        self.data = data

    @property
    def dtype(self):
        """Data type of the matrix."""
        return self.data.dtype

    def _with_data(self, data, copy=True):
        # Subclasses build a same-pattern matrix around new data.
        raise NotImplementedError

    def __abs__(self):
        """Elementwise absolute."""
        return self._with_data(abs(self.data))

    def __neg__(self):
        """Elementwise negative."""
        return self._with_data(-self.data)

    def astype(self, t):
        """Casts the array to given data type.

        Args:
            dtype: Type specifier.

        Returns:
            A copy of the array with a given type.

        """
        return self._with_data(self.data.astype(t))

    def conj(self, copy=True):
        if cupy.issubdtype(self.dtype, cupy.complexfloating):
            return self._with_data(self.data.conj(), copy=copy)
        elif copy:
            return self.copy()
        else:
            return self

    conj.__doc__ = _base.spmatrix.conj.__doc__

    def copy(self):
        return self._with_data(self.data.copy(), copy=True)

    copy.__doc__ = _base.spmatrix.copy.__doc__

    def count_nonzero(self):
        """Returns number of non-zero entries.

        .. note::
            This method counts the actual number of non-zero entries, which
            does not include explicit zero entries.
            Instead ``nnz`` returns the number of entries including
            explicit zeros.

        Returns:
            Number of non-zero entries.

        """
        return cupy.count_nonzero(self.data)

    def mean(self, axis=None, dtype=None, out=None):
        """Compute the arithmetic mean along the specified axis.

        Args:
            axis (int or ``None``): Axis along which the sum is computed.
                If it is ``None``, it computes the average of all the
                elements. Select from ``{None, 0, 1, -2, -1}``.

        Returns:
            cupy.ndarray: Summed array.

        .. seealso:: :meth:`scipy.sparse.spmatrix.mean`

        """
        _sputils.validateaxis(axis)
        nRow, nCol = self.shape
        data = self.data.copy()

        # mean = sum(values / n) along the reduced axis.
        if axis is None:
            n = nRow * nCol
        elif axis in (0, -2):
            n = nRow
        else:
            n = nCol

        return self._with_data(data / n).sum(axis, dtype, out)

    def power(self, n, dtype=None):
        """Elementwise power function.

        Args:
            n: Exponent.
            dtype: Type specifier.

        """
        if dtype is None:
            data = self.data.copy()
        else:
            data = self.data.astype(dtype, copy=True)
        data **= n
        return self._with_data(data)


def _find_missing_index(ind, n):
    """Returns the first flat index absent from sorted ``ind`` (or -1/``n``)."""
    positions = cupy.arange(ind.size)
    diff = ind != positions
    return cupy.where(
        diff.any(),
        diff.argmax(),
        cupy.asarray(ind.size if ind.size < n else -1))


def _non_zero_cmp(mat, am, zero, m):
    """Resolves argmin/argmax when implicit zeros may win the comparison."""
    size = np.prod(mat.shape)
    if size == mat.nnz:
        # No implicit zeros: the stored-data answer stands.
        return am
    else:
        ind = mat.row * mat.shape[1] + mat.col
        zero_ind = _find_missing_index(ind, size)
        return cupy.where(
            m == zero,
            cupy.minimum(zero_ind, am),
            zero_ind)


class _minmax_mixin(object):
    """Mixin for min and max methods.

    These are not implemented for dia_matrix, hence the separate class.

    """
+ + """ + + def _min_or_max_axis(self, axis, min_or_max, explicit): + N = self.shape[axis] + if N == 0: + raise ValueError("zero-size array to reduction operation") + M = self.shape[1 - axis] + + mat = self.tocsc() if axis == 0 else self.tocsr() + mat.sum_duplicates() + + # Do the reduction + value = mat._minor_reduce(min_or_max, axis, explicit) + major_index = cupy.arange(M) + + mask = value != 0 + major_index = cupy.compress(mask, major_index) + value = cupy.compress(mask, value) + + if axis == 0: + return _coo.coo_matrix( + (value, (cupy.zeros(len(value)), major_index)), + dtype=self.dtype, shape=(1, M)) + else: + return _coo.coo_matrix( + (value, (major_index, cupy.zeros(len(value)))), + dtype=self.dtype, shape=(M, 1)) + + def _min_or_max(self, axis, out, min_or_max, explicit): + if out is not None: + raise ValueError(("Sparse matrices do not support " + "an 'out' parameter.")) + + _sputils.validateaxis(axis) + + if axis is None: + if 0 in self.shape: + raise ValueError("zero-size array to reduction operation") + + zero = cupy.zeros((), dtype=self.dtype) + if self.nnz == 0: + return zero + self.sum_duplicates() + m = min_or_max(self.data) + if explicit: + return m + if self.nnz != internal.prod(self.shape): + if min_or_max is cupy.min: + m = cupy.minimum(zero, m) + elif min_or_max is cupy.max: + m = cupy.maximum(zero, m) + else: + assert False + return m + + if axis < 0: + axis += 2 + + return self._min_or_max_axis(axis, min_or_max, explicit) + + def _arg_min_or_max_axis(self, axis, op): + if self.shape[axis] == 0: + raise ValueError("Can't apply the operation along a zero-sized " + "dimension.") + + mat = self.tocsc() if axis == 0 else self.tocsr() + mat.sum_duplicates() + + # Do the reduction + value = mat._arg_minor_reduce(op, axis) + + if axis == 0: + return value[None, :] + else: + return value[:, None] + + def _arg_min_or_max(self, axis, out, op, compare): + if out is not None: + raise ValueError("Sparse matrices do not support " + "an 'out' parameter.") 
+ + _sputils.validateaxis(axis) + + if axis is None: + if 0 in self.shape: + raise ValueError("Can't apply the operation to " + "an empty matrix.") + + if self.nnz == 0: + return 0 + else: + zero = cupy.asarray(self.dtype.type(0)) + mat = self.tocoo() + + mat.sum_duplicates() + + am = op(mat.data) + m = mat.data[am] + + return cupy.where( + compare(m, zero), mat.row[am] * mat.shape[1] + mat.col[am], + _non_zero_cmp(mat, am, zero, m)) + + if axis < 0: + axis += 2 + + return self._arg_min_or_max_axis(axis, op) + + def max(self, axis=None, out=None, *, explicit=False): + """Returns the maximum of the matrix or maximum along an axis. + + Args: + axis (int): {-2, -1, 0, 1, ``None``} (optional) + Axis along which the sum is computed. The default is to + compute the maximum over all the matrix elements, returning + a scalar (i.e. ``axis`` = ``None``). + out (None): (optional) + This argument is in the signature *solely* for NumPy + compatibility reasons. Do not pass in anything except + for the default value, as this argument is not used. + explicit (bool): Return the maximum value explicitly specified and + ignore all implicit zero entries. If the dimension has no + explicit values, a zero is then returned to indicate that it is + the only implicit value. This parameter is experimental and may + change in the future. + + Returns: + (cupy.ndarray or float): Maximum of ``a``. If ``axis`` is + ``None``, the result is a scalar value. If ``axis`` is given, + the result is an array of dimension ``a.ndim - 1``. This + differs from numpy for computational efficiency. + + .. seealso:: min : The minimum value of a sparse matrix along a given + axis. + .. 
seealso:: numpy.matrix.max : NumPy's implementation of ``max`` for + matrices + + """ + if explicit: + api_name = 'explicit of cupyx.scipy.sparse.{}.max'.format( + self.__class__.__name__) + _util.experimental(api_name) + return self._min_or_max(axis, out, cupy.max, explicit) + + def min(self, axis=None, out=None, *, explicit=False): + """Returns the minimum of the matrix or maximum along an axis. + + Args: + axis (int): {-2, -1, 0, 1, ``None``} (optional) + Axis along which the sum is computed. The default is to + compute the minimum over all the matrix elements, returning + a scalar (i.e. ``axis`` = ``None``). + out (None): (optional) + This argument is in the signature *solely* for NumPy + compatibility reasons. Do not pass in anything except for + the default value, as this argument is not used. + explicit (bool): Return the minimum value explicitly specified and + ignore all implicit zero entries. If the dimension has no + explicit values, a zero is then returned to indicate that it is + the only implicit value. This parameter is experimental and may + change in the future. + + Returns: + (cupy.ndarray or float): Minimum of ``a``. If ``axis`` is + None, the result is a scalar value. If ``axis`` is given, the + result is an array of dimension ``a.ndim - 1``. This differs + from numpy for computational efficiency. + + .. seealso:: max : The maximum value of a sparse matrix along a given + axis. + .. seealso:: numpy.matrix.min : NumPy's implementation of 'min' for + matrices + + """ + if explicit: + api_name = 'explicit of cupyx.scipy.sparse.{}.min'.format( + self.__class__.__name__) + _util.experimental(api_name) + return self._min_or_max(axis, out, cupy.min, explicit) + + def argmax(self, axis=None, out=None): + """Returns indices of maximum elements along an axis. + + Implicit zero elements are taken into account. If there are several + maximum values, the index of the first occurrence is returned. 
If + ``NaN`` values occur in the matrix, the output defaults to a zero entry + for the row/column in which the NaN occurs. + + Args: + axis (int): {-2, -1, 0, 1, ``None``} (optional) + Axis along which the argmax is computed. If ``None`` (default), + index of the maximum element in the flatten data is returned. + out (None): (optional) + This argument is in the signature *solely* for NumPy + compatibility reasons. Do not pass in anything except for + the default value, as this argument is not used. + + Returns: + (cupy.narray or int): Indices of maximum elements. If array, + its size along ``axis`` is 1. + + """ + return self._arg_min_or_max(axis, out, cupy.argmax, cupy.greater) + + def argmin(self, axis=None, out=None): + """ + Returns indices of minimum elements along an axis. + + Implicit zero elements are taken into account. If there are several + minimum values, the index of the first occurrence is returned. If + ``NaN`` values occur in the matrix, the output defaults to a zero entry + for the row/column in which the NaN occurs. + + Args: + axis (int): {-2, -1, 0, 1, ``None``} (optional) + Axis along which the argmin is computed. If ``None`` (default), + index of the minimum element in the flatten data is returned. + out (None): (optional) + This argument is in the signature *solely* for NumPy + compatibility reasons. Do not pass in anything except for + the default value, as this argument is not used. + + Returns: + (cupy.narray or int): Indices of minimum elements. If matrix, + its size along ``axis`` is 1. + + """ + return self._arg_min_or_max(axis, out, cupy.argmin, cupy.less) + + +def _install_ufunc(func_name): + + def f(self): + ufunc = getattr(cupy, func_name) + result = ufunc(self.data) + return self._with_data(result) + + f.__doc__ = 'Elementwise %s.' 
% func_name + f.__name__ = func_name + + setattr(_data_matrix, func_name, f) + + +def _install_ufuncs(): + for func_name in _ufuncs: + _install_ufunc(func_name) + + +_install_ufuncs() diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_dia.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_dia.py new file mode 100644 index 0000000000000000000000000000000000000000..b1a23bea81a5004bfc0d7d1af09dbb9d2442ba54 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_dia.py @@ -0,0 +1,219 @@ +try: + import scipy.sparse + _scipy_available = True +except ImportError: + _scipy_available = False + +import cupy +from cupy import _core +from cupyx.scipy.sparse import _csc +from cupyx.scipy.sparse import _data +from cupyx.scipy.sparse import _util + + +# TODO(leofang): The current implementation is CSC-based, which is troublesome +# on ROCm/HIP. We should convert it to CSR-based for portability. +class dia_matrix(_data._data_matrix): + + """Sparse matrix with DIAgonal storage. + + Now it has only one initializer format below: + + ``dia_matrix((data, offsets))`` + + Args: + arg1: Arguments for the initializer. + shape (tuple): Shape of a matrix. Its length must be two. + dtype: Data type. It must be an argument of :class:`numpy.dtype`. + copy (bool): If ``True``, copies of given arrays are always used. + + .. 
seealso:: + :class:`scipy.sparse.dia_matrix` + + """ + + format = 'dia' + + def __init__(self, arg1, shape=None, dtype=None, copy=False): + if _scipy_available and scipy.sparse.issparse(arg1): + x = arg1.todia() + data = x.data + offsets = x.offsets + shape = x.shape + dtype = x.dtype + copy = False + elif isinstance(arg1, tuple): + data, offsets = arg1 + if shape is None: + raise ValueError('expected a shape argument') + + else: + raise ValueError( + 'unrecognized form for dia_matrix constructor') + + data = cupy.array(data, dtype=dtype, copy=copy) + data = cupy.atleast_2d(data) + offsets = cupy.array(offsets, dtype='i', copy=copy) + offsets = cupy.atleast_1d(offsets) + + if offsets.ndim != 1: + raise ValueError('offsets array must have rank 1') + + if data.ndim != 2: + raise ValueError('data array must have rank 2') + + if data.shape[0] != len(offsets): + raise ValueError( + 'number of diagonals (%d) does not match the number of ' + 'offsets (%d)' + % (data.shape[0], len(offsets))) + + sorted_offsets = cupy.sort(offsets) + if (sorted_offsets[:-1] == sorted_offsets[1:]).any(): + raise ValueError('offset array contains duplicate values') + + self.data = data + self.offsets = offsets + if not _util.isshape(shape): + raise ValueError('invalid shape (must be a 2-tuple of int)') + self._shape = int(shape[0]), int(shape[1]) + + def _with_data(self, data, copy=True): + """Returns a matrix with the same sparsity structure as self, + but with different data. By default the structure arrays are copied. + """ + if copy: + return dia_matrix((data, self.offsets.copy()), shape=self.shape) + else: + return dia_matrix((data, self.offsets), shape=self.shape) + + def get(self, stream=None): + """Returns a copy of the array on host memory. + + Args: + stream (cupy.cuda.Stream): CUDA stream object. If it is given, the + copy runs asynchronously. Otherwise, the copy is synchronous. + + Returns: + scipy.sparse.dia_matrix: Copy of the array on host memory. 
+ + """ + if not _scipy_available: + raise RuntimeError('scipy is not available') + data = self.data.get(stream) + offsets = self.offsets.get(stream) + return scipy.sparse.dia_matrix((data, offsets), shape=self._shape) + + def get_shape(self): + """Returns the shape of the matrix. + + Returns: + tuple: Shape of the matrix. + """ + return self._shape + + def getnnz(self, axis=None): + """Returns the number of stored values, including explicit zeros. + + Args: + axis: Not supported yet. + + Returns: + int: The number of stored values. + + """ + if axis is not None: + raise NotImplementedError( + 'getnnz over an axis is not implemented for DIA format') + + m, n = self.shape + nnz = _core.ReductionKernel( + 'int32 offsets, int32 m, int32 n', 'int32 nnz', + 'offsets > 0 ? min(m, n - offsets) : min(m + offsets, n)', + 'a + b', 'nnz = a', '0', 'dia_nnz')(self.offsets, m, n) + return int(nnz) + + def toarray(self, order=None, out=None): + """Returns a dense matrix representing the same value.""" + return self.tocsc().toarray(order=order, out=out) + + def tocsc(self, copy=False): + """Converts the matrix to Compressed Sparse Column format. + + Args: + copy (bool): If ``False``, it shares data arrays as much as + possible. Actually this option is ignored because all + arrays in a matrix cannot be shared in dia to csc conversion. + + Returns: + cupyx.scipy.sparse.csc_matrix: Converted matrix. 
+ + """ + if self.data.size == 0: + return _csc.csc_matrix(self.shape, dtype=self.dtype) + + num_rows, num_cols = self.shape + num_offsets, offset_len = self.data.shape + + row, mask = _core.ElementwiseKernel( + 'int32 offset_len, int32 offsets, int32 num_rows, ' + 'int32 num_cols, T data', + 'int32 row, bool mask', + ''' + int offset_inds = i % offset_len; + row = offset_inds - offsets; + mask = (row >= 0 && row < num_rows && offset_inds < num_cols + && data != T(0)); + ''', + 'cupyx_scipy_sparse_dia_tocsc')(offset_len, self.offsets[:, None], + num_rows, num_cols, self.data) + indptr = cupy.zeros(num_cols + 1, dtype='i') + indptr[1: offset_len + 1] = cupy.cumsum(mask.sum(axis=0)) + indptr[offset_len + 1:] = indptr[offset_len] + indices = row.T[mask.T].astype('i', copy=False) + data = self.data.T[mask.T] + return _csc.csc_matrix( + (data, indices, indptr), shape=self.shape, dtype=self.dtype) + + def tocsr(self, copy=False): + """Converts the matrix to Compressed Sparse Row format. + + Args: + copy (bool): If ``False``, it shares data arrays as much as + possible. Actually this option is ignored because all + arrays in a matrix cannot be shared in dia to csr conversion. + + Returns: + cupyx.scipy.sparse.csc_matrix: Converted matrix. + + """ + return self.tocsc().tocsr() + + def diagonal(self, k=0): + """Returns the k-th diagonal of the matrix. + + Args: + k (int, optional): Which diagonal to get, corresponding to elements + a[i, i+k]. Default: 0 (the main diagonal). + + Returns: + cupy.ndarray : The k-th diagonal. + """ + rows, cols = self.shape + if k <= -rows or k >= cols: + return cupy.empty(0, dtype=self.data.dtype) + idx, = cupy.nonzero(self.offsets == k) + first_col, last_col = max(0, k), min(rows + k, cols) + if idx.size == 0: + return cupy.zeros(last_col - first_col, dtype=self.data.dtype) + return self.data[idx[0], first_col:last_col] + + +def isspmatrix_dia(x): + """Checks if a given matrix is of DIA format. 
+ + Returns: + bool: Returns if ``x`` is :class:`cupyx.scipy.sparse.dia_matrix`. + + """ + return isinstance(x, dia_matrix) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_extract.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_extract.py new file mode 100644 index 0000000000000000000000000000000000000000..1bd5e52e1b655f7027fcd2600cb766f11183e3e3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_extract.py @@ -0,0 +1,81 @@ +import cupy +import cupyx + +from cupyx.scipy import sparse + + +def find(A): + """Returns the indices and values of the nonzero elements of a matrix + + Args: + A (cupy.ndarray or cupyx.scipy.sparse.spmatrix): Matrix whose nonzero + elements are desired. + + Returns: + tuple of cupy.ndarray: + It returns (``I``, ``J``, ``V``). ``I``, ``J``, and ``V`` contain + respectively the row indices, column indices, and values of the + nonzero matrix entries. + + .. seealso:: :func:`scipy.sparse.find` + """ + _check_A_type(A) + A = sparse.coo_matrix(A, copy=True) + A.sum_duplicates() + nz_mask = A.data != 0 + return A.row[nz_mask], A.col[nz_mask], A.data[nz_mask] + + +def tril(A, k=0, format=None): + """Returns the lower triangular portion of a matrix in sparse format + + Args: + A (cupy.ndarray or cupyx.scipy.sparse.spmatrix): Matrix whose lower + triangular portion is desired. + k (integer): The top-most diagonal of the lower triangle. + format (string): Sparse format of the result, e.g. 'csr', 'csc', etc. + + Returns: + cupyx.scipy.sparse.spmatrix: + Lower triangular portion of A in sparse format. + + .. seealso:: :func:`scipy.sparse.tril` + """ + _check_A_type(A) + A = sparse.coo_matrix(A, copy=False) + mask = A.row + k >= A.col + return _masked_coo(A, mask).asformat(format) + + +def triu(A, k=0, format=None): + """Returns the upper triangular portion of a matrix in sparse format + + Args: + A (cupy.ndarray or cupyx.scipy.sparse.spmatrix): Matrix whose upper + triangular portion is desired. 
+ k (integer): The bottom-most diagonal of the upper triangle. + format (string): Sparse format of the result, e.g. 'csr', 'csc', etc. + + Returns: + cupyx.scipy.sparse.spmatrix: + Upper triangular portion of A in sparse format. + + .. seealso:: :func:`scipy.sparse.triu` + """ + _check_A_type(A) + A = sparse.coo_matrix(A, copy=False) + mask = A.row + k <= A.col + return _masked_coo(A, mask).asformat(format) + + +def _check_A_type(A): + if not (isinstance(A, cupy.ndarray) or cupyx.scipy.sparse.isspmatrix(A)): + msg = 'A must be cupy.ndarray or cupyx.scipy.sparse.spmatrix' + raise TypeError(msg) + + +def _masked_coo(A, mask): + row = A.row[mask] + col = A.col[mask] + data = A.data[mask] + return sparse.coo_matrix((data, (row, col)), shape=A.shape, dtype=A.dtype) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_index.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_index.py new file mode 100644 index 0000000000000000000000000000000000000000..8efa59e7051ec2d3e154c64999234ce06e18722d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_index.py @@ -0,0 +1,703 @@ +"""Indexing mixin for sparse matrix classes. 
+""" + +import cupy +from cupy import _core + +from cupyx.scipy.sparse._base import isspmatrix +from cupyx.scipy.sparse._base import spmatrix + +from cupy.cuda import device +from cupy.cuda import runtime + +import numpy + +try: + import scipy.sparse + scipy_available = True +except ImportError: + scipy_available = False + +_int_scalar_types = (int, numpy.integer, numpy.int_) +_bool_scalar_types = (bool, numpy.bool_) + + +_compress_getitem_kern = _core.ElementwiseKernel( + 'T d, S ind, int32 minor', 'raw T answer', + 'if (ind == minor) atomicAdd(&answer[0], d);', + 'cupyx_scipy_sparse_compress_getitem') + + +_compress_getitem_complex_kern = _core.ElementwiseKernel( + 'T real, T imag, S ind, int32 minor', + 'raw T answer_real, raw T answer_imag', + ''' + if (ind == minor) { + atomicAdd(&answer_real[0], real); + atomicAdd(&answer_imag[0], imag); + } + ''', + 'cupyx_scipy_sparse_compress_getitem_complex') + + +def _get_csr_submatrix_major_axis(Ax, Aj, Ap, start, stop): + """Return a submatrix of the input sparse matrix by slicing major axis. + + Args: + Ax (cupy.ndarray): data array from input sparse matrix + Aj (cupy.ndarray): indices array from input sparse matrix + Ap (cupy.ndarray): indptr array from input sparse matrix + start (int): starting index of major axis + stop (int): ending index of major axis + + Returns: + Bx (cupy.ndarray): data array of output sparse matrix + Bj (cupy.ndarray): indices array of output sparse matrix + Bp (cupy.ndarray): indptr array of output sparse matrix + + """ + Ap = Ap[start:stop + 1] + start_offset, stop_offset = int(Ap[0]), int(Ap[-1]) + Bp = Ap - start_offset + Bj = Aj[start_offset:stop_offset] + Bx = Ax[start_offset:stop_offset] + + return Bx, Bj, Bp + + +def _get_csr_submatrix_minor_axis(Ax, Aj, Ap, start, stop): + """Return a submatrix of the input sparse matrix by slicing minor axis. 
+ + Args: + Ax (cupy.ndarray): data array from input sparse matrix + Aj (cupy.ndarray): indices array from input sparse matrix + Ap (cupy.ndarray): indptr array from input sparse matrix + start (int): starting index of minor axis + stop (int): ending index of minor axis + + Returns: + Bx (cupy.ndarray): data array of output sparse matrix + Bj (cupy.ndarray): indices array of output sparse matrix + Bp (cupy.ndarray): indptr array of output sparse matrix + + """ + mask = (start <= Aj) & (Aj < stop) + mask_sum = cupy.empty(Aj.size + 1, dtype=Aj.dtype) + mask_sum[0] = 0 + mask_sum[1:] = mask + cupy.cumsum(mask_sum, out=mask_sum) + Bp = mask_sum[Ap] + Bj = Aj[mask] - start + Bx = Ax[mask] + + return Bx, Bj, Bp + + +_csr_row_index_ker = _core.ElementwiseKernel( + 'int32 out_rows, raw I rows, ' + 'raw int32 Ap, raw int32 Aj, raw T Ax, raw int32 Bp', + 'int32 Bj, T Bx', + ''' + const I row = rows[out_rows]; + + // Look up starting offset + const I starting_output_offset = Bp[out_rows]; + const I output_offset = i - starting_output_offset; + const I starting_input_offset = Ap[row]; + + Bj = Aj[starting_input_offset + output_offset]; + Bx = Ax[starting_input_offset + output_offset]; +''', 'cupyx_scipy_sparse_csr_row_index_ker') + + +def _csr_row_index(Ax, Aj, Ap, rows): + """Populate indices and data arrays from the given row index + Args: + Ax (cupy.ndarray): data array from input sparse matrix + Aj (cupy.ndarray): indices array from input sparse matrix + Ap (cupy.ndarray): indptr array from input sparse matrix + rows (cupy.ndarray): index array of rows to populate + Returns: + Bx (cupy.ndarray): data array of output sparse matrix + Bj (cupy.ndarray): indices array of output sparse matrix + Bp (cupy.ndarray): indptr array for output sparse matrix + """ + row_nnz = cupy.diff(Ap) + Bp = cupy.empty(rows.size + 1, dtype=Ap.dtype) + Bp[0] = 0 + cupy.cumsum(row_nnz[rows], out=Bp[1:]) + nnz = int(Bp[-1]) + + out_rows = _csr_indptr_to_coo_rows(nnz, Bp) + + Bj, Bx = 
_csr_row_index_ker(out_rows, rows, Ap, Aj, Ax, Bp) + return Bx, Bj, Bp + + +def _csr_indptr_to_coo_rows(nnz, Bp): + from cupy_backends.cuda.libs import cusparse + + out_rows = cupy.empty(nnz, dtype=numpy.int32) + + # Build a COO row array from output CSR indptr. + # Calling backend cusparse API directly to avoid + # constructing a whole COO object. + handle = device.get_cusparse_handle() + if runtime.is_hip and nnz == 0: + raise ValueError('hipSPARSE currently cannot handle ' + 'sparse matrices with null ptrs') + cusparse.xcsr2coo( + handle, Bp.data.ptr, nnz, Bp.size-1, out_rows.data.ptr, + cusparse.CUSPARSE_INDEX_BASE_ZERO) + + return out_rows + + +def _select_last_indices(i, j, x, idx_dtype): + """Find the unique indices for each row and keep only the last""" + i = cupy.asarray(i, dtype=idx_dtype) + j = cupy.asarray(j, dtype=idx_dtype) + + stacked = cupy.stack([j, i]) + order = cupy.lexsort(stacked).astype(idx_dtype) + + indptr_inserts = i[order] + indices_inserts = j[order] + data_inserts = x[order] + + mask = cupy.ones(indptr_inserts.size, dtype='bool') + _unique_mask_kern(indptr_inserts, indices_inserts, order, mask, + size=indptr_inserts.size-1) + + return indptr_inserts[mask], indices_inserts[mask], data_inserts[mask] + + +_insert_many_populate_arrays = _core.ElementwiseKernel( + '''raw I insert_indices, raw T insert_values, raw I insertion_indptr, + raw I Ap, raw I Aj, raw T Ax, raw I Bp''', + 'raw I Bj, raw T Bx', ''' + + const I input_row_start = Ap[i]; + const I input_row_end = Ap[i+1]; + const I input_count = input_row_end - input_row_start; + + const I insert_row_start = insertion_indptr[i]; + const I insert_row_end = insertion_indptr[i+1]; + const I insert_count = insert_row_end - insert_row_start; + + I input_offset = 0; + I insert_offset = 0; + + I output_n = Bp[i]; + + I cur_existing_index = -1; + T cur_existing_value = -1; + + I cur_insert_index = -1; + T cur_insert_value = -1; + + if(input_offset < input_count) { + cur_existing_index = 
Aj[input_row_start+input_offset]; + cur_existing_value = Ax[input_row_start+input_offset]; + } + + if(insert_offset < insert_count) { + cur_insert_index = insert_indices[insert_row_start+insert_offset]; + cur_insert_value = insert_values[insert_row_start+insert_offset]; + } + + + for(I jj = 0; jj < input_count + insert_count; jj++) { + + // if we have both available, use the lowest one. + if(input_offset < input_count && + insert_offset < insert_count) { + + if(cur_existing_index < cur_insert_index) { + Bj[output_n] = cur_existing_index; + Bx[output_n] = cur_existing_value; + + ++input_offset; + + if(input_offset < input_count) { + cur_existing_index = Aj[input_row_start+input_offset]; + cur_existing_value = Ax[input_row_start+input_offset]; + } + + + } else { + Bj[output_n] = cur_insert_index; + Bx[output_n] = cur_insert_value; + + ++insert_offset; + if(insert_offset < insert_count) { + cur_insert_index = + insert_indices[insert_row_start+insert_offset]; + cur_insert_value = + insert_values[insert_row_start+insert_offset]; + } + } + + } else if(input_offset < input_count) { + Bj[output_n] = cur_existing_index; + Bx[output_n] = cur_existing_value; + + ++input_offset; + if(input_offset < input_count) { + cur_existing_index = Aj[input_row_start+input_offset]; + cur_existing_value = Ax[input_row_start+input_offset]; + } + + } else { + Bj[output_n] = cur_insert_index; + Bx[output_n] = cur_insert_value; + + ++insert_offset; + if(insert_offset < insert_count) { + cur_insert_index = + insert_indices[insert_row_start+insert_offset]; + cur_insert_value = + insert_values[insert_row_start+insert_offset]; + } + } + + output_n++; + } + ''', 'cupyx_scipy_sparse_csr_copy_existing_indices_kern', no_return=True) + + +# Create a filter mask based on the lowest value of order +_unique_mask_kern = _core.ElementwiseKernel( + '''raw I rows, raw I cols, raw I order''', + '''raw bool mask''', + """ + I cur_row = rows[i]; + I next_row = rows[i+1]; + + I cur_col = cols[i]; + I next_col = 
cols[i+1]; + + I cur_order = order[i]; + I next_order = order[i+1]; + + if(cur_row == next_row && cur_col == next_col) { + if(cur_order < next_order) + mask[i] = false; + else + mask[i+1] = false; + } + """, + 'cupyx_scipy_sparse_unique_mask_kern', + no_return=True +) + + +def _csr_sample_values(n_row, n_col, + Ap, Aj, Ax, + Bi, Bj, not_found_val=0): + """Populate data array for a set of rows and columns + Args + n_row : total number of rows in input array + n_col : total number of columns in input array + Ap : indptr array for input sparse matrix + Aj : indices array for input sparse matrix + Ax : data array for input sparse matrix + Bi : array of rows to extract from input sparse matrix + Bj : array of columns to extract from input sparse matrix + Returns + Bx : data array for output sparse matrix + """ + + Bi[Bi < 0] += n_row + Bj[Bj < 0] += n_col + + return _csr_sample_values_kern(n_row, n_col, + Ap, Aj, Ax, + Bi, Bj, + not_found_val, + size=Bi.size) + + +_csr_sample_values_kern = _core.ElementwiseKernel( + '''I n_row, I n_col, raw I Ap, raw I Aj, raw T Ax, + raw I Bi, raw I Bj, I not_found_val''', + 'raw T Bx', ''' + const I j = Bi[i]; // sample row + const I k = Bj[i]; // sample column + const I row_start = Ap[j]; + const I row_end = Ap[j+1]; + T x = 0; + bool val_found = false; + for(I jj = row_start; jj < row_end; jj++) { + if (Aj[jj] == k) { + x += Ax[jj]; + val_found = true; + } + } + Bx[i] = val_found ? x : not_found_val; +''', 'cupyx_scipy_sparse_csr_sample_values_kern') + + +class IndexMixin(object): + """ + This class provides common dispatching and validation logic for indexing. + """ + + def __getitem__(self, key): + + # For testing- Scipy >= 1.4.0 is needed to guarantee + # results match. + if scipy_available and numpy.lib.NumpyVersion( + scipy.__version__) < '1.4.0': + raise NotImplementedError( + "Sparse __getitem__() requires Scipy >= 1.4.0") + + row, col = self._parse_indices(key) + + # Dispatch to specialized methods. 
+ if isinstance(row, _int_scalar_types): + if isinstance(col, _int_scalar_types): + return self._get_intXint(row, col) + elif isinstance(col, slice): + return self._get_intXslice(row, col) + elif col.ndim == 1: + return self._get_intXarray(row, col) + raise IndexError('index results in >2 dimensions') + elif isinstance(row, slice): + if isinstance(col, _int_scalar_types): + return self._get_sliceXint(row, col) + elif isinstance(col, slice): + if row == slice(None) and row == col: + return self.copy() + return self._get_sliceXslice(row, col) + elif col.ndim == 1: + return self._get_sliceXarray(row, col) + raise IndexError('index results in >2 dimensions') + elif row.ndim == 1: + if isinstance(col, _int_scalar_types): + return self._get_arrayXint(row, col) + elif isinstance(col, slice): + return self._get_arrayXslice(row, col) + else: # row.ndim == 2 + if isinstance(col, _int_scalar_types): + return self._get_arrayXint(row, col) + elif isinstance(col, slice): + raise IndexError('index results in >2 dimensions') + elif row.shape[1] == 1 and (col.ndim == 1 or col.shape[0] == 1): + # special case for outer indexing + return self._get_columnXarray(row[:, 0], col.ravel()) + + # The only remaining case is inner (fancy) indexing + row, col = cupy.broadcast_arrays(row, col) + if row.shape != col.shape: + raise IndexError('number of row and column indices differ') + if row.size == 0: + return self.__class__(cupy.atleast_2d(row).shape, dtype=self.dtype) + return self._get_arrayXarray(row, col) + + def __setitem__(self, key, x): + row, col = self._parse_indices(key) + + if isinstance(row, _int_scalar_types) and\ + isinstance(col, _int_scalar_types): + x = cupy.asarray(x, dtype=self.dtype) + if x.size != 1: + raise ValueError('Trying to assign a sequence to an item') + self._set_intXint(row, col, x.flat[0]) + return + + if isinstance(row, slice): + row = cupy.arange(*row.indices(self.shape[0]))[:, None] + else: + row = cupy.atleast_1d(row) + + if isinstance(col, slice): + col = 
cupy.arange(*col.indices(self.shape[1]))[None, :] + if row.ndim == 1: + row = row[:, None] + else: + col = cupy.atleast_1d(col) + + i, j = cupy.broadcast_arrays(row, col) + if i.shape != j.shape: + raise IndexError('number of row and column indices differ') + + if isspmatrix(x): + if i.ndim == 1: + # Inner indexing, so treat them like row vectors. + i = i[None] + j = j[None] + broadcast_row = x.shape[0] == 1 and i.shape[0] != 1 + broadcast_col = x.shape[1] == 1 and i.shape[1] != 1 + if not ((broadcast_row or x.shape[0] == i.shape[0]) and + (broadcast_col or x.shape[1] == i.shape[1])): + raise ValueError('shape mismatch in assignment') + if x.size == 0: + return + x = x.tocoo(copy=True) + x.sum_duplicates() + self._set_arrayXarray_sparse(i, j, x) + else: + # Make x and i into the same shape + x = cupy.asarray(x, dtype=self.dtype) + x, _ = cupy.broadcast_arrays(x, i) + if x.size == 0: + return + x = x.reshape(i.shape) + self._set_arrayXarray(i, j, x) + + def _is_scalar(self, index): + if isinstance(index, (cupy.ndarray, numpy.ndarray)) and \ + index.ndim == 0 and index.size == 1: + return True + return False + + def _parse_indices(self, key): + M, N = self.shape + row, col = _unpack_index(key) + + if self._is_scalar(row): + row = row.item() + if self._is_scalar(col): + col = col.item() + + # Scipy calls sputils.isintlike() rather than + # isinstance(x, _int_scalar_types). Comparing directly to int + # here to minimize the impact of nested exception catching + + if isinstance(row, _int_scalar_types): + row = _normalize_index(row, M, 'row') + elif not isinstance(row, slice): + row = self._asindices(row, M) + + if isinstance(col, _int_scalar_types): + col = _normalize_index(col, N, 'column') + elif not isinstance(col, slice): + col = self._asindices(col, N) + + return row, col + + def _asindices(self, idx, length): + """Convert `idx` to a valid index for an axis with a given length. + Subclasses that need special validation can override this method. 
+ + idx is assumed to be at least a 1-dimensional array-like, but can + have no more than 2 dimensions. + """ + try: + x = cupy.asarray(idx, dtype=self.indices.dtype) + except (ValueError, TypeError, MemoryError): + raise IndexError('invalid index') + + if x.ndim not in (1, 2): + raise IndexError('Index dimension must be <= 2') + + return x % length + + def getrow(self, i): + """Return a copy of row i of the matrix, as a (1 x n) row vector. + + Args: + i (integer): Row + + Returns: + cupyx.scipy.sparse.spmatrix: Sparse matrix with single row + """ + M, N = self.shape + i = _normalize_index(i, M, 'index') + return self._get_intXslice(i, slice(None)) + + def getcol(self, i): + """Return a copy of column i of the matrix, as a (m x 1) column vector. + + Args: + i (integer): Column + + Returns: + cupyx.scipy.sparse.spmatrix: Sparse matrix with single column + """ + M, N = self.shape + i = _normalize_index(i, N, 'index') + return self._get_sliceXint(slice(None), i) + + def _get_intXint(self, row, col): + raise NotImplementedError() + + def _get_intXarray(self, row, col): + raise NotImplementedError() + + def _get_intXslice(self, row, col): + raise NotImplementedError() + + def _get_sliceXint(self, row, col): + raise NotImplementedError() + + def _get_sliceXslice(self, row, col): + raise NotImplementedError() + + def _get_sliceXarray(self, row, col): + raise NotImplementedError() + + def _get_arrayXint(self, row, col): + raise NotImplementedError() + + def _get_arrayXslice(self, row, col): + raise NotImplementedError() + + def _get_columnXarray(self, row, col): + raise NotImplementedError() + + def _get_arrayXarray(self, row, col): + raise NotImplementedError() + + def _set_intXint(self, row, col, x): + raise NotImplementedError() + + def _set_arrayXarray(self, row, col, x): + raise NotImplementedError() + + def _set_arrayXarray_sparse(self, row, col, x): + # Fall back to densifying x + x = cupy.asarray(x.toarray(), dtype=self.dtype) + x, _ = cupy.broadcast_arrays(x, row) 
+ self._set_arrayXarray(row, col, x) + + +def _try_is_scipy_spmatrix(index): + if scipy_available: + return isinstance(index, scipy.sparse.spmatrix) + return False + + +def _unpack_index(index): + """ Parse index. Always return a tuple of the form (row, col). + Valid type for row/col is integer, slice, or array of integers. + + Returns: + resulting row & col indices : single integer, slice, or + array of integers. If row & column indices are supplied + explicitly, they are used as the major/minor indices. + If only one index is supplied, the minor index is + assumed to be all (e.g., [maj, :]). + """ + # First, check if indexing with single boolean matrix. + if ((isinstance(index, (spmatrix, cupy.ndarray, + numpy.ndarray)) + or _try_is_scipy_spmatrix(index)) + and index.ndim == 2 and index.dtype.kind == 'b'): + return index.nonzero() + + # Parse any ellipses. + index = _eliminate_ellipsis(index) + + # Next, parse the tuple or object + if isinstance(index, tuple): + if len(index) == 2: + row, col = index + elif len(index) == 1: + row, col = index[0], slice(None) + else: + raise IndexError('invalid number of indices') + else: + idx = _compatible_boolean_index(index) + if idx is None: + row, col = index, slice(None) + elif idx.ndim < 2: + return _boolean_index_to_array(idx), slice(None) + elif idx.ndim == 2: + return idx.nonzero() + # Next, check for validity and transform the index as needed. + if isspmatrix(row) or isspmatrix(col): + # Supporting sparse boolean indexing with both row and col does + # not work because spmatrix.ndim is always 2. 
+ raise IndexError( + 'Indexing with sparse matrices is not supported ' + 'except boolean indexing where matrix and index ' + 'are equal shapes.') + bool_row = _compatible_boolean_index(row) + bool_col = _compatible_boolean_index(col) + if bool_row is not None: + row = _boolean_index_to_array(bool_row) + if bool_col is not None: + col = _boolean_index_to_array(bool_col) + return row, col + + +def _eliminate_ellipsis(index): + """Process indices with Ellipsis. Returns modified index.""" + if index is Ellipsis: + return (slice(None), slice(None)) + + if not isinstance(index, tuple): + return index + + # Find first ellipsis. + for j, v in enumerate(index): + if v is Ellipsis: + first_ellipsis = j + break + else: + return index + + # Try to expand it using shortcuts for common cases + if len(index) == 1: + return (slice(None), slice(None)) + if len(index) == 2: + if first_ellipsis == 0: + if index[1] is Ellipsis: + return (slice(None), slice(None)) + return (slice(None), index[1]) + return (index[0], slice(None)) + + # Expand it using a general-purpose algorithm + tail = [] + for v in index[first_ellipsis+1:]: + if v is not Ellipsis: + tail.append(v) + nd = first_ellipsis + len(tail) + nslice = max(0, 2 - nd) + return index[:first_ellipsis] + (slice(None),) * nslice + tuple(tail,) + + +def _normalize_index(x, dim, name): + if x < -dim or x >= dim: + raise IndexError('{} ({}) out of range'.format(name, x)) + if x < 0: + x += dim + return x + + +def _first_element_bool(idx, max_dim=2): + """Returns True if first element of the incompatible + array type is boolean. + """ + if max_dim < 1: + return None + try: + first = idx[0] if len(idx) > 0 else None + except TypeError: + return None + if isinstance(first, _bool_scalar_types): + return True + return _first_element_bool(first, max_dim-1) + + +def _compatible_boolean_index(idx): + """Returns a boolean index array that can be converted to + integer array. Returns None if no such array exists. 
+ """ + # presence of attribute `ndim` indicates a compatible array type. + if hasattr(idx, 'ndim'): + if idx.dtype.kind == 'b': + return idx + # non-ndarray bool collection should be converted to ndarray + elif _first_element_bool(idx): + return cupy.asarray(idx, dtype='bool') + return None + + +def _boolean_index_to_array(idx): + if idx.ndim > 1: + raise IndexError('invalid index shape') + idx = cupy.array(idx, dtype=idx.dtype) + return cupy.where(idx)[0] diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_sputils.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_sputils.py new file mode 100644 index 0000000000000000000000000000000000000000..87bb7f3008577f0316e76d1897b29bf05e144966 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_sputils.py @@ -0,0 +1,169 @@ +import cupy +import operator +import numpy + +from cupy._core._dtype import get_dtype + +supported_dtypes = [get_dtype(x) for x in + ('single', 'double', 'csingle', 'cdouble')] + +_upcast_memo: dict = {} + + +def isdense(x): + return isinstance(x, cupy.ndarray) + + +def isscalarlike(x): + """Is x either a scalar, an array scalar, or a 0-dim array?""" + return cupy.isscalar(x) or (isdense(x) and x.ndim == 0) + + +def get_index_dtype(arrays=(), maxval=None, check_contents=False): + """Based on input (integer) arrays ``a``, determines a suitable index data + type that can hold the data in the arrays. + + Args: + arrays (tuple of array_like): + Input arrays whose types/contents to check + maxval (float, optional): + Maximum value needed + check_contents (bool, optional): + Whether to check the values in the arrays and not just their types. 
+ Default: False (check only the types) + + Returns: + dtype: Suitable index data type (int32 or int64) + """ + + int32min = cupy.iinfo(cupy.int32).min + int32max = cupy.iinfo(cupy.int32).max + + dtype = cupy.int32 + if maxval is not None: + if maxval > int32max: + dtype = cupy.int64 + + if isinstance(arrays, cupy.ndarray): + arrays = (arrays,) + + for arr in arrays: + arr = cupy.asarray(arr) + if not cupy.can_cast(arr.dtype, cupy.int32): + if check_contents: + if arr.size == 0: + # a bigger type not needed + continue + elif cupy.issubdtype(arr.dtype, cupy.integer): + maxval = arr.max() + minval = arr.min() + if minval >= int32min and maxval <= int32max: + # a bigger type not needed + continue + + dtype = cupy.int64 + break + + return dtype + + +def validateaxis(axis): + if axis is not None: + # In NumPy, you can pass in tuples for 'axis', but they are + # not very useful for sparse matrices given their limited + # dimensions, so let's make it explicit that they are not + # allowed to be passed in + if isinstance(axis, tuple): + raise TypeError(("Tuples are not accepted for the 'axis' " + "parameter. Please pass in one of the " + "following: {-2, -1, 0, 1, None}.")) + + axis_type = type(axis) + + # If not a tuple, check that the provided axis is actually + # an integer and raise a TypeError similar to NumPy's + if not cupy.issubdtype(cupy.dtype(axis_type), cupy.integer): + raise TypeError("axis must be an integer, not {name}" + .format(name=axis_type.__name__)) + + if not (-2 <= axis <= 1): + raise ValueError("axis out of range") + + +def upcast(*args): + """Returns the nearest supported sparse dtype for the + combination of one or more types. 
+ + upcast(t0, t1, ..., tn) -> T where T is a supported dtype + + Examples: + >>> upcast('int32') + + >>> upcast('int32','float32') + + >>> upcast('bool',float) + + """ + + t = _upcast_memo.get(args) + if t is not None: + return t + + upcast = numpy.result_type(*args) + + for t in supported_dtypes: + if cupy.can_cast(upcast, t): + _upcast_memo[args] = t + return t + + raise TypeError('no supported conversion for types: %r' % (args,)) + + +def check_shape(args, current_shape=None): + """Check validity of the shape""" + + if len(args) == 0: + raise TypeError("function missing 1 required positional argument: " + "'shape'") + + elif len(args) == 1: + try: + shape_iter = iter(args[0]) + except TypeError: + new_shape = (operator.index(args[0]), ) + else: + new_shape = tuple(operator.index(arg) for arg in shape_iter) + else: + new_shape = tuple(operator.index(arg) for arg in args) + + if current_shape is None: + if len(new_shape) != 2: + raise ValueError('shape must be a 2-tuple of positive integers') + elif new_shape[0] < 0 or new_shape[1] < 0: + raise ValueError("'shape' elements cannot be negative") + + else: + current_size = numpy.prod(current_shape) + + negative_indexes = [i for i, x in enumerate(new_shape) if x < 0] + if len(negative_indexes) == 0: + new_size = numpy.prod(new_shape) + if new_size != current_size: + raise ValueError('cannot reshape array of size {} into shape' + '{}'.format(current_size, new_shape)) + elif len(negative_indexes) == 1: + skip = negative_indexes[0] + specified = numpy.prod(new_shape[0:skip] + new_shape[skip+1:]) + unspecified, remainder = divmod(current_size, specified) + if remainder != 0: + err_shape = tuple('newshape'if x < 0 else x for x in new_shape) + raise ValueError('cannot reshape array of size {} into shape' + '{}'.format(current_size, err_shape)) + new_shape = new_shape[0:skip] + (unspecified,) + new_shape[skip+1:] + else: + raise ValueError('can only specify one unknown dimension') + + if len(new_shape) != 2: + raise 
ValueError('matrix shape must be two-dimensional') + + return new_shape diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_util.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_util.py new file mode 100644 index 0000000000000000000000000000000000000000..99f60e5d36a7b75b243fce8a52c864731f3689fa --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_util.py @@ -0,0 +1,26 @@ +import cupy +from cupy._core import core + + +def isdense(x): + return isinstance(x, core.ndarray) + + +def isintlike(x): + try: + return bool(int(x) == x) + except (TypeError, ValueError): + return False + + +def isscalarlike(x): + return cupy.isscalar(x) or (isdense(x) and x.ndim == 0) + + +def isshape(x): + if not isinstance(x, tuple) or len(x) != 2: + return False + m, n = x + if isinstance(n, tuple): + return False + return isintlike(m) and isintlike(n) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/__init__.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8557ca75a0457c0eea9d4e08bba7452ce77cb248 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/__init__.py @@ -0,0 +1,4 @@ +# Functions from the following SciPy document +# https://docs.scipy.org/doc/scipy/reference/sparse.csgraph.html + +from cupyx.scipy.sparse.csgraph._traversal import connected_components # NOQA diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..243e869ce1a847889b1dfa25f87e5f4ed97c2aee Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/__pycache__/_traversal.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/__pycache__/_traversal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ff1271e751306114740abd19a00f92285008da4 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/__pycache__/_traversal.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/_traversal.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/_traversal.py new file mode 100644 index 0000000000000000000000000000000000000000..a3b92d13f78d6200ef4b51b25c02f5cb41e33a32 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/_traversal.py @@ -0,0 +1,119 @@ +import cupy +import cupyx.scipy.sparse +try: + import pylibcugraph + pylibcugraph_available = True +except ModuleNotFoundError: + pylibcugraph_available = False + + +def connected_components(csgraph, directed=True, connection='weak', + return_labels=True): + """Analyzes the connected components of a sparse graph + + Args: + csgraph (cupy.ndarray of cupyx.scipy.sparse.csr_matrix): The adjacency + matrix representing connectivity among nodes. + directed (bool): If ``True``, it operates on a directed graph. If + ``False``, it operates on an undirected graph. + connection (str): ``'weak'`` or ``'strong'``. For directed graphs, the + type of connection to use. Nodes i and j are "strongly" connected + only when a path exists both from i to j and from j to i. + If ``directed`` is ``False``, this argument is ignored. + return_labels (bool): If ``True``, it returns the labels for each of + the connected components. + + Returns: + tuple of int and cupy.ndarray, or int: + If ``return_labels`` == ``True``, returns a tuple ``(n, labels)``, + where ``n`` is the number of connected components and ``labels`` is + labels of each connected components. 
Otherwise, returns ``n``. + + .. seealso:: :func:`scipy.sparse.csgraph.connected_components` + """ + if not pylibcugraph_available: + raise RuntimeError('pylibcugraph is not available') + + connection = connection.lower() + if connection not in ('weak', 'strong'): + raise ValueError("connection must be 'weak' or 'strong'") + + if not directed: + connection = 'weak' + + if csgraph.ndim != 2: + raise ValueError('graph should have two dimensions') + + if not cupyx.scipy.sparse.isspmatrix_csr(csgraph): + csgraph = cupyx.scipy.sparse.csr_matrix(csgraph) + m, m1 = csgraph.shape + if m != m1: + raise ValueError('graph should be a square array') + if csgraph.nnz == 0: + return m, cupy.arange(m, dtype=csgraph.indices.dtype) + + if connection == 'strong': + labels = cupy.empty(m, dtype=csgraph.indices.dtype) + pylibcugraph.strongly_connected_components( + offsets=csgraph.indptr, indices=csgraph.indices, weights=None, + num_verts=m, num_edges=csgraph.nnz, labels=labels) + else: + csgraph += csgraph.T + if not cupyx.scipy.sparse.isspmatrix_csr(csgraph): + csgraph = cupyx.scipy.sparse.csr_matrix(csgraph) + _, labels = pylibcugraph.weakly_connected_components( + resource_handle=None, + graph=None, + indices=csgraph.indices, + offsets=csgraph.indptr, + weights=None, + labels=None, + do_expensive_check=False, + ) + + count = cupy.zeros((1,), dtype=csgraph.indices.dtype) + root_labels = cupy.empty((m,), dtype=csgraph.indices.dtype) + _cupy_count_components(labels, count, root_labels, size=m) + n = int(count[0]) + if not return_labels: + return n + _cupy_adjust_labels(n, cupy.sort(root_labels[:n]), labels) + return n, labels + + +_cupy_count_components = cupy.ElementwiseKernel( + '', + 'raw I labels, raw int32 count, raw int32 root_labels', + ''' + int j = i; + while (j != labels[j]) { j = labels[j]; } + if (j != i) { + labels[i] = j; + } else { + int k = atomicAdd(&count[0], 1); + root_labels[k] = i; + } + ''', + '_cupy_count_components') + + +_cupy_adjust_labels = 
cupy.ElementwiseKernel( + 'int32 n_root_labels, raw I root_labels', + 'I labels', + ''' + int cur_label = labels; + int j_min = 0; + int j_max = n_root_labels - 1; + int j = (j_min + j_max) / 2; + while (j_min < j_max) { + if (cur_label == root_labels[j]) break; + if (cur_label < root_labels[j]) { + j_max = j - 1; + } else { + j_min = j + 1; + } + j = (j_min + j_max) / 2; + } + labels = j; + ''', + '_cupy_adjust_labels') diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__init__.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d682600ae999e903544b8e35abfa868b0a77334b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__init__.py @@ -0,0 +1,22 @@ +# Functions from the following SciPy document +# https://docs.scipy.org/doc/scipy/reference/sparse.linalg.html + +# "NOQA" to suppress flake8 warning +from cupyx.scipy.sparse.linalg._norm import norm # NOQA +from cupyx.scipy.sparse.linalg._solve import spsolve # NOQA +from cupyx.scipy.sparse.linalg._solve import spsolve_triangular # NOQA +from cupyx.scipy.sparse.linalg._solve import factorized # NOQA +from cupyx.scipy.sparse.linalg._solve import lsqr # NOQA +from cupyx.scipy.sparse.linalg._solve import lsmr # NOQA +from cupyx.scipy.sparse.linalg._solve import splu # NOQA +from cupyx.scipy.sparse.linalg._solve import spilu # NOQA +from cupyx.scipy.sparse.linalg._solve import SuperLU # NOQA +from cupyx.scipy.sparse.linalg._solve import minres # NOQA +from cupyx.scipy.sparse.linalg._eigen import eigsh # NOQA +from cupyx.scipy.sparse.linalg._eigen import svds # NOQA +from cupyx.scipy.sparse.linalg._iterative import cg # NOQA +from cupyx.scipy.sparse.linalg._iterative import gmres # NOQA +from cupyx.scipy.sparse.linalg._iterative import cgs # NOQA +from cupyx.scipy.sparse.linalg._interface import LinearOperator # NOQA +from cupyx.scipy.sparse.linalg._interface import 
aslinearoperator # NOQA +from cupyx.scipy.sparse.linalg._lobpcg import lobpcg # NOQA diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f3ad7d05e842f27ea8b04f5b8750ddfd3ac9acb Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_eigen.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_eigen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e3154db7b4fdf2a6cf4130a5bd673050e2d5e9a Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_eigen.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_interface.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_interface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..103622cf528af9e7bc27c1697c2c92216d522e4c Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_interface.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_iterative.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_iterative.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4f9b5abbe8a727e9f1bebaa17c08825f45bf861 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_iterative.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_lobpcg.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_lobpcg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51fd753fa240132f1569d0e3a24ec534a0669bc9 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_lobpcg.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_norm.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_norm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbcf452311eccfff012ad24f35ded332e7bbe181 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_norm.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_solve.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_solve.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e9fe1c2130e758b11dd6fea9333723b9d1acd220 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_solve.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_eigen.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_eigen.py new file mode 100644 index 0000000000000000000000000000000000000000..3038809f83cf5379c4b251b1b94d43169740b44c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_eigen.py @@ -0,0 +1,430 @@ +import numpy +import cupy + +from cupy import cublas +from cupy._core import _dtype +from cupy.cuda import device +from cupy_backends.cuda.libs import cublas as _cublas +from cupyx.scipy.sparse import _csr +from cupyx.scipy.sparse.linalg import _interface + + +def eigsh(a, k=6, *, which='LM', 
v0=None, ncv=None, maxiter=None, + tol=0, return_eigenvectors=True): + """ + Find ``k`` eigenvalues and eigenvectors of the real symmetric square + matrix or complex Hermitian matrix ``A``. + + Solves ``Ax = wx``, the standard eigenvalue problem for ``w`` eigenvalues + with corresponding eigenvectors ``x``. + + Args: + a (ndarray, spmatrix or LinearOperator): A symmetric square matrix with + dimension ``(n, n)``. ``a`` must :class:`cupy.ndarray`, + :class:`cupyx.scipy.sparse.spmatrix` or + :class:`cupyx.scipy.sparse.linalg.LinearOperator`. + k (int): The number of eigenvalues and eigenvectors to compute. Must be + ``1 <= k < n``. + which (str): 'LM' or 'LA' or 'SA'. + 'LM': finds ``k`` largest (in magnitude) eigenvalues. + 'LA': finds ``k`` largest (algebraic) eigenvalues. + 'SA': finds ``k`` smallest (algebraic) eigenvalues. + + v0 (ndarray): Starting vector for iteration. If ``None``, a random + unit vector is used. + ncv (int): The number of Lanczos vectors generated. Must be + ``k + 1 < ncv < n``. If ``None``, default value is used. + maxiter (int): Maximum number of Lanczos update iterations. + If ``None``, default value is used. + tol (float): Tolerance for residuals ``||Ax - wx||``. If ``0``, machine + precision is used. + return_eigenvectors (bool): If ``True``, returns eigenvectors in + addition to eigenvalues. + + Returns: + tuple: + If ``return_eigenvectors is True``, it returns ``w`` and ``x`` + where ``w`` is eigenvalues and ``x`` is eigenvectors. Otherwise, + it returns only ``w``. + + .. seealso:: :func:`scipy.sparse.linalg.eigsh` + + .. note:: + This function uses the thick-restart Lanczos methods + (https://sdm.lbl.gov/~kewu/ps/trlan.html). 
+ + """ + n = a.shape[0] + if a.ndim != 2 or a.shape[0] != a.shape[1]: + raise ValueError('expected square matrix (shape: {})'.format(a.shape)) + if a.dtype.char not in 'fdFD': + raise TypeError('unsupprted dtype (actual: {})'.format(a.dtype)) + if k <= 0: + raise ValueError('k must be greater than 0 (actual: {})'.format(k)) + if k >= n: + raise ValueError('k must be smaller than n (actual: {})'.format(k)) + if which not in ('LM', 'LA', 'SA'): + raise ValueError('which must be \'LM\',\'LA\'or\'SA\' (actual: {})' + ''.format(which)) + if ncv is None: + ncv = min(max(2 * k, k + 32), n - 1) + else: + ncv = min(max(ncv, k + 2), n - 1) + if maxiter is None: + maxiter = 10 * n + if tol == 0: + tol = numpy.finfo(a.dtype).eps + + alpha = cupy.zeros((ncv,), dtype=a.dtype) + beta = cupy.zeros((ncv,), dtype=a.dtype.char.lower()) + V = cupy.empty((ncv, n), dtype=a.dtype) + + # Set initial vector + if v0 is None: + u = cupy.random.random((n,)).astype(a.dtype) + V[0] = u / cublas.nrm2(u) + else: + u = v0 + V[0] = v0 / cublas.nrm2(v0) + + # Choose Lanczos implementation, unconditionally use 'fast' for now + upadte_impl = 'fast' + if upadte_impl == 'fast': + lanczos = _lanczos_fast(a, n, ncv) + else: + lanczos = _lanczos_asis + + # Lanczos iteration + lanczos(a, V, u, alpha, beta, 0, ncv) + + iter = ncv + w, s = _eigsh_solve_ritz(alpha, beta, None, k, which) + x = V.T @ s + + # Compute residual + beta_k = beta[-1] * s[-1, :] + res = cublas.nrm2(beta_k) + + uu = cupy.empty((k,), dtype=a.dtype) + + while res > tol and iter < maxiter: + # Setup for thick-restart + beta[:k] = 0 + alpha[:k] = w + V[:k] = x.T + + # u -= u.T @ V[:k].conj().T @ V[:k] + cublas.gemv(_cublas.CUBLAS_OP_C, 1, V[:k].T, u, 0, uu) + cublas.gemv(_cublas.CUBLAS_OP_N, -1, V[:k].T, uu, 1, u) + V[k] = u / cublas.nrm2(u) + + u[...] 
= a @ V[k] + cublas.dotc(V[k], u, out=alpha[k]) + u -= alpha[k] * V[k] + u -= V[:k].T @ beta_k + cublas.nrm2(u, out=beta[k]) + V[k+1] = u / beta[k] + + # Lanczos iteration + lanczos(a, V, u, alpha, beta, k + 1, ncv) + + iter += ncv - k + w, s = _eigsh_solve_ritz(alpha, beta, beta_k, k, which) + x = V.T @ s + + # Compute residual + beta_k = beta[-1] * s[-1, :] + res = cublas.nrm2(beta_k) + + if return_eigenvectors: + idx = cupy.argsort(w) + return w[idx], x[:, idx] + else: + return cupy.sort(w) + + +def _lanczos_asis(a, V, u, alpha, beta, i_start, i_end): + for i in range(i_start, i_end): + u[...] = a @ V[i] + cublas.dotc(V[i], u, out=alpha[i]) + u -= u.T @ V[:i+1].conj().T @ V[:i+1] + cublas.nrm2(u, out=beta[i]) + if i >= i_end - 1: + break + V[i+1] = u / beta[i] + + +def _lanczos_fast(A, n, ncv): + from cupy_backends.cuda.libs import cusparse as _cusparse + from cupyx import cusparse + + cublas_handle = device.get_cublas_handle() + cublas_pointer_mode = _cublas.getPointerMode(cublas_handle) + if A.dtype.char == 'f': + dotc = _cublas.sdot + nrm2 = _cublas.snrm2 + gemv = _cublas.sgemv + axpy = _cublas.saxpy + elif A.dtype.char == 'd': + dotc = _cublas.ddot + nrm2 = _cublas.dnrm2 + gemv = _cublas.dgemv + axpy = _cublas.daxpy + elif A.dtype.char == 'F': + dotc = _cublas.cdotc + nrm2 = _cublas.scnrm2 + gemv = _cublas.cgemv + axpy = _cublas.caxpy + elif A.dtype.char == 'D': + dotc = _cublas.zdotc + nrm2 = _cublas.dznrm2 + gemv = _cublas.zgemv + axpy = _cublas.zaxpy + else: + raise TypeError('invalid dtype ({})'.format(A.dtype)) + + cusparse_handle = None + if _csr.isspmatrix_csr(A) and cusparse.check_availability('spmv'): + cusparse_handle = device.get_cusparse_handle() + spmv_op_a = _cusparse.CUSPARSE_OPERATION_NON_TRANSPOSE + spmv_alpha = numpy.array(1.0, A.dtype) + spmv_beta = numpy.array(0.0, A.dtype) + spmv_cuda_dtype = _dtype.to_cuda_dtype(A.dtype) + spmv_alg = _cusparse.CUSPARSE_MV_ALG_DEFAULT + + v = cupy.empty((n,), dtype=A.dtype) + uu = cupy.empty((ncv,), 
dtype=A.dtype) + vv = cupy.empty((n,), dtype=A.dtype) + b = cupy.empty((), dtype=A.dtype) + one = numpy.array(1.0, dtype=A.dtype) + zero = numpy.array(0.0, dtype=A.dtype) + mone = numpy.array(-1.0, dtype=A.dtype) + + outer_A = A + + def aux(A, V, u, alpha, beta, i_start, i_end): + assert A is outer_A + + # Get ready for spmv if enabled + if cusparse_handle is not None: + # Note: I would like to reuse descriptors and working buffer + # on the next update, but I gave it up because it sometimes + # caused illegal memory access error. + spmv_desc_A = cusparse.SpMatDescriptor.create(A) + spmv_desc_v = cusparse.DnVecDescriptor.create(v) + spmv_desc_u = cusparse.DnVecDescriptor.create(u) + buff_size = _cusparse.spMV_bufferSize( + cusparse_handle, spmv_op_a, spmv_alpha.ctypes.data, + spmv_desc_A.desc, spmv_desc_v.desc, spmv_beta.ctypes.data, + spmv_desc_u.desc, spmv_cuda_dtype, spmv_alg) + spmv_buff = cupy.empty(buff_size, cupy.int8) + + v[...] = V[i_start] + for i in range(i_start, i_end): + # Matrix-vector multiplication + if cusparse_handle is None: + u[...] = A @ v + else: + _cusparse.spMV( + cusparse_handle, spmv_op_a, spmv_alpha.ctypes.data, + spmv_desc_A.desc, spmv_desc_v.desc, + spmv_beta.ctypes.data, spmv_desc_u.desc, + spmv_cuda_dtype, spmv_alg, spmv_buff.data.ptr) + + # Call dotc: alpha[i] = v.conj().T @ u + _cublas.setPointerMode( + cublas_handle, _cublas.CUBLAS_POINTER_MODE_DEVICE) + try: + dotc(cublas_handle, n, v.data.ptr, 1, u.data.ptr, 1, + alpha.data.ptr + i * alpha.itemsize) + finally: + _cublas.setPointerMode(cublas_handle, cublas_pointer_mode) + + # Orthogonalize: u = u - alpha[i] * v - beta[i - 1] * V[i - 1] + vv.fill(0) + b[...] 
= beta[i - 1] # cast from real to complex + _cublas.setPointerMode( + cublas_handle, _cublas.CUBLAS_POINTER_MODE_DEVICE) + try: + axpy(cublas_handle, n, + alpha.data.ptr + i * alpha.itemsize, + v.data.ptr, 1, vv.data.ptr, 1) + axpy(cublas_handle, n, + b.data.ptr, + V[i - 1].data.ptr, 1, vv.data.ptr, 1) + finally: + _cublas.setPointerMode(cublas_handle, cublas_pointer_mode) + axpy(cublas_handle, n, + mone.ctypes.data, + vv.data.ptr, 1, u.data.ptr, 1) + + # Reorthogonalize: u -= V @ (V.conj().T @ u) + gemv(cublas_handle, _cublas.CUBLAS_OP_C, + n, i + 1, + one.ctypes.data, V.data.ptr, n, + u.data.ptr, 1, + zero.ctypes.data, uu.data.ptr, 1) + gemv(cublas_handle, _cublas.CUBLAS_OP_N, + n, i + 1, + mone.ctypes.data, V.data.ptr, n, + uu.data.ptr, 1, + one.ctypes.data, u.data.ptr, 1) + alpha[i] += uu[i] + + # Call nrm2 + _cublas.setPointerMode( + cublas_handle, _cublas.CUBLAS_POINTER_MODE_DEVICE) + try: + nrm2(cublas_handle, n, u.data.ptr, 1, + beta.data.ptr + i * beta.itemsize) + finally: + _cublas.setPointerMode(cublas_handle, cublas_pointer_mode) + + # Break here as the normalization below touches V[i+1] + if i >= i_end - 1: + break + + # Normalize + _kernel_normalize(u, beta, i, n, v, V) + + return aux + + +_kernel_normalize = cupy.ElementwiseKernel( + 'T u, raw S beta, int32 j, int32 n', 'T v, raw T V', + 'v = u / beta[j]; V[i + (j+1) * n] = v;', 'cupy_eigsh_normalize' +) + + +def _eigsh_solve_ritz(alpha, beta, beta_k, k, which): + # Note: This is done on the CPU, because there is an issue in + # cupy.linalg.eigh with CUDA 9.2, which can return NaNs. It will has little + # impact on performance, since the matrix size processed here is not large. 
+ alpha = cupy.asnumpy(alpha) + beta = cupy.asnumpy(beta) + t = numpy.diag(alpha) + t = t + numpy.diag(beta[:-1], k=1) + t = t + numpy.diag(beta[:-1], k=-1) + if beta_k is not None: + beta_k = cupy.asnumpy(beta_k) + t[k, :k] = beta_k + t[:k, k] = beta_k + w, s = numpy.linalg.eigh(t) + + # Pick-up k ritz-values and ritz-vectors + if which == 'LA': + idx = numpy.argsort(w) + wk = w[idx[-k:]] + sk = s[:, idx[-k:]] + elif which == 'LM': + idx = numpy.argsort(numpy.absolute(w)) + wk = w[idx[-k:]] + sk = s[:, idx[-k:]] + + elif which == 'SA': + idx = numpy.argsort(w) + wk = w[idx[:k]] + sk = s[:, idx[:k]] + # elif which == 'SM': #dysfunctional + # idx = cupy.argsort(abs(w)) + # wk = w[idx[:k]] + # sk = s[:,idx[:k]] + return cupy.array(wk), cupy.array(sk) + + +def svds(a, k=6, *, ncv=None, tol=0, which='LM', maxiter=None, + return_singular_vectors=True): + """Finds the largest ``k`` singular values/vectors for a sparse matrix. + + Args: + a (ndarray, spmatrix or LinearOperator): A real or complex array with + dimension ``(m, n)``. ``a`` must :class:`cupy.ndarray`, + :class:`cupyx.scipy.sparse.spmatrix` or + :class:`cupyx.scipy.sparse.linalg.LinearOperator`. + k (int): The number of singular values/vectors to compute. Must be + ``1 <= k < min(m, n)``. + ncv (int): The number of Lanczos vectors generated. Must be + ``k + 1 < ncv < min(m, n)``. If ``None``, default value is used. + tol (float): Tolerance for singular values. If ``0``, machine precision + is used. + which (str): Only 'LM' is supported. 'LM': finds ``k`` largest singular + values. + maxiter (int): Maximum number of Lanczos update iterations. + If ``None``, default value is used. + return_singular_vectors (bool): If ``True``, returns singular vectors + in addition to singular values. + + Returns: + tuple: + If ``return_singular_vectors`` is ``True``, it returns ``u``, ``s`` + and ``vt`` where ``u`` is left singular vectors, ``s`` is singular + values and ``vt`` is right singular vectors. 
Otherwise, it returns + only ``s``. + + .. seealso:: :func:`scipy.sparse.linalg.svds` + + .. note:: + This is a naive implementation using cupyx.scipy.sparse.linalg.eigsh as + an eigensolver on ``a.H @ a`` or ``a @ a.H``. + + """ + if a.ndim != 2: + raise ValueError('expected 2D (shape: {})'.format(a.shape)) + if a.dtype.char not in 'fdFD': + raise TypeError('unsupprted dtype (actual: {})'.format(a.dtype)) + m, n = a.shape + if k <= 0: + raise ValueError('k must be greater than 0 (actual: {})'.format(k)) + if k >= min(m, n): + raise ValueError('k must be smaller than min(m, n) (actual: {})' + ''.format(k)) + + a = _interface.aslinearoperator(a) + if m >= n: + aH, a = a.H, a + else: + aH, a = a, a.H + + if return_singular_vectors: + w, x = eigsh(aH @ a, k=k, which=which, ncv=ncv, maxiter=maxiter, + tol=tol, return_eigenvectors=True) + else: + w = eigsh(aH @ a, k=k, which=which, ncv=ncv, maxiter=maxiter, tol=tol, + return_eigenvectors=False) + + w = cupy.maximum(w, 0) + t = w.dtype.char.lower() + factor = {'f': 1e3, 'd': 1e6} + cond = factor[t] * numpy.finfo(t).eps + cutoff = cond * cupy.max(w) + above_cutoff = (w > cutoff) + n_large = above_cutoff.sum().item() + s = cupy.zeros_like(w) + s[:n_large] = cupy.sqrt(w[above_cutoff]) + if not return_singular_vectors: + return s + + x = x[:, above_cutoff] + if m >= n: + v = x + u = a @ v / s[:n_large] + else: + u = x + v = a @ u / s[:n_large] + u = _augmented_orthnormal_cols(u, k - n_large) + v = _augmented_orthnormal_cols(v, k - n_large) + + return u, s, v.conj().T + + +def _augmented_orthnormal_cols(x, n_aug): + if n_aug <= 0: + return x + m, n = x.shape + y = cupy.empty((m, n + n_aug), dtype=x.dtype) + y[:, :n] = x + for i in range(n, n + n_aug): + v = cupy.random.random((m, )).astype(x.dtype) + v -= v @ y[:, :i].conj() @ y[:, :i].T + y[:, i] = v / cupy.linalg.norm(v) + return y diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_interface.py 
b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..7ee35e02654c738b91b545cc02a4392439ed7a69 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_interface.py @@ -0,0 +1,578 @@ +import warnings + +import cupy + +from cupyx.scipy import sparse +from cupyx.scipy.sparse import _util + + +class LinearOperator(object): + """LinearOperator(shape, matvec, rmatvec=None, matmat=None, dtype=None, \ +rmatmat=None) + + Common interface for performing matrix vector products + + To construct a concrete LinearOperator, either pass appropriate callables + to the constructor of this class, or subclass it. + + Args: + shape (tuple): Matrix dimensions ``(M, N)``. + matvec (callable f(v)): Returns returns ``A * v``. + rmatvec (callable f(v)): Returns ``A^H * v``, where ``A^H`` is the + conjugate transpose of ``A``. + matmat (callable f(V)): Returns ``A * V``, where ``V`` is a dense + matrix with dimensions ``(N, K)``. + dtype (dtype): Data type of the matrix. + rmatmat (callable f(V)): Returns ``A^H * V``, where ``V`` is a dense + matrix with dimensions ``(M, K)``. + + .. seealso:: :class:`scipy.sparse.linalg.LinearOperator` + """ + + ndim = 2 + + def __new__(cls, *args, **kwargs): + if cls is LinearOperator: + # Operate as _CustomLinearOperator factory. 
+ return super(LinearOperator, cls).__new__(_CustomLinearOperator) + else: + obj = super(LinearOperator, cls).__new__(cls) + + if (type(obj)._matvec == LinearOperator._matvec + and type(obj)._matmat == LinearOperator._matmat): + warnings.warn('LinearOperator subclass should implement' + ' at least one of _matvec and _matmat.', + category=RuntimeWarning, stacklevel=2) + + return obj + + def __init__(self, dtype, shape): + """Initialize this :class:`LinearOperator` + """ + if dtype is not None: + dtype = cupy.dtype(dtype) + + shape = tuple(shape) + if not _util.isshape(shape): + raise ValueError('invalid shape %r (must be 2-d)' % (shape,)) + + self.dtype = dtype + self.shape = shape + + def _init_dtype(self): + """Called from subclasses at the end of the `__init__` routine. + """ + if self.dtype is None: + v = cupy.zeros(self.shape[-1]) + self.dtype = self.matvec(v).dtype + + def _matmat(self, X): + """Default matrix-matrix multiplication handler. + """ + + return cupy.hstack([self.matvec(col.reshape(-1, 1)) for col in X.T]) + + def _matvec(self, x): + """Default matrix-vector multiplication handler. + """ + return self.matmat(x.reshape(-1, 1)) + + def matvec(self, x): + """Matrix-vector multiplication. + """ + + M, N = self.shape + + if x.shape != (N,) and x.shape != (N, 1): + raise ValueError('dimension mismatch') + + y = self._matvec(x) + + if x.ndim == 1: + y = y.reshape(M) + elif x.ndim == 2: + y = y.reshape(M, 1) + else: + raise ValueError('invalid shape returned by user-defined matvec()') + + return y + + def rmatvec(self, x): + """Adjoint matrix-vector multiplication. 
+ """ + + M, N = self.shape + + if x.shape != (M,) and x.shape != (M, 1): + raise ValueError('dimension mismatch') + + y = self._rmatvec(x) + + if x.ndim == 1: + y = y.reshape(N) + elif x.ndim == 2: + y = y.reshape(N, 1) + else: + raise ValueError( + 'invalid shape returned by user-defined rmatvec()') + + return y + + def _rmatvec(self, x): + """Default implementation of _rmatvec; defers to adjoint. + """ + if type(self)._adjoint == LinearOperator._adjoint: + # _adjoint not overridden, prevent infinite recursion + raise NotImplementedError + else: + return self.H.matvec(x) + + def matmat(self, X): + """Matrix-matrix multiplication. + """ + + if X.ndim != 2: + raise ValueError('expected 2-d ndarray or matrix, not %d-d' + % X.ndim) + + if X.shape[0] != self.shape[1]: + raise ValueError('dimension mismatch: %r, %r' + % (self.shape, X.shape)) + + Y = self._matmat(X) + + return Y + + def rmatmat(self, X): + """Adjoint matrix-matrix multiplication. + """ + + if X.ndim != 2: + raise ValueError('expected 2-d ndarray or matrix, not %d-d' + % X.ndim) + + if X.shape[0] != self.shape[0]: + raise ValueError('dimension mismatch: %r, %r' + % (self.shape, X.shape)) + + Y = self._rmatmat(X) + return Y + + def _rmatmat(self, X): + """Default implementation of _rmatmat defers to rmatvec or adjoint.""" + if type(self)._adjoint == LinearOperator._adjoint: + return cupy.hstack([self.rmatvec(col.reshape(-1, 1)) + for col in X.T]) + else: + return self.H.matmat(X) + + def __call__(self, x): + return self*x + + def __mul__(self, x): + return self.dot(x) + + def dot(self, x): + """Matrix-matrix or matrix-vector multiplication. 
+ """ + if isinstance(x, LinearOperator): + return _ProductLinearOperator(self, x) + elif cupy.isscalar(x): + return _ScaledLinearOperator(self, x) + else: + if x.ndim == 1 or x.ndim == 2 and x.shape[1] == 1: + return self.matvec(x) + elif x.ndim == 2: + return self.matmat(x) + else: + raise ValueError('expected 1-d or 2-d array, got %r' + % x) + + def __matmul__(self, other): + if cupy.isscalar(other): + raise ValueError('Scalar operands are not allowed, ' + 'use \'*\' instead') + return self.__mul__(other) + + def __rmatmul__(self, other): + if cupy.isscalar(other): + raise ValueError('Scalar operands are not allowed, ' + 'use \'*\' instead') + return self.__rmul__(other) + + def __rmul__(self, x): + if cupy.isscalar(x): + return _ScaledLinearOperator(self, x) + else: + return NotImplemented + + def __pow__(self, p): + if cupy.isscalar(p): + return _PowerLinearOperator(self, p) + else: + return NotImplemented + + def __add__(self, x): + if isinstance(x, LinearOperator): + return _SumLinearOperator(self, x) + else: + return NotImplemented + + def __neg__(self): + return _ScaledLinearOperator(self, -1) + + def __sub__(self, x): + return self.__add__(-x) + + def __repr__(self): + M, N = self.shape + if self.dtype is None: + dt = 'unspecified dtype' + else: + dt = 'dtype=' + str(self.dtype) + + return '<%dx%d %s with %s>' % (M, N, self.__class__.__name__, dt) + + def adjoint(self): + """Hermitian adjoint. + """ + return self._adjoint() + + H = property(adjoint) + + def transpose(self): + """Transpose this linear operator. 
+ """ + return self._transpose() + + T = property(transpose) + + def _adjoint(self): + """Default implementation of _adjoint; defers to rmatvec.""" + return _AdjointLinearOperator(self) + + def _transpose(self): + """ Default implementation of _transpose; defers to rmatvec + conj""" + return _TransposedLinearOperator(self) + + +class _CustomLinearOperator(LinearOperator): + """Linear operator defined in terms of user-specified operations.""" + + def __init__(self, shape, matvec, rmatvec=None, matmat=None, + dtype=None, rmatmat=None): + super(_CustomLinearOperator, self).__init__(dtype, shape) + + self.args = () + + self.__matvec_impl = matvec + self.__rmatvec_impl = rmatvec + self.__rmatmat_impl = rmatmat + self.__matmat_impl = matmat + + self._init_dtype() + + def _matmat(self, X): + if self.__matmat_impl is not None: + return self.__matmat_impl(X) + else: + return super(_CustomLinearOperator, self)._matmat(X) + + def _matvec(self, x): + return self.__matvec_impl(x) + + def _rmatvec(self, x): + func = self.__rmatvec_impl + if func is None: + raise NotImplementedError('rmatvec is not defined') + return self.__rmatvec_impl(x) + + def _rmatmat(self, X): + if self.__rmatmat_impl is not None: + return self.__rmatmat_impl(X) + else: + return super(_CustomLinearOperator, self)._rmatmat(X) + + def _adjoint(self): + return _CustomLinearOperator(shape=(self.shape[1], self.shape[0]), + matvec=self.__rmatvec_impl, + rmatvec=self.__matvec_impl, + matmat=self.__rmatmat_impl, + rmatmat=self.__matmat_impl, + dtype=self.dtype) + + +class _AdjointLinearOperator(LinearOperator): + """Adjoint of arbitrary Linear Operator""" + + def __init__(self, A): + shape = (A.shape[1], A.shape[0]) + super(_AdjointLinearOperator, self).__init__( + dtype=A.dtype, shape=shape) + self.A = A + self.args = (A,) + + def _matvec(self, x): + return self.A._rmatvec(x) + + def _rmatvec(self, x): + return self.A._matvec(x) + + def _matmat(self, x): + return self.A._rmatmat(x) + + def _rmatmat(self, x): + 
class _TransposedLinearOperator(LinearOperator):
    """Transposition of an arbitrary LinearOperator.

    Realized through the identity ``A.T x = conj(A^H conj(x))`` so that only
    the wrapped operator's forward/adjoint hooks are required.
    """

    def __init__(self, A):
        shape = (A.shape[1], A.shape[0])
        super(_TransposedLinearOperator, self).__init__(
            dtype=A.dtype, shape=shape)
        self.A = A
        self.args = (A,)

    def _matvec(self, x):
        # NB. cupy.conj works also on sparse matrices
        return cupy.conj(self.A._rmatvec(cupy.conj(x)))

    def _rmatvec(self, x):
        return cupy.conj(self.A._matvec(cupy.conj(x)))

    def _matmat(self, x):
        # NB. cupy.conj works also on sparse matrices
        return cupy.conj(self.A._rmatmat(cupy.conj(x)))

    def _rmatmat(self, x):
        return cupy.conj(self.A._matmat(cupy.conj(x)))


def _get_dtype(operators, dtypes=None):
    """Promote the dtypes of ``operators`` (plus optional extra ``dtypes``).

    Args:
        operators: iterable of objects; any that expose a ``dtype`` attribute
            participate in the promotion.
        dtypes: optional sequence of additional dtypes to include.

    Returns:
        The promoted dtype, as computed by :func:`cupy.result_type`.
    """
    # Fix: copy the caller-supplied list instead of appending to it in place;
    # the original implementation mutated the ``dtypes`` argument.
    dtypes = [] if dtypes is None else list(dtypes)
    for obj in operators:
        if obj is not None and hasattr(obj, 'dtype'):
            dtypes.append(obj.dtype)
    return cupy.result_type(*dtypes)


class _SumLinearOperator(LinearOperator):
    """Sum ``A + B`` of two linear operators of identical shape."""

    def __init__(self, A, B):
        if not isinstance(A, LinearOperator) or \
                not isinstance(B, LinearOperator):
            raise ValueError('both operands have to be a LinearOperator')
        if A.shape != B.shape:
            raise ValueError('cannot add %r and %r: shape mismatch'
                             % (A, B))
        self.args = (A, B)
        super(_SumLinearOperator, self).__init__(_get_dtype([A, B]), A.shape)

    def _matvec(self, x):
        return self.args[0].matvec(x) + self.args[1].matvec(x)

    def _rmatvec(self, x):
        return self.args[0].rmatvec(x) + self.args[1].rmatvec(x)

    def _rmatmat(self, x):
        return self.args[0].rmatmat(x) + self.args[1].rmatmat(x)

    def _matmat(self, x):
        return self.args[0].matmat(x) + self.args[1].matmat(x)

    def _adjoint(self):
        A, B = self.args
        return A.H + B.H


class _ProductLinearOperator(LinearOperator):
    """Composition ``A @ B`` of two conforming linear operators."""

    def __init__(self, A, B):
        if not isinstance(A, LinearOperator) or \
                not isinstance(B, LinearOperator):
            raise ValueError('both operands have to be a LinearOperator')
        if A.shape[1] != B.shape[0]:
            raise ValueError('cannot multiply %r and %r: shape mismatch'
                             % (A, B))
        super(_ProductLinearOperator, self).__init__(
            _get_dtype([A, B]), (A.shape[0], B.shape[1]))
        self.args = (A, B)

    def _matvec(self, x):
        return self.args[0].matvec(self.args[1].matvec(x))

    def _rmatvec(self, x):
        # (AB)^H = B^H A^H, hence the reversed application order.
        return self.args[1].rmatvec(self.args[0].rmatvec(x))

    def _rmatmat(self, x):
        return self.args[1].rmatmat(self.args[0].rmatmat(x))

    def _matmat(self, x):
        return self.args[0].matmat(self.args[1].matmat(x))

    def _adjoint(self):
        A, B = self.args
        return B.H * A.H


class _ScaledLinearOperator(LinearOperator):
    """A linear operator scaled by a scalar: ``alpha * A``."""

    def __init__(self, A, alpha):
        if not isinstance(A, LinearOperator):
            raise ValueError('LinearOperator expected as A')
        if not cupy.isscalar(alpha):
            raise ValueError('scalar expected as alpha')
        dtype = _get_dtype([A], [type(alpha)])
        super(_ScaledLinearOperator, self).__init__(dtype, A.shape)
        self.args = (A, alpha)

    def _matvec(self, x):
        return self.args[1] * self.args[0].matvec(x)

    def _rmatvec(self, x):
        # The adjoint of (alpha A) is conj(alpha) A^H.
        return cupy.conj(self.args[1]) * self.args[0].rmatvec(x)

    def _rmatmat(self, x):
        return cupy.conj(self.args[1]) * self.args[0].rmatmat(x)

    def _matmat(self, x):
        return self.args[1] * self.args[0].matmat(x)

    def _adjoint(self):
        A, alpha = self.args
        return A.H * cupy.conj(alpha)


class _PowerLinearOperator(LinearOperator):
    """Non-negative integer power ``A ** p`` of a square linear operator."""

    def __init__(self, A, p):
        if not isinstance(A, LinearOperator):
            raise ValueError('LinearOperator expected as A')
        if A.shape[0] != A.shape[1]:
            raise ValueError('square LinearOperator expected, got %r' % A)
        if not _util.isintlike(p) or p < 0:
            raise ValueError('non-negative integer expected as p')
        super(_PowerLinearOperator, self).__init__(_get_dtype([A]), A.shape)
        self.args = (A, p)

    def _power(self, fun, x):
        """Apply ``fun`` to a copy of ``x`` repeatedly, ``p`` times."""
        res = cupy.array(x, copy=True)
        for _ in range(self.args[1]):  # loop index unused
            res = fun(res)
        return res

    def _matvec(self, x):
        return self._power(self.args[0].matvec, x)

    def _rmatvec(self, x):
        return self._power(self.args[0].rmatvec, x)

    def _rmatmat(self, x):
        return self._power(self.args[0].rmatmat, x)

    def _matmat(self, x):
        return self._power(self.args[0].matmat, x)

    def _adjoint(self):
        A, p = self.args
        return A.H ** p


class MatrixLinearOperator(LinearOperator):
    """LinearOperator view over a concrete (dense or sparse) matrix."""

    def __init__(self, A):
        super(MatrixLinearOperator, self).__init__(A.dtype, A.shape)
        self.A = A
        self.__adj = None  # lazily-built, cached adjoint operator
        self.args = (A,)

    def _matmat(self, X):
        return self.A.dot(X)

    def _adjoint(self):
        if self.__adj is None:
            self.__adj = _AdjointMatrixOperator(self)
        return self.__adj


class _AdjointMatrixOperator(MatrixLinearOperator):
    """Adjoint of a :class:`MatrixLinearOperator`, sharing its storage."""

    def __init__(self, adjoint):
        # Deliberately skips super().__init__: shape is set by hand and
        # dtype is exposed through the read-only property below.
        self.A = adjoint.A.T.conj()
        self.__adjoint = adjoint
        self.args = (adjoint,)
        self.shape = adjoint.shape[1], adjoint.shape[0]

    @property
    def dtype(self):
        return self.__adjoint.dtype

    def _adjoint(self):
        return self.__adjoint


class IdentityOperator(LinearOperator):
    """The identity map ``I x = x`` as a LinearOperator."""

    def __init__(self, shape, dtype=None):
        super(IdentityOperator, self).__init__(dtype, shape)

    def _matvec(self, x):
        return x

    def _rmatvec(self, x):
        return x

    def _rmatmat(self, x):
        return x

    def _matmat(self, x):
        return x

    def _adjoint(self):
        return self
def aslinearoperator(A):
    """Return `A` as a LinearOperator.

    Args:
        A (array-like):
            The input array to be converted to a `LinearOperator` object.
            It may be any of the following types:

            * :class:`cupy.ndarray`
            * sparse matrix (e.g. ``csr_matrix``, ``coo_matrix``, etc.)
            * :class:`cupyx.scipy.sparse.linalg.LinearOperator`
            * object with ``.shape`` and ``.matvec`` attributes

    Returns:
        cupyx.scipy.sparse.linalg.LinearOperator: `LinearOperator` object

    .. seealso:: :func:`scipy.sparse.linalg.aslinearoperator`
    """
    # (docstring fix: the cross-reference previously pointed at the wrong
    # module path with a broken trailing backtick)
    if isinstance(A, LinearOperator):
        return A
    elif isinstance(A, cupy.ndarray):
        if A.ndim > 2:
            raise ValueError('array must have ndim <= 2')
        A = cupy.atleast_2d(A)
        return MatrixLinearOperator(A)
    elif sparse.isspmatrix(A):
        return MatrixLinearOperator(A)
    else:
        if hasattr(A, 'shape') and hasattr(A, 'matvec'):
            rmatvec = None
            rmatmat = None
            dtype = None
            if hasattr(A, 'rmatvec'):
                rmatvec = A.rmatvec
            if hasattr(A, 'rmatmat'):
                rmatmat = A.rmatmat
            if hasattr(A, 'dtype'):
                dtype = A.dtype
            return LinearOperator(A.shape, A.matvec, rmatvec=rmatvec,
                                  rmatmat=rmatmat, dtype=dtype)
        else:
            raise TypeError('type not understood')


def cg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None,
       atol=None):
    """Uses Conjugate Gradient iteration to solve ``Ax = b``.

    Args:
        A (ndarray, spmatrix or LinearOperator): The real or complex matrix
            of the linear system with shape ``(n, n)``. ``A`` must be a
            hermitian, positive definite matrix with type of
            :class:`cupy.ndarray`, :class:`cupyx.scipy.sparse.spmatrix` or
            :class:`cupyx.scipy.sparse.linalg.LinearOperator`.
        b (cupy.ndarray): Right hand side of the linear system with shape
            ``(n,)`` or ``(n, 1)``.
        x0 (cupy.ndarray): Starting guess for the solution.
        tol (float): Tolerance for convergence.
        maxiter (int): Maximum number of iterations.
        M (ndarray, spmatrix or LinearOperator): Preconditioner for ``A``.
            The preconditioner should approximate the inverse of ``A``.
        callback (function): User-specified function to call after each
            iteration. It is called as ``callback(xk)``, where ``xk`` is the
            current solution vector.
        atol (float): Tolerance for convergence.

    Returns:
        tuple:
            It returns ``x`` (cupy.ndarray) and ``info`` (int) where ``x`` is
            the converged solution and ``info`` provides convergence
            information.

    .. seealso:: :func:`scipy.sparse.linalg.cg`
    """
    A, M, x, b = _make_system(A, M, x0, b)
    matvec = A.matvec
    psolve = M.matvec

    n = A.shape[0]
    if maxiter is None:
        maxiter = n * 10
    if n == 0:
        return cupy.empty_like(b), 0
    b_norm = cupy.linalg.norm(b)
    if b_norm == 0:
        return b, 0
    if atol is None:
        atol = tol * float(b_norm)
    else:
        atol = max(float(atol), tol * float(b_norm))

    r = b - matvec(x)
    iters = 0
    rho = 0
    # Fix: seed the residual norm before the loop so the convergence test
    # after it is well-defined even when the body never runs (maxiter <= 0);
    # previously that path raised NameError on ``resid``.
    resid = cublas.nrm2(r)
    while iters < maxiter:
        z = psolve(r)
        rho1 = rho
        rho = cublas.dotc(r, z)
        if iters == 0:
            p = z
        else:
            beta = rho / rho1
            p = z + beta * p
        q = matvec(p)
        alpha = rho / cublas.dotc(p, q)
        x = x + alpha * p
        r = r - alpha * q
        iters += 1
        if callback is not None:
            callback(x)
        resid = cublas.nrm2(r)
        if resid <= atol:
            break

    info = 0
    if iters == maxiter and not (resid <= atol):
        info = iters

    return x, info


def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None,
          callback=None, atol=None, callback_type=None):
    """Uses Generalized Minimal RESidual iteration to solve ``Ax = b``.

    Args:
        A (ndarray, spmatrix or LinearOperator): The real or complex
            matrix of the linear system with shape ``(n, n)``. ``A`` must be
            :class:`cupy.ndarray`, :class:`cupyx.scipy.sparse.spmatrix` or
            :class:`cupyx.scipy.sparse.linalg.LinearOperator`.
        b (cupy.ndarray): Right hand side of the linear system with shape
            ``(n,)`` or ``(n, 1)``.
        x0 (cupy.ndarray): Starting guess for the solution.
        tol (float): Tolerance for convergence.
        restart (int): Number of iterations between restarts. Larger values
            increase iteration cost, but may be necessary for convergence.
        maxiter (int): Maximum number of iterations.
        M (ndarray, spmatrix or LinearOperator): Preconditioner for ``A``.
            The preconditioner should approximate the inverse of ``A``.
        callback (function): User-specified function to call on every restart.
            It is called as ``callback(arg)``, where ``arg`` is selected by
            ``callback_type``.
        callback_type (str): 'x' or 'pr_norm'. If 'x', the current solution
            vector is used as an argument of callback function. if 'pr_norm',
            relative (preconditioned) residual norm is used as an argument.
        atol (float): Tolerance for convergence.

    Returns:
        tuple:
            It returns ``x`` (cupy.ndarray) and ``info`` (int) where ``x`` is
            the converged solution and ``info`` provides convergence
            information.

    Reference:
        M. Wang, H. Klie, M. Parashar and H. Sudan, "Solving Sparse Linear
        Systems on NVIDIA Tesla GPUs", ICCS 2009 (2009).

    .. seealso:: :func:`scipy.sparse.linalg.gmres`
    """
    A, M, x, b = _make_system(A, M, x0, b)
    matvec = A.matvec
    psolve = M.matvec

    n = A.shape[0]
    if n == 0:
        return cupy.empty_like(b), 0
    b_norm = cupy.linalg.norm(b)
    if b_norm == 0:
        return b, 0
    if atol is None:
        atol = tol * float(b_norm)
    else:
        atol = max(float(atol), tol * float(b_norm))
    if maxiter is None:
        maxiter = n * 10
    if restart is None:
        restart = 20
    restart = min(restart, n)
    if callback_type is None:
        callback_type = 'pr_norm'
    if callback_type not in ('x', 'pr_norm'):
        raise ValueError('Unknown callback_type: {}'.format(callback_type))
    if callback is None:
        callback_type = None

    V = cupy.empty((n, restart), dtype=A.dtype, order='F')
    H = cupy.zeros((restart+1, restart), dtype=A.dtype, order='F')
    e = numpy.zeros((restart+1,), dtype=A.dtype)

    compute_hu = _make_compute_hu(V)

    iters = 0
    while True:
        mx = psolve(x)
        r = b - matvec(mx)
        r_norm = cublas.nrm2(r)
        if callback_type == 'x':
            callback(mx)
        elif callback_type == 'pr_norm' and iters > 0:
            callback(r_norm / b_norm)
        if r_norm <= atol or iters >= maxiter:
            break
        v = r / r_norm
        V[:, 0] = v
        e[0] = r_norm

        # Arnoldi iteration
        for j in range(restart):
            z = psolve(v)
            u = matvec(z)
            H[:j+1, j], u = compute_hu(u, j)
            cublas.nrm2(u, out=H[j+1, j])
            if j+1 < restart:
                v = u / H[j+1, j]
                V[:, j+1] = v

        # Note: The least-square solution to equation Hy = e is computed on
        # CPU because it is faster if the matrix size is small.
        # Fix: pass rcond=None explicitly — the implicit default raises a
        # FutureWarning in NumPy; for this full-rank Hessenberg system the
        # solution is unchanged.
        ret = numpy.linalg.lstsq(cupy.asnumpy(H), e, rcond=None)
        y = cupy.array(ret[0])
        x += V @ y
        iters += restart

    info = 0
    if iters == maxiter and not (r_norm <= atol):
        info = iters
    return mx, info


def cgs(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None,
        atol=None):
    """Use Conjugate Gradient Squared iteration to solve ``Ax = b``.

    Args:
        A (ndarray, spmatrix or LinearOperator): The real or complex matrix
            of the linear system with shape ``(n, n)``.
        b (cupy.ndarray): Right hand side of the linear system with shape
            ``(n,)`` or ``(n, 1)``.
        x0 (cupy.ndarray): Starting guess for the solution.
        tol (float): Tolerance for convergence.
        maxiter (int): Maximum number of iterations.
        M (ndarray, spmatrix or LinearOperator): Preconditioner for ``A``.
            The preconditioner should approximate the inverse of ``A``.
        callback (function): User-specified function to call after each
            iteration. It is called as ``callback(xk)``, where ``xk`` is the
            current solution vector.
        atol (float): Tolerance for convergence.

    Returns:
        tuple:
            It returns ``x`` (cupy.ndarray) and ``info`` (int) where ``x`` is
            the converged solution and ``info`` provides convergence
            information.

    .. seealso:: :func:`scipy.sparse.linalg.cgs`
    """
    A, M, x, b = _make_system(A, M, x0, b)

    matvec = A.matvec
    psolve = M.matvec

    n = A.shape[0]
    if n == 0:
        return cupy.empty_like(b), 0
    b_norm = cupy.linalg.norm(b)
    if b_norm == 0:
        return b, 0
    if atol is None:
        atol = tol * float(b_norm)
    else:
        atol = max(float(atol), tol * float(b_norm))
    if maxiter is None:
        maxiter = n * 5

    r0 = b - matvec(x)

    rho = cupy.dot(r0, r0)

    # initialise vectors
    r = r0.copy()
    u = r0
    p = r0.copy()

    iters = 0
    while True:
        y = psolve(p)
        v = matvec(y)
        sigma = cupy.dot(r0, v)
        alpha = rho / sigma
        q = u - alpha * v

        z = psolve(u + q)
        x += alpha * z
        Az = matvec(z)
        r -= alpha * Az

        # Update residual norm and check convergence
        r_norm = cupy.linalg.norm(r)

        iters += 1
        if callback is not None:
            callback(x)

        if r_norm <= atol or iters >= maxiter:
            break

        rho_new = cupy.dot(r0, r)
        beta = rho_new / rho
        rho = rho_new
        u = r + beta * q
        # p <- u + beta * (q + beta * p), evaluated in place:
        p *= beta
        p += q
        p *= beta
        p += u

    info = 0
    if iters == maxiter and not (r_norm < atol):
        info = iters

    return x, info


def _make_system(A, M, x0, b):
    """Make a linear system Ax = b

    Args:
        A (cupy.ndarray or cupyx.scipy.sparse.spmatrix or
            cupyx.scipy.sparse.LinearOperator): sparse or dense matrix.
        M (cupy.ndarray or cupyx.scipy.sparse.spmatrix or
            cupyx.scipy.sparse.LinearOperator): preconditioner.
        x0 (cupy.ndarray): initial guess to iterative method.
        b (cupy.ndarray): right hand side.

    Returns:
        tuple:
            It returns (A, M, x, b).
            A (LinearOperator): matrix of linear system
            M (LinearOperator): preconditioner
            x (cupy.ndarray): initial guess
            b (cupy.ndarray): right hand side.
    """
    fast_matvec = _make_fast_matvec(A)
    A = _interface.aslinearoperator(A)
    if fast_matvec is not None:
        A = _interface.LinearOperator(A.shape, matvec=fast_matvec,
                                      rmatvec=A.rmatvec, dtype=A.dtype)
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix (shape: {})'.format(A.shape))
    if A.dtype.char not in 'fdFD':
        # Fix: corrected 'unsupprted' typo in the error message.
        raise TypeError('unsupported dtype (actual: {})'.format(A.dtype))
    n = A.shape[0]
    if not (b.shape == (n,) or b.shape == (n, 1)):
        raise ValueError('b has incompatible dimensions')
    b = b.astype(A.dtype).ravel()
    if x0 is None:
        x = cupy.zeros((n,), dtype=A.dtype)
    else:
        if not (x0.shape == (n,) or x0.shape == (n, 1)):
            raise ValueError('x0 has incompatible dimensions')
        x = x0.astype(A.dtype).ravel()
    if M is None:
        M = _interface.IdentityOperator(shape=A.shape, dtype=A.dtype)
    else:
        fast_matvec = _make_fast_matvec(M)
        M = _interface.aslinearoperator(M)
        if fast_matvec is not None:
            M = _interface.LinearOperator(M.shape, matvec=fast_matvec,
                                          rmatvec=M.rmatvec, dtype=M.dtype)
        if A.shape != M.shape:
            raise ValueError('matrix and preconditioner have different shapes')
    return A, M, x, b


def _make_fast_matvec(A):
    """Build a cuSPARSE SpMV-backed matvec for a CSR matrix, or return None.

    The descriptor, scalar buffers and workspace are created once here and
    captured by the returned closure, so per-call overhead stays low.
    """
    from cupy_backends.cuda.libs import cusparse as _cusparse
    from cupyx import cusparse

    if _csr.isspmatrix_csr(A) and cusparse.check_availability('spmv'):
        handle = device.get_cusparse_handle()
        op_a = _cusparse.CUSPARSE_OPERATION_NON_TRANSPOSE
        alpha = numpy.array(1.0, A.dtype)
        beta = numpy.array(0.0, A.dtype)
        cuda_dtype = _dtype.to_cuda_dtype(A.dtype)
        alg = _cusparse.CUSPARSE_MV_ALG_DEFAULT
        x = cupy.empty((A.shape[0],), dtype=A.dtype)
        y = cupy.empty((A.shape[0],), dtype=A.dtype)
        desc_A = cusparse.SpMatDescriptor.create(A)
        desc_x = cusparse.DnVecDescriptor.create(x)
        desc_y = cusparse.DnVecDescriptor.create(y)
        buff_size = _cusparse.spMV_bufferSize(
            handle, op_a, alpha.ctypes.data, desc_A.desc, desc_x.desc,
            beta.ctypes.data, desc_y.desc, cuda_dtype, alg)
        buff = cupy.empty(buff_size, cupy.int8)
        # The probe vectors were only needed for workspace sizing.
        del x, desc_x, y, desc_y

        def matvec(x):
            y = cupy.empty_like(x)
            desc_x = cusparse.DnVecDescriptor.create(x)
            desc_y = cusparse.DnVecDescriptor.create(y)
            _cusparse.spMV(
                handle, op_a, alpha.ctypes.data, desc_A.desc, desc_x.desc,
                beta.ctypes.data, desc_y.desc, cuda_dtype, alg, buff.data.ptr)
            return y

        return matvec
    return None


def _make_compute_hu(V):
    """Build the Arnoldi projection helper used by :func:`gmres`.

    The returned closure computes ``h = V[:, :j+1]^H u`` and orthogonalizes
    ``u`` against the first ``j+1`` Krylov basis vectors via two cuBLAS gemv
    calls.
    """
    handle = device.get_cublas_handle()
    if V.dtype.char == 'f':
        gemv = _cublas.sgemv
    elif V.dtype.char == 'd':
        gemv = _cublas.dgemv
    elif V.dtype.char == 'F':
        gemv = _cublas.cgemv
    elif V.dtype.char == 'D':
        gemv = _cublas.zgemv
    else:
        # Fix: previously an unsupported dtype left ``gemv`` unbound and
        # surfaced later as a confusing NameError.
        raise TypeError('unsupported dtype (actual: {})'.format(V.dtype))
    n = V.shape[0]
    one = numpy.array(1.0, V.dtype)
    zero = numpy.array(0.0, V.dtype)
    mone = numpy.array(-1.0, V.dtype)

    def compute_hu(u, j):
        # h = V[:, :j+1].conj().T @ u
        # u -= V[:, :j+1] @ h
        h = cupy.empty((j+1,), dtype=V.dtype)
        gemv(handle, _cublas.CUBLAS_OP_C, n, j+1, one.ctypes.data, V.data.ptr,
             n, u.data.ptr, 1, zero.ctypes.data, h.data.ptr, 1)
        gemv(handle, _cublas.CUBLAS_OP_N, n, j+1, mone.ctypes.data, V.data.ptr,
             n, h.data.ptr, 1, one.ctypes.data, u.data.ptr, 1)
        return h, u
    return compute_hu
# TODO: This helper function can be replaced after cupy.block is supported
def _bmat(list_obj):
    """Assemble one dense matrix from a 2-D list of dense cupy blocks.

    Row heights are taken from the first column and column widths from the
    first row; the output dtype is the promotion of all block dtypes, and
    Fortran order is used only when every block is F- but not C-contiguous.
    """
    n_rows = len(list_obj)
    n_cols = len(list_obj[0])
    total_rows = sum(list_obj[i][0].shape[0] for i in range(n_rows))
    total_cols = sum(list_obj[0][j].shape[1] for j in range(n_cols))
    all_blocks = [arr for block_row in list_obj for arr in block_row]
    dtype = cupy.result_type(*[arr.dtype for arr in all_blocks])
    f_order = all(arr.flags['F_CONTIGUOUS'] for arr in all_blocks)
    c_order = all(arr.flags['C_CONTIGUOUS'] for arr in all_blocks)
    order = 'F' if f_order and not c_order else 'C'
    result = cupy.empty((total_rows, total_cols), dtype=dtype, order=order)

    row_start = 0
    for block_row in list_obj:
        row_end = row_start + block_row[0].shape[0]
        col_start = 0
        for block in block_row:
            col_end = col_start + block.shape[1]
            result[row_start:row_end, col_start:col_end] = block
            col_start = col_end
        row_start = row_end
    return result


def _report_nonhermitian(M, name):
    """Emit a UserWarning if ``M`` deviates from Hermitian symmetry
    beyond a dtype-scaled tolerance.
    """
    md = M - M.T.conj()
    nmd = linalg.norm(md, 1)
    tol = 10 * cupy.finfo(M.dtype).eps * max(1, float(linalg.norm(M, 1)))
    if nmd > tol:
        warnings.warn(
            f'Matrix {name} of the type {M.dtype} is not Hermitian: '
            f'condition: {nmd} < {tol} fails.',
            UserWarning, stacklevel=4)


def _as2d(ar):
    """Return ``ar`` as 2-D: pass 2-D input through, reshape 1-D input
    into a single-column view.
    """
    if ar.ndim == 2:
        return ar
    # Assume 1-D input; give it a trailing unit axis without copying.
    column = cupy.array(ar, copy=False)
    column.shape = (ar.shape[0], 1)
    return column


def _makeOperator(operatorInput, expectedShape):
    """Wrap the input (dense array, sparse matrix or callable-like) as a
    LinearOperator of the expected shape; ``None`` passes through.
    """
    if operatorInput is None:
        return None
    operator = splinalg.aslinearoperator(operatorInput)
    if operator.shape != expectedShape:
        raise ValueError('operator has invalid shape')
    return operator


def _applyConstraints(blockVectorV, YBY, blockVectorBY, blockVectorY):
    """Changes blockVectorV in place, projecting it B-orthogonally away
    from the constraint space spanned by Y.
    """
    YBV = cupy.dot(blockVectorBY.T.conj(), blockVectorV)
    # awaiting the implementation of cho_solve in PR #4172
    # tmp = cho_solve(factYBY, YBV)
    coeffs = linalg.solve(YBY, YBV)
    blockVectorV -= cupy.dot(blockVectorY, coeffs)


def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
    """B-orthonormalize the given block vector using Cholesky.

    On a Cholesky failure all outputs degrade to ``None`` so the caller can
    detect the breakdown.
    """
    normalization = blockVectorV.max(
        axis=0) + cupy.finfo(blockVectorV.dtype).eps
    V = blockVectorV / normalization
    if blockVectorBV is None:
        BV = B(V) if B is not None else V
    else:
        BV = blockVectorBV / normalization
    VBV = cupy.matmul(V.T.conj(), BV)
    try:
        # VBV becomes the inverse-transposed Cholesky factor.
        VBV = _cholesky(VBV)
        VBV = linalg.inv(VBV.T)
        V = cupy.matmul(V, VBV)
        BV = cupy.matmul(BV, VBV) if B is not None else None
    except numpy.linalg.LinAlgError:
        # Cholesky can fail in rare (near-rank-deficient) cases.
        V = None
        BV = None
        VBV = None

    if retInvR:
        return V, BV, VBV, normalization
    return V, BV
option.""" + ii = cupy.argsort(_lambda) + if largest: + ii = ii[:-num - 1:-1] + else: + ii = ii[:num] + return ii + + +# TODO: This helper function can be replaced after cupy.eigh +# supports generalized eigen value problems. +def _eigh(A, B=None): + """ + Helper function for converting a generalized eigenvalue problem + A(X) = lambda(B(X)) to standard eigen value problem using cholesky + transformation + """ + if B is None: # use cupy's eigh in standard case + vals, vecs = linalg.eigh(A) + return vals, vecs + R = _cholesky(B) + RTi = linalg.inv(R) + Ri = linalg.inv(R.T) + F = cupy.matmul(RTi, cupy.matmul(A, Ri)) + vals, vecs = linalg.eigh(F) + eigVec = cupy.matmul(Ri, vecs) + return vals, eigVec + + +def lobpcg(A, X, + B=None, M=None, Y=None, + tol=None, maxiter=None, + largest=True, verbosityLevel=0, + retLambdaHistory=False, retResidualNormsHistory=False): + """Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG) + + LOBPCG is a preconditioned eigensolver for large symmetric positive + definite (SPD) generalized eigenproblems. + + Args: + A (array-like): The symmetric linear operator of the problem, + usually a sparse matrix. Can be of the following types + - cupy.ndarray + - cupyx.scipy.sparse.csr_matrix + - cupy.scipy.sparse.linalg.LinearOperator + X (cupy.ndarray): Initial approximation to the ``k`` + eigenvectors (non-sparse). If `A` has ``shape=(n,n)`` + then `X` should have shape ``shape=(n,k)``. + B (array-like): The right hand side operator in a generalized + eigenproblem. By default, ``B = Identity``. + Can be of following types: + - cupy.ndarray + - cupyx.scipy.sparse.csr_matrix + - cupy.scipy.sparse.linalg.LinearOperator + M (array-like): Preconditioner to `A`; by default ``M = Identity``. + `M` should approximate the inverse of `A`. 
+ Can be of the following types: + - cupy.ndarray + - cupyx.scipy.sparse.csr_matrix + - cupy.scipy.sparse.linalg.LinearOperator + Y (cupy.ndarray): + `n-by-sizeY` matrix of constraints (non-sparse), `sizeY < n` + The iterations will be performed in the B-orthogonal complement + of the column-space of Y. Y must be full rank. + tol (float): + Solver tolerance (stopping criterion). + The default is ``tol=n*sqrt(eps)``. + maxiter (int): + Maximum number of iterations. The default is ``maxiter = 20``. + largest (bool): + When True, solve for the largest eigenvalues, + otherwise the smallest. + verbosityLevel (int): + Controls solver output. The default is ``verbosityLevel=0``. + retLambdaHistory (bool): + Whether to return eigenvalue history. Default is False. + retResidualNormsHistory (bool): + Whether to return history of residual norms. Default is False. + + Returns: + tuple: + - `w` (cupy.ndarray): Array of ``k`` eigenvalues + - `v` (cupy.ndarray) An array of ``k`` eigenvectors. + `v` has the same shape as `X`. + - `lambdas` (list of cupy.ndarray): The eigenvalue history, + if `retLambdaHistory` is True. + - `rnorms` (list of cupy.ndarray): The history of residual norms, + if `retResidualNormsHistory` is True. + + .. seealso:: :func:`scipy.sparse.linalg.lobpcg` + + .. note:: + If both ``retLambdaHistory`` and ``retResidualNormsHistory`` are `True` + the return tuple has the following format + ``(lambda, V, lambda history, residual norms history)``. 
+ """ + blockVectorX = X + blockVectorY = Y + residualTolerance = tol + + if maxiter is None: + maxiter = 20 + + if blockVectorY is not None: + sizeY = blockVectorY.shape[1] + else: + sizeY = 0 + + if len(blockVectorX.shape) != 2: + raise ValueError('expected rank-2 array for argument X') + + n, sizeX = blockVectorX.shape + + if verbosityLevel: + aux = "Solving " + if B is None: + aux += "standard" + else: + aux += "generalized" + aux += " eigenvalue problem with" + if M is None: + aux += "out" + aux += " preconditioning\n\n" + aux += "matrix size %d\n" % n + aux += "block size %d\n\n" % sizeX + if blockVectorY is None: + aux += "No constraints\n\n" + else: + if sizeY > 1: + aux += "%d constraints\n\n" % sizeY + else: + aux += "%d constraint\n\n" % sizeY + print(aux) + + A = _makeOperator(A, (n, n)) + B = _makeOperator(B, (n, n)) + M = _makeOperator(M, (n, n)) + + if (n - sizeY) < (5 * sizeX): + # The problem size is small compared to the block size. + # Using dense general eigensolver instead of LOBPCG. + sizeX = min(sizeX, n) + + if blockVectorY is not None: + raise NotImplementedError('The dense eigensolver ' + 'does not support constraints.') + + A_dense = A(cupy.eye(n, dtype=A.dtype)) + B_dense = None if B is None else B(cupy.eye(n, dtype=B.dtype)) + + # call numerically unstable general eigen solver + vals, vecs = _eigh(A_dense, B_dense) + if largest: + # Reverse order to be compatible with eigs() in 'LM' mode. + vals = vals[::-1] + vecs = vecs[:, ::-1] + + vals = vals[:sizeX] + vecs = vecs[:, :sizeX] + + return vals, vecs + + if (residualTolerance is None) or (residualTolerance <= 0.0): + residualTolerance = cupy.sqrt(1e-15) * n + + # Apply constraints to X. + if blockVectorY is not None: + + if B is not None: + blockVectorBY = B(blockVectorY) + else: + blockVectorBY = blockVectorY + + # gramYBY is a dense array. 
+ gramYBY = cupy.dot(blockVectorY.T.conj(), blockVectorBY) + + # awaiting implementation of cho_factor in PR #4172 + # try: + # gramYBY is a Cholesky factor from now on... + # gramYBY = cho_factor(gramYBY) + # except numpy.linalg.LinAlgError: + # raise ValueError("cannot handle linearly dependent constraints") + + _applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY) + + # B-orthonormalize X. + blockVectorX, blockVectorBX = _b_orthonormalize(B, blockVectorX) + + # Compute the initial Ritz vectors: solve the eigenproblem. + blockVectorAX = A(blockVectorX) + gramXAX = cupy.dot(blockVectorX.T.conj(), blockVectorAX) + + _lambda, eigBlockVector = _eigh(gramXAX) + ii = _get_indx(_lambda, sizeX, largest) + _lambda = _lambda[ii] + + eigBlockVector = cupy.asarray(eigBlockVector[:, ii]) + blockVectorX = cupy.dot(blockVectorX, eigBlockVector) + blockVectorAX = cupy.dot(blockVectorAX, eigBlockVector) + if B is not None: + blockVectorBX = cupy.dot(blockVectorBX, eigBlockVector) + + # Active index set. + activeMask = cupy.ones((sizeX,), dtype=bool) + + lambdaHistory = [_lambda] + residualNormsHistory = [] + + previousBlockSize = sizeX + ident = cupy.eye(sizeX, dtype=A.dtype) + ident0 = cupy.eye(sizeX, dtype=A.dtype) + + ## + # Main iteration loop. 
+ + blockVectorP = None # set during iteration + blockVectorAP = None + blockVectorBP = None + + iterationNumber = -1 + restart = True + explicitGramFlag = False + while iterationNumber < maxiter: + iterationNumber += 1 + + if B is not None: + aux = blockVectorBX * _lambda[cupy.newaxis, :] + else: + aux = blockVectorX * _lambda[cupy.newaxis, :] + + blockVectorR = blockVectorAX - aux + + aux = cupy.sum(blockVectorR.conj() * blockVectorR, 0) + residualNorms = cupy.sqrt(aux) + + residualNormsHistory.append(residualNorms) + + ii = cupy.where(residualNorms > residualTolerance, True, False) + activeMask = activeMask & ii + + currentBlockSize = int(activeMask.sum()) + if currentBlockSize != previousBlockSize: + previousBlockSize = currentBlockSize + ident = cupy.eye(currentBlockSize, dtype=A.dtype) + + if currentBlockSize == 0: + break + + if verbosityLevel > 0: + print('iteration %d' % iterationNumber) + print(f'current block size: {currentBlockSize}') + print(f'eigenvalue(s):\n{_lambda}') + print(f'residual norm(s):\n{residualNorms}') + if verbosityLevel > 10: + print(eigBlockVector) + + activeBlockVectorR = _as2d(blockVectorR[:, activeMask]) + + if iterationNumber > 0: + activeBlockVectorP = _as2d(blockVectorP[:, activeMask]) + activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask]) + if B is not None: + activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask]) + + if M is not None: + # Apply preconditioner T to the active residuals. + activeBlockVectorR = M(activeBlockVectorR) + + # Apply constraints to the preconditioned residuals. + if blockVectorY is not None: + _applyConstraints(activeBlockVectorR, + gramYBY, blockVectorBY, blockVectorY) + + # B-orthogonalize the preconditioned residuals to X. 
+ if B is not None: + activeBlockVectorR = activeBlockVectorR\ + - cupy.matmul(blockVectorX, + cupy + .matmul(blockVectorBX.T.conj(), + activeBlockVectorR)) + else: + activeBlockVectorR = activeBlockVectorR - \ + cupy.matmul(blockVectorX, + cupy.matmul(blockVectorX.T.conj(), + activeBlockVectorR)) + + ## + # B-orthonormalize the preconditioned residuals. + aux = _b_orthonormalize(B, activeBlockVectorR) + activeBlockVectorR, activeBlockVectorBR = aux + + activeBlockVectorAR = A(activeBlockVectorR) + + if iterationNumber > 0: + if B is not None: + aux = _b_orthonormalize(B, activeBlockVectorP, + activeBlockVectorBP, retInvR=True) + activeBlockVectorP, activeBlockVectorBP, invR, normal = aux + else: + aux = _b_orthonormalize(B, activeBlockVectorP, retInvR=True) + activeBlockVectorP, _, invR, normal = aux + # Function _b_orthonormalize returns None if Cholesky fails + if activeBlockVectorP is not None: + activeBlockVectorAP = activeBlockVectorAP / normal + activeBlockVectorAP = cupy.dot(activeBlockVectorAP, invR) + restart = False + else: + restart = True + + ## + # Perform the Rayleigh Ritz Procedure: + # Compute symmetric Gram matrices: + + if activeBlockVectorAR.dtype == 'float32': + myeps = 1 + elif activeBlockVectorR.dtype == 'float32': + myeps = 1e-4 + else: + myeps = 1e-8 + + if residualNorms.max() > myeps and not explicitGramFlag: + explicitGramFlag = False + else: + # Once explicitGramFlag, forever explicitGramFlag. 
+ explicitGramFlag = True + + # Shared memory assignments to simplify the code + if B is None: + blockVectorBX = blockVectorX + activeBlockVectorBR = activeBlockVectorR + if not restart: + activeBlockVectorBP = activeBlockVectorP + + # Common submatrices: + gramXAR = cupy.dot(blockVectorX.T.conj(), activeBlockVectorAR) + gramRAR = cupy.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR) + + if explicitGramFlag: + gramRAR = (gramRAR + gramRAR.T.conj()) / 2 + gramXAX = cupy.dot(blockVectorX.T.conj(), blockVectorAX) + gramXAX = (gramXAX + gramXAX.T.conj()) / 2 + gramXBX = cupy.dot(blockVectorX.T.conj(), blockVectorBX) + gramRBR = cupy.dot(activeBlockVectorR.T.conj(), + activeBlockVectorBR) + gramXBR = cupy.dot(blockVectorX.T.conj(), activeBlockVectorBR) + else: + gramXAX = cupy.diag(_lambda) + gramXBX = ident0 + gramRBR = ident + gramXBR = cupy.zeros((int(sizeX), int(currentBlockSize)), + dtype=A.dtype) + + def _handle_gramA_gramB_verbosity(gramA, gramB): + if verbosityLevel > 0: + _report_nonhermitian(gramA, 'gramA') + _report_nonhermitian(gramB, 'gramB') + if verbosityLevel > 10: + # Note: not documented, but leave it in here for now + numpy.savetxt('gramA.txt', cupy.asnumpy(gramA)) + numpy.savetxt('gramB.txt', cupy.asnumpy(gramB)) + + if not restart: + gramXAP = cupy.dot(blockVectorX.T.conj(), activeBlockVectorAP) + gramRAP = cupy.dot(activeBlockVectorR.T.conj(), + activeBlockVectorAP) + gramPAP = cupy.dot(activeBlockVectorP.T.conj(), + activeBlockVectorAP) + gramXBP = cupy.dot(blockVectorX.T.conj(), activeBlockVectorBP) + gramRBP = cupy.dot(activeBlockVectorR.T.conj(), + activeBlockVectorBP) + if explicitGramFlag: + gramPAP = (gramPAP + gramPAP.T.conj()) / 2 + gramPBP = cupy.dot(activeBlockVectorP.T.conj(), + activeBlockVectorBP) + else: + gramPBP = ident + + gramA = _bmat([[gramXAX, gramXAR, gramXAP], + [gramXAR.T.conj(), gramRAR, gramRAP], + [gramXAP.T.conj(), gramRAP.T.conj(), gramPAP]]) + gramB = _bmat([[gramXBX, gramXBR, gramXBP], + [gramXBR.T.conj(), 
gramRBR, gramRBP], + [gramXBP.T.conj(), gramRBP.T.conj(), gramPBP]]) + + _handle_gramA_gramB_verbosity(gramA, gramB) + + try: + _lambda, eigBlockVector = _eigh(gramA, gramB) + except numpy.linalg.LinAlgError: + # try again after dropping the direction vectors P from RR + restart = True + + if restart: + gramA = _bmat([[gramXAX, gramXAR], + [gramXAR.T.conj(), gramRAR]]) + gramB = _bmat([[gramXBX, gramXBR], + [gramXBR.T.conj(), gramRBR]]) + + _handle_gramA_gramB_verbosity(gramA, gramB) + + try: + _lambda, eigBlockVector = _eigh(gramA, gramB) + except numpy.linalg.LinAlgError: + raise ValueError('eigh has failed in lobpcg iterations') + + ii = _get_indx(_lambda, sizeX, largest) + if verbosityLevel > 10: + print(ii) + print(_lambda) + + _lambda = _lambda[ii] + eigBlockVector = eigBlockVector[:, ii] + + lambdaHistory.append(_lambda) + + if verbosityLevel > 10: + print('lambda:', _lambda) + + if verbosityLevel > 10: + print(eigBlockVector) + + # Compute Ritz vectors. + if B is not None: + if not restart: + eigBlockVectorX = eigBlockVector[:sizeX] + eigBlockVectorR = eigBlockVector[sizeX:sizeX + + currentBlockSize] + eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:] + + pp = cupy.dot(activeBlockVectorR, eigBlockVectorR) + pp += cupy.dot(activeBlockVectorP, eigBlockVectorP) + + app = cupy.dot(activeBlockVectorAR, eigBlockVectorR) + app += cupy.dot(activeBlockVectorAP, eigBlockVectorP) + + bpp = cupy.dot(activeBlockVectorBR, eigBlockVectorR) + bpp += cupy.dot(activeBlockVectorBP, eigBlockVectorP) + else: + eigBlockVectorX = eigBlockVector[:sizeX] + eigBlockVectorR = eigBlockVector[sizeX:] + + pp = cupy.dot(activeBlockVectorR, eigBlockVectorR) + app = cupy.dot(activeBlockVectorAR, eigBlockVectorR) + bpp = cupy.dot(activeBlockVectorBR, eigBlockVectorR) + + if verbosityLevel > 10: + print(pp) + print(app) + print(bpp) + + blockVectorX = cupy.dot(blockVectorX, eigBlockVectorX) + pp + blockVectorAX = cupy.dot(blockVectorAX, eigBlockVectorX) + app + blockVectorBX = 
cupy.dot(blockVectorBX, eigBlockVectorX) + bpp + + blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp + + else: + if not restart: + eigBlockVectorX = eigBlockVector[:sizeX] + eigBlockVectorR = eigBlockVector[sizeX:sizeX + + currentBlockSize] + eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:] + + pp = cupy.dot(activeBlockVectorR, eigBlockVectorR) + pp += cupy.dot(activeBlockVectorP, eigBlockVectorP) + + app = cupy.dot(activeBlockVectorAR, eigBlockVectorR) + app += cupy.dot(activeBlockVectorAP, eigBlockVectorP) + else: + eigBlockVectorX = eigBlockVector[:sizeX] + eigBlockVectorR = eigBlockVector[sizeX:] + + pp = cupy.dot(activeBlockVectorR, eigBlockVectorR) + app = cupy.dot(activeBlockVectorAR, eigBlockVectorR) + + if verbosityLevel > 10: + print(pp) + print(app) + + blockVectorX = cupy.dot(blockVectorX, eigBlockVectorX) + pp + blockVectorAX = cupy.dot(blockVectorAX, eigBlockVectorX) + app + + blockVectorP, blockVectorAP = pp, app + + if B is not None: + aux = blockVectorBX * _lambda[cupy.newaxis, :] + + else: + aux = blockVectorX * _lambda[cupy.newaxis, :] + + blockVectorR = blockVectorAX - aux + + aux = cupy.sum(blockVectorR.conj() * blockVectorR, 0) + residualNorms = cupy.sqrt(aux) + + if verbosityLevel > 0: + print(f'Final iterative eigenvalue(s):\n{_lambda}') + print(f'Final iterative residual norm(s):\n{residualNorms}') + + # Future work: + # Generalized eigen value solver like `scipy.linalg.eigh` + # that takes in `B` matrix as input + # `cupy.linalg.cholesky` is more unstable than `scipy.linalg.cholesky` + # Making sure eigenvectors "exactly" satisfy the blockVectorY constrains? 
+ # Making sure eigenvecotrs are "exactly" othonormalized by final "exact" RR + # Computing the actual true residuals + + if verbosityLevel > 0: + print(f'Final postprocessing eigenvalue(s):\n{_lambda}') + print(f'Final residual norm(s):\n{residualNorms}') + + if retLambdaHistory: + if retResidualNormsHistory: + return _lambda, blockVectorX, lambdaHistory, residualNormsHistory + else: + return _lambda, blockVectorX, lambdaHistory + else: + if retResidualNormsHistory: + return _lambda, blockVectorX, residualNormsHistory + else: + return _lambda, blockVectorX diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_norm.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..7ecb6c356b0d10f22b022758a43b0586fc3d90e2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_norm.py @@ -0,0 +1,111 @@ +import numpy + +import cupy +import cupyx.scipy.sparse + + +def _sparse_frobenius_norm(x): + if cupy.issubdtype(x.dtype, cupy.complexfloating): + sqnorm = abs(x).power(2).sum() + else: + sqnorm = x.power(2).sum() + return cupy.sqrt(sqnorm) + + +def norm(x, ord=None, axis=None): + """Norm of a cupy.scipy.spmatrix + + This function is able to return one of seven different sparse matrix norms, + depending on the value of the ``ord`` parameter. + + Args: + x (sparse matrix) : Input sparse matrix. + ord (non-zero int, inf, -inf, 'fro', optional) : Order of the norm (see + table under ``Notes``). inf means numpy's `inf` object. + axis : (int, 2-tuple of ints, None, optional): If `axis` is an + integer, it specifies the axis of `x` along which to + compute the vector norms. If `axis` is a 2-tuple, it specifies the + axes that hold 2-D matrices, and the matrix norms of these matrices + are computed. If `axis` is None then either a vector norm + (when `x` is 1-D) or a matrix norm (when `x` is 2-D) is returned. 
+ Returns: + ndarray : 0-D or 1-D array or norm(s). + + .. seealso:: :func:`scipy.sparse.linalg.norm` + """ + + if not cupyx.scipy.sparse.issparse(x): + raise TypeError(("input is not sparse. use cupy.linalg.norm")) + + # Check the default case first and handle it immediately. + if axis is None and ord in (None, 'fro', 'f'): + return _sparse_frobenius_norm(x) + + # Some norms require functions that are not implemented for all types. + x = x.tocsr() + + if axis is None: + axis = (0, 1) + elif not isinstance(axis, tuple): + msg = "'axis' must be None, an integer or a tuple of integers" + try: + int_axis = int(axis) + except TypeError: + raise TypeError(msg) + if axis != int_axis: + raise TypeError(msg) + axis = (int_axis,) + + nd = 2 + if len(axis) == 2: + row_axis, col_axis = axis + if not (-nd <= row_axis < nd and -nd <= col_axis < nd): + raise ValueError('Invalid axis %r for an array with shape %r' % + (axis, x.shape)) + if row_axis % nd == col_axis % nd: + raise ValueError('Duplicate axes given.') + if ord == 2: + raise NotImplementedError + # return _multi_svd_norm(x, row_axis, col_axis, amax) + elif ord == -2: + raise NotImplementedError + # return _multi_svd_norm(x, row_axis, col_axis, amin) + elif ord == 1: + return abs(x).sum(axis=row_axis).max() + elif ord == numpy.inf: + return abs(x).sum(axis=col_axis).max() + elif ord == -1: + return abs(x).sum(axis=row_axis).min() + elif ord == -numpy.inf: + return abs(x).sum(axis=col_axis).min() + elif ord in (None, 'f', 'fro'): + # The axis order does not matter for this norm. 
+ return _sparse_frobenius_norm(x) + else: + raise ValueError("Invalid norm order for matrices.") + elif len(axis) == 1: + a, = axis + if not (-nd <= a < nd): + raise ValueError('Invalid axis %r for an array with shape %r' % + (axis, x.shape)) + if ord == numpy.inf: + return abs(x).max(axis=a).toarray().ravel() + elif ord == -numpy.inf: + return abs(x).min(axis=a).toarray().ravel() + elif ord == 0: + # Zero norm + return (x != 0).astype(numpy.float32).sum(axis=a).ravel().astype( + numpy.int_) + elif ord == 1: + # special case for speedup + return abs(x).sum(axis=a).ravel() + elif ord in (2, None): + return cupy.sqrt(abs(x).power(2).sum(axis=a)).ravel() + else: + try: + ord + 1 + except TypeError: + raise ValueError('Invalid norm order for vectors.') + return cupy.power(abs(x).power(ord).sum(axis=a), 1 / ord).ravel() + else: + raise ValueError("Improper number of dimensions to norm.") diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_solve.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_solve.py new file mode 100644 index 0000000000000000000000000000000000000000..64a7ced21a41103db69f6a0773f78f67a62b25f3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_solve.py @@ -0,0 +1,1036 @@ +import numpy + +import cupy +from cupy import cublas +from cupy.cuda import device +from cupy.cuda import runtime +from cupy.linalg import _util +from cupyx.scipy import sparse +from cupyx.scipy.sparse.linalg import _interface +from cupyx.scipy.sparse.linalg._iterative import _make_system + +import warnings +try: + import scipy.sparse + import scipy.sparse.linalg + scipy_available = True +except ImportError: + scipy_available = False + + +def lsqr(A, b): + """Solves linear system with QR decomposition. + + Find the solution to a large, sparse, linear system of equations. + The function solves ``Ax = b``. Given two-dimensional matrix ``A`` is + decomposed into ``Q * R``. 
+ + Args: + A (cupy.ndarray or cupyx.scipy.sparse.csr_matrix): The input matrix + with dimension ``(N, N)`` + b (cupy.ndarray): Right-hand side vector. + + Returns: + tuple: + Its length must be ten. It has same type elements + as SciPy. Only the first element, the solution vector ``x``, is + available and other elements are expressed as ``None`` because + the implementation of cuSOLVER is different from the one of SciPy. + You can easily calculate the fourth element by ``norm(b - Ax)`` + and the ninth element by ``norm(x)``. + + .. seealso:: :func:`scipy.sparse.linalg.lsqr` + """ + from cupy_backends.cuda.libs import cusolver + + if runtime.is_hip: + raise RuntimeError('HIP does not support lsqr') + if not sparse.isspmatrix_csr(A): + A = sparse.csr_matrix(A) + # csr_matrix is 2d + _util._assert_stacked_square(A) + _util._assert_cupy_array(b) + m = A.shape[0] + if b.ndim != 1 or len(b) != m: + raise ValueError('b must be 1-d array whose size is same as A') + + # Cast to float32 or float64 + if A.dtype == 'f' or A.dtype == 'd': + dtype = A.dtype + else: + dtype = numpy.promote_types(A.dtype, 'f') + + handle = device.get_cusolver_sp_handle() + nnz = A.nnz + tol = 1.0 + reorder = 1 + x = cupy.empty(m, dtype=dtype) + singularity = numpy.empty(1, numpy.int32) + + if dtype == 'f': + csrlsvqr = cusolver.scsrlsvqr + else: + csrlsvqr = cusolver.dcsrlsvqr + csrlsvqr( + handle, m, nnz, A._descr.descriptor, A.data.data.ptr, + A.indptr.data.ptr, A.indices.data.ptr, b.data.ptr, tol, reorder, + x.data.ptr, singularity.ctypes.data) + + # The return type of SciPy is always float64. Therefore, x must be casted. + x = x.astype(numpy.float64) + ret = (x, None, None, None, None, None, None, None, None, None) + return ret + + +def lsmr(A, b, x0=None, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8, + maxiter=None): + """Iterative solver for least-squares problems. + + lsmr solves the system of linear equations ``Ax = b``. 
If the system + is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``. + A is a rectangular matrix of dimension m-by-n, where all cases are + allowed: m = n, m > n, or m < n. B is a vector of length m. + The matrix A may be dense or sparse (usually sparse). + + Args: + A (ndarray, spmatrix or LinearOperator): The real or complex + matrix of the linear system. ``A`` must be + :class:`cupy.ndarray`, :class:`cupyx.scipy.sparse.spmatrix` or + :class:`cupyx.scipy.sparse.linalg.LinearOperator`. + b (cupy.ndarray): Right hand side of the linear system with shape + ``(m,)`` or ``(m, 1)``. + x0 (cupy.ndarray): Starting guess for the solution. If None zeros are + used. + damp (float): Damping factor for regularized least-squares. + `lsmr` solves the regularized least-squares problem + :: + + min ||(b) - ( A )x|| + ||(0) (damp*I) ||_2 + + where damp is a scalar. If damp is None or 0, the system + is solved without regularization. + atol, btol (float): + Stopping tolerances. `lsmr` continues iterations until a + certain backward error estimate is smaller than some quantity + depending on atol and btol. + conlim (float): `lsmr` terminates if an estimate of ``cond(A)`` i.e. + condition number of matrix exceeds `conlim`. If `conlim` is None, + the default value is 1e+8. + maxiter (int): Maximum number of iterations. + + Returns: + tuple: + - `x` (ndarray): Least-square solution returned. + - `istop` (int): istop gives the reason for stopping:: + + 0 means x=0 is a solution. + + 1 means x is an approximate solution to A*x = B, + according to atol and btol. + + 2 means x approximately solves the least-squares problem + according to atol. + + 3 means COND(A) seems to be greater than CONLIM. + + 4 is the same as 1 with atol = btol = eps (machine + precision) + + 5 is the same as 2 with atol = eps. + + 6 is the same as 3 with CONLIM = 1/eps. + + 7 means ITN reached maxiter before the other stopping + conditions were satisfied. 
+ + - `itn` (int): Number of iterations used. + - `normr` (float): ``norm(b-Ax)`` + - `normar` (float): ``norm(A^T (b - Ax))`` + - `norma` (float): ``norm(A)`` + - `conda` (float): Condition number of A. + - `normx` (float): ``norm(x)`` + + .. seealso:: :func:`scipy.sparse.linalg.lsmr` + + References: + D. C.-L. Fong and M. A. Saunders, "LSMR: An iterative algorithm for + sparse least-squares problems", SIAM J. Sci. Comput., + vol. 33, pp. 2950-2971, 2011. + """ + A = _interface.aslinearoperator(A) + b = b.squeeze() + matvec = A.matvec + rmatvec = A.rmatvec + m, n = A.shape + minDim = min([m, n]) + + if maxiter is None: + maxiter = minDim * 5 + + u = b.copy() + normb = cublas.nrm2(b) + beta = normb.copy() + normb = normb.get().item() + if x0 is None: + x = cupy.zeros((n,), dtype=A.dtype) + else: + if not (x0.shape == (n,) or x0.shape == (n, 1)): + raise ValueError('x0 has incompatible dimensions') + x = x0.astype(A.dtype).ravel() + u -= matvec(x) + beta = cublas.nrm2(u) + + beta_cpu = beta.get().item() + + v = cupy.zeros(n) + alpha = cupy.zeros((), dtype=beta.dtype) + alpha_cpu = 0 + + if beta_cpu > 0: + u /= beta + v = rmatvec(u) + alpha = cublas.nrm2(v) + alpha_cpu = alpha.get().item() + + if alpha_cpu > 0: + v /= alpha + + # Initialize variables for 1st iteration. + + itn = 0 + zetabar = alpha_cpu * beta_cpu + alphabar = alpha_cpu + rho = 1 + rhobar = 1 + cbar = 1 + sbar = 0 + + h = v.copy() + hbar = cupy.zeros(n) + # x = cupy.zeros(n) + + # Initialize variables for estimation of ||r||. + + betadd = beta_cpu + betad = 0 + rhodold = 1 + tautildeold = 0 + thetatilde = 0 + zeta = 0 + d = 0 + + # Initialize variables for estimation of ||A|| and cond(A) + + normA2 = alpha_cpu * alpha_cpu + maxrbar = 0 + minrbar = 1e+100 + normA = alpha_cpu + condA = 1 + normx = 0 + + # Items for use in stopping rules. + istop = 0 + ctol = 0 + if conlim > 0: + ctol = 1 / conlim + normr = beta_cpu + + # Golub-Kahan process terminates when either alpha or beta is zero. 
+ # Reverse the order here from the original matlab code because + # there was an error on return when arnorm==0 + normar = alpha_cpu * beta_cpu + if normar == 0: + return x, istop, itn, normr, normar, normA, condA, normx + + # Main iteration loop. + while itn < maxiter: + itn = itn + 1 + + # Perform the next step of the bidiagonalization to obtain the + # next beta, u, alpha, v. These satisfy the relations + # beta*u = a*v - alpha*u, + # alpha*v = A'*u - beta*v. + + u *= -alpha + u += matvec(v) + beta = cublas.nrm2(u) # norm(u) + beta_cpu = beta.get().item() + + if beta_cpu > 0: + u /= beta + v *= -beta + v += rmatvec(u) + alpha = cublas.nrm2(v) # norm(v) + alpha_cpu = alpha.get().item() + if alpha_cpu > 0: + v /= alpha + + # At this point, beta = beta_{k+1}, alpha = alpha_{k+1}. + + # Construct rotation Qhat_{k,2k+1}. + + chat, shat, alphahat = _symOrtho(alphabar, damp) + + # Use a plane rotation (Q_i) to turn B_i to R_i + + rhoold = rho + c, s, rho = _symOrtho(alphahat, beta_cpu) + thetanew = s * alpha_cpu + alphabar = c * alpha_cpu + + # Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar + + rhobarold = rhobar + zetaold = zeta + thetabar = sbar * rho + rhotemp = cbar * rho + cbar, sbar, rhobar = _symOrtho(cbar * rho, thetanew) + zeta = cbar * zetabar + zetabar = - sbar * zetabar + + # Update h, h_hat, x. + + # hbar = h - (thetabar * rho / (rhoold * rhobarold)) * hbar + hbar *= -(thetabar * rho / (rhoold * rhobarold)) + hbar += h + x += (zeta / (rho * rhobar)) * hbar + # h = v - (thetanew / rho) * h + h *= -(thetanew / rho) + h += v + + # Estimate of ||r||. + + # Apply rotation Qhat_{k,2k+1}. + betaacute = chat * betadd + betacheck = -shat * betadd + + # Apply rotation Q_{k,k+1}. + betahat = c * betaacute + betadd = -s * betaacute + + # Apply rotation Qtilde_{k-1}. + # betad = betad_{k-1} here. 
+ + thetatildeold = thetatilde + ctildeold, stildeold, rhotildeold = _symOrtho(rhodold, thetabar) + thetatilde = stildeold * rhobar + rhodold = ctildeold * rhobar + betad = - stildeold * betad + ctildeold * betahat + + # betad = betad_k here. + # rhodold = rhod_k here. + + tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold + taud = (zeta - thetatilde * tautildeold) / rhodold + d = d + betacheck * betacheck + normr = numpy.sqrt(d + (betad - taud)**2 + betadd * betadd) + + # Estimate ||A||. + normA2 = normA2 + beta_cpu * beta_cpu + normA = numpy.sqrt(normA2) + normA2 = normA2 + alpha_cpu * alpha_cpu + + # Estimate cond(A). + maxrbar = max(maxrbar, rhobarold) + if itn > 1: + minrbar = min(minrbar, rhobarold) + condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp) + + # Test for convergence. + + # Compute norms for convergence testing. + normar = abs(zetabar) + normx = cublas.nrm2(x) + normx = normx.get().item() + + # Now use these norms to estimate certain other quantities, + # some of which will be small near a solution. + + test1 = normr / normb + if (normA * normr) != 0: + test2 = normar / (normA * normr) + else: + test2 = numpy.inf + test3 = 1 / condA + t1 = test1 / (1 + normA*normx/normb) + rtol = btol + atol*normA*normx/normb + + # The following tests guard against extremely small values of + # atol, btol or ctol. (The user may have set any or all of + # the parameters atol, btol, conlim to 0.) + # The effect is equivalent to the normAl tests using + # atol = eps, btol = eps, conlim = 1/eps. + + if itn >= maxiter: + istop = 7 + if 1 + test3 <= 1: + istop = 6 + if 1 + test2 <= 1: + istop = 5 + if 1 + t1 <= 1: + istop = 4 + + # Allow for tolerances set by the user. + + if test3 <= ctol: + istop = 3 + if test2 <= atol: + istop = 2 + if test1 <= rtol: + istop = 1 + + if istop > 0: + break + + # The return type of SciPy is always float64. Therefore, x must be casted. 
+ x = x.astype(numpy.float64) + + return x, istop, itn, normr, normar, normA, condA, normx + + +def _should_use_spsm(b): + from cupy_backends.cuda.libs import cusparse as _cusparse + + if not runtime.is_hip: + # Starting with CUDA 12.0, we use cusparseSpSM + return _cusparse.get_build_version() >= 12000 + else: + # Keep using hipsparsecsrsm2 for ROCm before it is dropped + return False + + +def spsolve_triangular(A, b, lower=True, overwrite_A=False, overwrite_b=False, + unit_diagonal=False): + """Solves a sparse triangular system ``A x = b``. + + Args: + A (cupyx.scipy.sparse.spmatrix): + Sparse matrix with dimension ``(M, M)``. + b (cupy.ndarray): + Dense vector or matrix with dimension ``(M)`` or ``(M, K)``. + lower (bool): + Whether ``A`` is a lower or upper triangular matrix. + If True, it is lower triangular, otherwise, upper triangular. + overwrite_A (bool): + (not supported) + overwrite_b (bool): + Allows overwriting data in ``b``. + unit_diagonal (bool): + If True, diagonal elements of ``A`` are assumed to be 1 and will + not be referenced. + + Returns: + cupy.ndarray: + Solution to the system ``A x = b``. The shape is the same as ``b``. 
+ """ + from cupyx import cusparse + + if not (cusparse.check_availability('spsm') or + cusparse.check_availability('csrsm2')): + raise NotImplementedError + + if not sparse.isspmatrix(A): + raise TypeError('A must be cupyx.scipy.sparse.spmatrix') + if not isinstance(b, cupy.ndarray): + raise TypeError('b must be cupy.ndarray') + if A.shape[0] != A.shape[1]: + raise ValueError(f'A must be a square matrix (A.shape: {A.shape})') + if b.ndim not in [1, 2]: + raise ValueError(f'b must be 1D or 2D array (b.shape: {b.shape})') + if A.shape[0] != b.shape[0]: + raise ValueError('The size of dimensions of A must be equal to the ' + 'size of the first dimension of b ' + f'(A.shape: {A.shape}, b.shape: {b.shape})') + if A.dtype.char not in 'fdFD': + raise TypeError(f'unsupported dtype (actual: {A.dtype})') + + if cusparse.check_availability('spsm') and _should_use_spsm(b): + if not (sparse.isspmatrix_csr(A) or + sparse.isspmatrix_csc(A) or + sparse.isspmatrix_coo(A)): + warnings.warn('CSR, CSC or COO format is required. Converting to ' + 'CSR format.', sparse.SparseEfficiencyWarning) + A = A.tocsr() + A.sum_duplicates() + x = cusparse.spsm(A, b, lower=lower, unit_diag=unit_diagonal) + elif cusparse.check_availability('csrsm2'): + if not (sparse.isspmatrix_csr(A) or sparse.isspmatrix_csc(A)): + warnings.warn('CSR or CSC format is required. Converting to CSR ' + 'format.', sparse.SparseEfficiencyWarning) + A = A.tocsr() + A.sum_duplicates() + + if (overwrite_b and A.dtype == b.dtype and + (b._c_contiguous or b._f_contiguous)): + x = b + else: + x = b.astype(A.dtype, copy=True) + + cusparse.csrsm2(A, x, lower=lower, unit_diag=unit_diagonal) + else: + assert False + + if x.dtype.char in 'fF': + # Note: This is for compatibility with SciPy. + dtype = numpy.promote_types(x.dtype, 'float64') + x = x.astype(dtype) + return x + + +def spsolve(A, b): + """Solves a sparse linear system ``A x = b`` + + Args: + A (cupyx.scipy.sparse.spmatrix): + Sparse matrix with dimension ``(M, M)``. 
+ b (cupy.ndarray): + Dense vector or matrix with dimension ``(M)`` or ``(M, N)``. + + Returns: + cupy.ndarray: + Solution to the system ``A x = b``. + """ + import cupyx.cusolver + + if not cupyx.cusolver.check_availability('csrlsvqr'): + raise NotImplementedError + if not sparse.isspmatrix(A): + raise TypeError('A must be cupyx.scipy.sparse.spmatrix') + if not isinstance(b, cupy.ndarray): + raise TypeError('b must be cupy.ndarray') + if A.shape[0] != A.shape[1]: + raise ValueError('A must be a square matrix (A.shape: {})'. + format(A.shape)) + if not (b.ndim == 1 or b.ndim == 2): + raise ValueError('Invalid b.shape (b.shape: {})'.format(b.shape)) + if A.shape[0] != b.shape[0]: + raise ValueError('matrix dimension mismatch (A.shape: {}, b.shape: {})' + .format(A.shape, b.shape)) + + if not sparse.isspmatrix_csr(A): + warnings.warn('CSR format is required. Converting to CSR format.', + sparse.SparseEfficiencyWarning) + A = A.tocsr() + A.sum_duplicates() + b = b.astype(A.dtype, copy=False) + + if b.ndim > 1: + res = cupy.empty_like(b) + for j in range(res.shape[1]): + res[:, j] = cupyx.cusolver.csrlsvqr(A, b[:, j]) + res = cupy.asarray(res, order='F') + return res + else: + return cupyx.cusolver.csrlsvqr(A, b) + + +class SuperLU(): + + def __init__(self, obj): + """LU factorization of a sparse matrix. + + Args: + obj (scipy.sparse.linalg.SuperLU): LU factorization of a sparse + matrix, computed by `scipy.sparse.linalg.splu`, etc. 
+ """ + if not scipy_available: + raise RuntimeError('scipy is not available') + if not isinstance(obj, scipy.sparse.linalg.SuperLU): + raise TypeError('obj must be scipy.sparse.linalg.SuperLU') + + self.shape = obj.shape + self.nnz = obj.nnz + self.perm_r = cupy.array(obj.perm_r) + self.perm_c = cupy.array(obj.perm_c) + self.L = sparse.csr_matrix(obj.L.tocsr()) + self.U = sparse.csr_matrix(obj.U.tocsr()) + + self._perm_r_rev = cupy.argsort(self.perm_r) + self._perm_c_rev = cupy.argsort(self.perm_c) + + def solve(self, rhs, trans='N'): + """Solves linear system of equations with one or several right-hand sides. + + Args: + rhs (cupy.ndarray): Right-hand side(s) of equation with dimension + ``(M)`` or ``(M, K)``. + trans (str): 'N', 'T' or 'H'. + 'N': Solves ``A * x = rhs``. + 'T': Solves ``A.T * x = rhs``. + 'H': Solves ``A.conj().T * x = rhs``. + + Returns: + cupy.ndarray: + Solution vector(s) + """ # NOQA + from cupyx import cusparse + + if not isinstance(rhs, cupy.ndarray): + raise TypeError('ojb must be cupy.ndarray') + if rhs.ndim not in (1, 2): + raise ValueError('rhs.ndim must be 1 or 2 (actual: {})'. 
+ format(rhs.ndim)) + if rhs.shape[0] != self.shape[0]: + raise ValueError('shape mismatch (self.shape: {}, rhs.shape: {})' + .format(self.shape, rhs.shape)) + if trans not in ('N', 'T', 'H'): + raise ValueError('trans must be \'N\', \'T\', or \'H\'') + + if cusparse.check_availability('spsm') and _should_use_spsm(rhs): + def spsm(A, B, lower, transa): + return cusparse.spsm(A, B, lower=lower, transa=transa) + sm = spsm + elif cusparse.check_availability('csrsm2'): + def csrsm2(A, B, lower, transa): + cusparse.csrsm2(A, B, lower=lower, transa=transa) + return B + sm = csrsm2 + else: + raise NotImplementedError + + x = rhs.astype(self.L.dtype) + if trans == 'N': + if self.perm_r is not None: + if x.ndim == 2 and x._f_contiguous: + x = x.T[:, self._perm_r_rev].T # want to keep f-order + else: + x = x[self._perm_r_rev] + x = sm(self.L, x, lower=True, transa=trans) + x = sm(self.U, x, lower=False, transa=trans) + if self.perm_c is not None: + x = x[self.perm_c] + else: + if self.perm_c is not None: + if x.ndim == 2 and x._f_contiguous: + x = x.T[:, self._perm_c_rev].T # want to keep f-order + else: + x = x[self._perm_c_rev] + x = sm(self.U, x, lower=False, transa=trans) + x = sm(self.L, x, lower=True, transa=trans) + if self.perm_r is not None: + x = x[self.perm_r] + + if not x._f_contiguous: + # For compatibility with SciPy + x = x.copy(order='F') + return x + + +class CusparseLU(SuperLU): + + def __init__(self, a): + """Incomplete LU factorization of a sparse matrix. + + Args: + a (cupyx.scipy.sparse.csr_matrix): Incomplete LU factorization of a + sparse matrix, computed by `cusparse.csrilu02`. 
+ """ + if not scipy_available: + raise RuntimeError('scipy is not available') + if not sparse.isspmatrix_csr(a): + raise TypeError('a must be cupyx.scipy.sparse.csr_matrix') + + self.shape = a.shape + self.nnz = a.nnz + self.perm_r = None + self.perm_c = None + # TODO(anaruse): Computes tril and triu on GPU + a = a.get() + al = scipy.sparse.tril(a) + al.setdiag(1.0) + au = scipy.sparse.triu(a) + self.L = sparse.csr_matrix(al.tocsr()) + self.U = sparse.csr_matrix(au.tocsr()) + + +def factorized(A): + """Return a function for solving a sparse linear system, with A pre-factorized. + + Args: + A (cupyx.scipy.sparse.spmatrix): Sparse matrix to factorize. + + Returns: + callable: a function to solve the linear system of equations given in + ``A``. + + Note: + This function computes LU decomposition of a sparse matrix on the CPU + using `scipy.sparse.linalg.splu`. Therefore, LU decomposition is not + accelerated on the GPU. On the other hand, the computation of solving + linear equations using the method returned by this function is + performed on the GPU. + + .. seealso:: :func:`scipy.sparse.linalg.factorized` + """ # NOQA + return splu(A).solve + + +def splu(A, permc_spec=None, diag_pivot_thresh=None, relax=None, + panel_size=None, options={}): + """Computes the LU decomposition of a sparse square matrix. + + Args: + A (cupyx.scipy.sparse.spmatrix): Sparse matrix to factorize. + permc_spec (str): (For further augments, see + :func:`scipy.sparse.linalg.splu`) + diag_pivot_thresh (float): + relax (int): + panel_size (int): + options (dict): + + Returns: + cupyx.scipy.sparse.linalg.SuperLU: + Object which has a ``solve`` method. + + Note: + This function LU-decomposes a sparse matrix on the CPU using + `scipy.sparse.linalg.splu`. Therefore, LU decomposition is not + accelerated on the GPU. On the other hand, the computation of solving + linear equations using the ``solve`` method, which this function + returns, is performed on the GPU. + + .. 
seealso:: :func:`scipy.sparse.linalg.splu` + """ + if not scipy_available: + raise RuntimeError('scipy is not available') + if not sparse.isspmatrix(A): + raise TypeError('A must be cupyx.scipy.sparse.spmatrix') + if A.shape[0] != A.shape[1]: + raise ValueError('A must be a square matrix (A.shape: {})' + .format(A.shape)) + if A.dtype.char not in 'fdFD': + raise TypeError('Invalid dtype (actual: {})'.format(A.dtype)) + + a = A.get().tocsc() + a_inv = scipy.sparse.linalg.splu( + a, permc_spec=permc_spec, diag_pivot_thresh=diag_pivot_thresh, + relax=relax, panel_size=panel_size, options=options) + return SuperLU(a_inv) + + +def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, + permc_spec=None, diag_pivot_thresh=None, relax=None, + panel_size=None, options={}): + """Computes the incomplete LU decomposition of a sparse square matrix. + + Args: + A (cupyx.scipy.sparse.spmatrix): Sparse matrix to factorize. + drop_tol (float): (For further augments, see + :func:`scipy.sparse.linalg.spilu`) + fill_factor (float): + drop_rule (str): + permc_spec (str): + diag_pivot_thresh (float): + relax (int): + panel_size (int): + options (dict): + + Returns: + cupyx.scipy.sparse.linalg.SuperLU: + Object which has a ``solve`` method. + + Note: + This function computes incomplete LU decomposition of a sparse matrix + on the CPU using `scipy.sparse.linalg.spilu` (unless you set + ``fill_factor`` to ``1``). Therefore, incomplete LU decomposition is + not accelerated on the GPU. On the other hand, the computation of + solving linear equations using the ``solve`` method, which this + function returns, is performed on the GPU. + + If you set ``fill_factor`` to ``1``, this function computes incomplete + LU decomposition on the GPU, but without fill-in or pivoting. + + .. 
seealso:: :func:`scipy.sparse.linalg.spilu` + """ + from cupyx import cusparse + + if not scipy_available: + raise RuntimeError('scipy is not available') + if not sparse.isspmatrix(A): + raise TypeError('A must be cupyx.scipy.sparse.spmatrix') + if A.shape[0] != A.shape[1]: + raise ValueError('A must be a square matrix (A.shape: {})' + .format(A.shape)) + if A.dtype.char not in 'fdFD': + raise TypeError('Invalid dtype (actual: {})'.format(A.dtype)) + + if fill_factor == 1: + # Computes ILU(0) on the GPU using cuSparse functions + if not sparse.isspmatrix_csr(A): + a = A.tocsr() + else: + a = A.copy() + cusparse.csrilu02(a) + return CusparseLU(a) + + a = A.get().tocsc() + a_inv = scipy.sparse.linalg.spilu( + a, fill_factor=fill_factor, drop_tol=drop_tol, drop_rule=drop_rule, + permc_spec=permc_spec, diag_pivot_thresh=diag_pivot_thresh, + relax=relax, panel_size=panel_size, options=options) + return SuperLU(a_inv) + + +def _symOrtho(a, b): + """ + A stable implementation of Givens rotation according to + S.-C. Choi, "Iterative Methods for Singular Linear Equations + and Least-Squares Problems", Dissertation, + http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf + """ + if b == 0: + return numpy.sign(a), 0, abs(a) + elif a == 0: + return 0, numpy.sign(b), abs(b) + elif abs(b) > abs(a): + tau = a / b + s = numpy.sign(b) / numpy.sqrt(1+tau*tau) + c = s * tau + r = b / s + else: + tau = b / a + c = numpy.sign(a) / numpy.sqrt(1+tau*tau) + s = c * tau + r = a / c + return c, s, r + + +def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None, + M=None, callback=None, check=False): + """Uses MINimum RESidual iteration to solve ``Ax = b``. + + Args: + A (ndarray, spmatrix or LinearOperator): The real or complex matrix of + the linear system with shape ``(n, n)``. + b (cupy.ndarray): Right hand side of the linear system with shape + ``(n,)`` or ``(n, 1)``. + x0 (cupy.ndarray): Starting guess for the solution. 
+ shift (int or float): If shift != 0 then the method solves + ``(A - shift*I)x = b`` + tol (float): Tolerance for convergence. + maxiter (int): Maximum number of iterations. + M (ndarray, spmatrix or LinearOperator): Preconditioner for ``A``. + The preconditioner should approximate the inverse of ``A``. + ``M`` must be :class:`cupy.ndarray`, + :class:`cupyx.scipy.sparse.spmatrix` or + :class:`cupyx.scipy.sparse.linalg.LinearOperator`. + callback (function): User-specified function to call after each + iteration. It is called as ``callback(xk)``, where ``xk`` is the + current solution vector. + + Returns: + tuple: + It returns ``x`` (cupy.ndarray) and ``info`` (int) where ``x`` is + the converged solution and ``info`` provides convergence + information. + + .. seealso:: :func:`scipy.sparse.linalg.minres` + """ + A, M, x, b = _make_system(A, M, x0, b) + + matvec = A.matvec + psolve = M.matvec + + n = b.shape[0] + + if maxiter is None: + maxiter = n * 5 + + istop = 0 + itn = 0 + Anorm = 0 + Acond = 0 + rnorm = 0 + ynorm = 0 + + xtype = x.dtype + + eps = cupy.finfo(xtype).eps + + Ax = matvec(x) + r1 = b - Ax + y = psolve(r1) + + beta1 = cupy.inner(r1, y) + + if beta1 < 0: + raise ValueError('indefinite preconditioner') + elif beta1 == 0: + return x, 0 + + beta1 = cupy.sqrt(beta1) + beta1 = beta1.get().item() + + if check: + # see if A is symmetric + if not _check_symmetric(A, Ax, x, eps): + raise ValueError('non-symmetric matrix') + + # see if M is symmetric + if not _check_symmetric(M, y, r1, eps): + raise ValueError('non-symmetric preconditioner') + + oldb = 0 + beta = beta1 + dbar = 0 + epsln = 0 + qrnorm = beta1 + phibar = beta1 + rhs1 = beta1 + rhs2 = 0 + tnorm2 = 0 + gmax = 0 + gmin = cupy.finfo(xtype).max + cs = -1 + sn = 0 + w = cupy.zeros(n, dtype=xtype) + w2 = cupy.zeros(n, dtype=xtype) + r2 = r1 + + while itn < maxiter: + + itn += 1 + s = 1.0 / beta + v = s * y + + y = matvec(v) + y -= shift * v + + if itn >= 2: + y -= (beta / oldb) * r1 + + alpha = 
cupy.inner(v, y) + alpha = alpha.get().item() + y -= (alpha / beta) * r2 + r1 = r2 + r2 = y + y = psolve(r2) + oldb = beta + beta = cupy.inner(r2, y) + beta = beta.get().item() + beta = numpy.sqrt(beta) + if beta < 0: + raise ValueError('non-symmetric matrix') + + tnorm2 += alpha ** 2 + oldb ** 2 + beta ** 2 + + if itn == 1: + if beta / beta1 <= 10 * eps: + istop = -1 + + # Apply previous rotation Qk-1 to get + # [deltak epslnk+1] = [cs sn][dbark 0 ] + # [gbar k dbar k+1] [sn -cs][alfak betak+1]. + + oldeps = epsln + delta = cs * dbar + sn * alpha # delta1 = 0 deltak + gbar = sn * dbar - cs * alpha # gbar 1 = alfa1 gbar k + epsln = sn * beta # epsln2 = 0 epslnk+1 + dbar = - cs * beta # dbar 2 = beta2 dbar k+1 + root = numpy.linalg.norm([gbar, dbar]) + + # Compute the next plane rotation Qk + + gamma = numpy.linalg.norm([gbar, beta]) # gammak + gamma = max(gamma, eps) + cs = gbar / gamma # ck + sn = beta / gamma # sk + phi = cs * phibar # phik + phibar = sn * phibar # phibark+1 + + # Update x. + + denom = 1.0 / gamma + w1 = w2 + w2 = w + w = (v - oldeps * w1 - delta * w2) * denom + x += phi * w + + # Go round again. + + gmax = max(gmax, gamma) + gmin = min(gmin, gamma) + z = rhs1 / gamma + rhs1 = rhs2 - delta * z + rhs2 = - epsln * z + + # Estimate various norms and test for convergence. + + Anorm = numpy.sqrt(tnorm2) + ynorm = cupy.linalg.norm(x) + ynorm = ynorm.get().item() + epsa = Anorm * eps + epsx = Anorm * ynorm * eps + diag = gbar + + if diag == 0: + diag = epsa + + qrnorm = phibar + rnorm = qrnorm + if ynorm == 0 or Anorm == 0: + test1 = numpy.inf + else: + test1 = rnorm / (Anorm * ynorm) # ||r|| / (||A|| ||x||) + if Anorm == 0: + test2 = numpy.inf + else: + test2 = root / Anorm # ||Ar|| / (||A|| ||r||) + + # Estimate cond(A). + # In this version we look at the diagonals of R in the + # factorization of the lower Hessenberg matrix, Q * H = R, + # where H is the tridiagonal matrix from Lanczos with one + # extra row, beta(k+1) e_k^T. 
+ + Acond = gmax / gmin + + # See if any of the stopping criteria are satisfied. + # In rare cases, istop is already -1 from above (Abar = const*I). + + if istop == 0: + t1 = 1 + test1 # These tests work if tol < eps + t2 = 1 + test2 + if t2 <= 1: + istop = 2 + if t1 <= 1: + istop = 1 + + if itn >= maxiter: + istop = 6 + if Acond >= 0.1 / eps: + istop = 4 + if epsx >= beta1: + istop = 3 + # epsr = Anorm * ynorm * tol + # if rnorm <= epsx : istop = 2 + # if rnorm <= epsr : istop = 1 + if test2 <= tol: + istop = 2 + if test1 <= tol: + istop = 1 + + if callback is not None: + callback(x) + + if istop != 0: + break + + if istop == 6: + info = maxiter + else: + info = 0 + + return x, info + + +def _check_symmetric(op1, op2, vec, eps): + r2 = op1 * op2 + s = cupy.inner(op2, op2) + t = cupy.inner(vec, r2) + z = abs(s - t) + epsa = (s + eps) * eps ** (1.0 / 3.0) + if z > epsa: + return False + return True diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca5aa898c01f94443f7df839e3a77768c8b091a3 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_basic.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52fc3f3517cc5ccc5a8623266703e7446ff84a7a Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_basic.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_bessel.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_bessel.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..7b3d649576bfabc9927a3122f176aaf965d53335 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_bessel.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_beta.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_beta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65f8b0870f8d34c5299fd8da2bdfc6c426bee83e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_beta.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_binom.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_binom.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b53aa473592cceb1f43e1b7c8d9d1a55fcfdd67 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_binom.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_complexstuff.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_complexstuff.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8e129a643bcce189a7f770405c751ad1856e666 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_complexstuff.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_convex_analysis.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_convex_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9c389a272d0b5f315de271b38cf3daef0dc65e0 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_convex_analysis.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_digamma.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_digamma.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f7ab853cffd2b4187c057cc1539648d0870fc21 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_digamma.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_ellip.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_ellip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0fad7dc8486ee62b1c414b17f785fbe0d978d39 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_ellip.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_erf.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_erf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e339251425484ecefdb991bf479a4dc35073ee2a Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_erf.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_exp1.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_exp1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e74be03a87df10f61330fa22989a74084617d3b5 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_exp1.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_expi.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_expi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ea40e51b234d3f95223066332211616e8aa0738 
Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_expi.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_expn.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_expn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..295d501676baed54147203dd221583de2115c5e6 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_expn.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gamma.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gamma.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24a77b216b4596c1e88fa538bb59c39fc61bd7a6 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gamma.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gammainc.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gammainc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf7782c8d371ad6d2be8f18f6cf2733c473d6399 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gammainc.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gammaln.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gammaln.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9adab5c877d7329d9f4089a16391b057502c0f90 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gammaln.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gammasgn.cpython-310.pyc 
b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gammasgn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e530628f3ffccb2f4cee899dd79da2ce98ea7956 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gammasgn.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_loggamma.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_loggamma.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17aa31ec81679c97ab2789bf154cb697ff1c3559 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_loggamma.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_logsoftmax.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_logsoftmax.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95a5f7c7a96b5ef1acc1ea829f30d0f697dc335e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_logsoftmax.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_logsumexp.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_logsumexp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a6dafb6d6256a884e9f81dc470ad51c47bf030d Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_logsumexp.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_lpmv.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_lpmv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a0399596a213100f5ce505cda2d5a4f969dab7d Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_lpmv.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_poch.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_poch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7181d620c76b163ed73e85e67032f77053481f1 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_poch.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_polygamma.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_polygamma.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df9d86aa4ec8babcfbb79b95c31937d9569faa8e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_polygamma.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_softmax.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_softmax.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15a6234314719eaedcb8c71dcf7bc0d6650f369a Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_softmax.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_sph_harm.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_sph_harm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e356c33bd6cbe5384091f910305e8e688223d0c Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_sph_harm.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_spherical_bessel.cpython-310.pyc 
b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_spherical_bessel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44bceebd8634b3a381c870a314bbc5463f633bb4 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_spherical_bessel.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_statistics.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_statistics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cefa741f941f3d085cf348269c252ed3f972884 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_statistics.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_stats_distributions.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_stats_distributions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77b2aec4594b94690fd60c23d7da591fd17a049f Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_stats_distributions.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_trig.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_trig.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d103bbb1050a546c094f38f96bc337bf9efe1713 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_trig.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_xlogy.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_xlogy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c654fe0118c8492d015f45eae913170f0cb0ca18 Binary 
files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_xlogy.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_zeta.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_zeta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b33eb3ad13fcd49e8be1c60be69c7bc979fe80d Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_zeta.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_zetac.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_zetac.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db02badb60488ea3b81c4da8796d1b681a39deae Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_zetac.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_basic.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..8476ba7e13b9b335b42799e86524c0721fb0ec5c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_basic.py @@ -0,0 +1,297 @@ +"""basic special functions + + +cotdg and tandg implementations are adapted from the following SciPy code: + +https://github.com/scipy/scipy/blob/master/scipy/special/cephes/tandg.c + +radian is from + +https://github.com/scipy/scipy/blob/master/scipy/special/cephes/sindg.c + +cosm1 is from + +https://github.com/scipy/scipy/blob/main/scipy/special/cephes/unity.c + +polevl is from + +https://github.com/scipy/scipy/blob/main/scipy/special/cephes/polevl.h + + +Cephes Math Library Release 2.0: April, 1987 +Copyright 1984, 1987 by Stephen L. 
Moshier +Direct inquiries to 30 Frost Street, Cambridge, MA 02140 +""" + +from cupy import _core + + +# Note: cast complex to complex or tests fail tolerance +log1p = _core.create_ufunc( + 'cupyx_scipy_log1p', + (('f->f', 'out0 = log1pf(in0)'), + ('F->F', 'out0 = out0_type(log1p((complex)in0))'), + 'd->d', 'D->D'), + 'out0 = log1p(in0);', + doc="""Elementwise function for scipy.special.log1p + + Calculates log(1 + x) for use when `x` is near zero. + + Notes + ----- + This implementation currently does not support complex-valued `x`. + + .. seealso:: :meth:`scipy.special.log1p` + + """, +) + + +cbrt = _core.create_ufunc( + 'cupyx_scipy_special_cbrt', + (('f->f', 'out0 = cbrtf(in0)'), 'd->d'), + 'out0 = cbrt(in0)', + doc='''Cube root. + + .. seealso:: :meth:`scipy.special.cbrt` + + ''') + + +exp2 = _core.create_ufunc( + 'cupyx_scipy_special_exp2', + (('f->f', 'out0 = exp2f(in0)'), 'd->d'), + 'out0 = exp2(in0)', + doc='''Computes ``2**x``. + + .. seealso:: :meth:`scipy.special.exp2` + + ''') + + +exp10 = _core.create_ufunc( + 'cupyx_scipy_special_exp10', + (('f->f', 'out0 = exp10f(in0)'), 'd->d'), + 'out0 = exp10(in0)', + doc='''Computes ``10**x``. + + .. seealso:: :meth:`scipy.special.exp10` + + ''') + + +expm1 = _core.create_ufunc( + 'cupyx_scipy_special_expm1', + (('f->f', 'out0 = expm1f(in0)'), 'd->d', 'F->F', 'D->D'), + 'out0 = expm1(in0)', + doc='''Computes ``exp(x) - 1``. + + .. seealso:: :meth:`scipy.special.expm1` + + ''') + +exprel = _core.create_ufunc( + 'cupyx_scipy_special_exprel', + (('f->f'), 'd->d', 'F->F', 'D->D'), + 'out0 = abs(in0) >= 1e-16 ? expm1(in0) / in0 : 1', + doc='''Computes ``(exp(x) - 1) / x``. + + .. 
seealso:: :meth:`scipy.special.exprel` + + ''') + +cosm1_implementation = """ +//Define from npy_math.h +//https://github.com/numpy/numpy/blob/main/numpy/core/include/numpy/npy_math.h +#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */ +__constant__ double coscof[] = { + 4.7377507964246204691685E-14, + -1.1470284843425359765671E-11, + 2.0876754287081521758361E-9, + -2.7557319214999787979814E-7, + 2.4801587301570552304991E-5, + -1.3888888888888872993737E-3, + 4.1666666666666666609054E-2, +}; + +__device__ static double polevl(double x, const double coef[], int N) +{ + double ans; + int i; + const double *p; + + p = coef; + ans = *p++; + i = N; + + do + ans = ans * x + *p++; + while (--i); + + return (ans); +} + +__device__ static double cosm1(double x) +{ + double xx; + + if ((x < -NPY_PI_4) || (x > NPY_PI_4)) + return (cos(x) - 1.0); + xx = x * x; + xx = -0.5 * xx + xx * xx * polevl(xx, coscof, 6); + return xx; +} +""" + +cosm1 = _core.create_ufunc( + 'cupyx_scipy_special_cosm1', ('f->f', 'd->d'), + 'out0 = cosm1(in0)', + preamble=cosm1_implementation, + doc='''Computes ``cos(x) - 1``. + + .. seealso:: :meth:`scipy.special.cosm1` + + ''') + +pi180_preamble = """ + __constant__ double PI180 = 1.74532925199432957692E-2; // pi/180 +""" + +cosdg = _core.create_ufunc( + 'cupyx_scipy_special_cosdg', + (('f->f', 'out0 = cosf(PI180 * in0)'), 'd->d'), + 'out0 = cos(PI180 * in0)', + preamble=pi180_preamble, + doc='''Cosine of x with x in degrees. + + .. seealso:: :meth:`scipy.special.cosdg` + + ''') + + +sindg = _core.create_ufunc( + 'cupyx_scipy_special_sindg', + (('f->f', 'out0 = sinf(PI180 * in0)'), 'd->d'), + 'out0 = sin(PI180 * in0)', + preamble=pi180_preamble, + doc='''Sine of x with x in degrees. + + .. 
seealso:: :meth:`scipy.special.sindg` + + ''') + + +tancot_implementation = pi180_preamble + """ + + +// include for CUDART_INF +#include + +__constant__ double lossth = 1.0e14; + +__device__ static double tancot(double, int); + +__device__ static double tandg(double x) +{ + return tancot(x, 0); +} + + +__device__ static double cotdg(double x) +{ + return tancot(x, 1); +} + + +__device__ static double tancot(double xx, int cotflg) +{ + double x; + int sign; + + /* make argument positive but save the sign */ + if (xx < 0) { + x = -xx; + sign = -1; + } + else { + x = xx; + sign = 1; + } + + if (x > lossth) { + // sf_error("tandg", SF_ERROR_NO_RESULT, NULL); + return 0.0; + } + + /* modulo 180 */ + x = x - 180.0 * floor(x / 180.0); + if (cotflg) { + if (x <= 90.0) { + x = 90.0 - x; + } else { + x = x - 90.0; + sign *= -1; + } + } else { + if (x > 90.0) { + x = 180.0 - x; + sign *= -1; + } + } + if (x == 0.0) { + return 0.0; + } + else if (x == 45.0) { + return sign * 1.0; + } + else if (x == 90.0) { + // sf_error((cotflg ? "cotdg" : "tandg"), SF_ERROR_SINGULAR, NULL); + return CUDART_INF; + } + /* x is now transformed into [0, 90) */ + return sign * tan(x * PI180); +} +""" + +tandg = _core.create_ufunc( + 'cupyx_scipy_special_tandg', ('f->f', 'd->d'), + 'out0 = tandg(in0)', + preamble=tancot_implementation, + doc='''Tangent of x with x in degrees. + + .. seealso:: :meth:`scipy.special.tandg` + + ''') + + +cotdg = _core.create_ufunc( + 'cupyx_scipy_special_cotdg', ('f->f', 'd->d'), + 'out0 = cotdg(in0)', + preamble=tancot_implementation, + doc='''Cotangent of x with x in degrees. + + .. 
seealso:: :meth:`scipy.special.cotdg` + + ''') + +radian_implementation = """ +/* 1 arc second, in radians*/ +__constant__ double P64800 = + 4.848136811095359935899141023579479759563533023727e-6; + +template +__device__ T radian(T d, T m, T s) +{ + return (((d * 60.0 + m) * 60.0 + s) * P64800); +} +""" + +radian = _core.create_ufunc( + 'cupyx_scipy_special_radian', ('fff->f', 'ddd->d'), + 'out0 = radian(in0, in1, in2)', + preamble=radian_implementation, + doc='''Degrees, minutes, seconds to radians: + + .. seealso:: :meth:`scipy.special.radian` + + ''') diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_beta.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_beta.py new file mode 100644 index 0000000000000000000000000000000000000000..fb1568c72013c6a674aef739165bf81d0049e5fa --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_beta.py @@ -0,0 +1,1091 @@ +"""Beta and log(abs(beta)) functions. + +Also the incomplete beta function and its inverse. + +The source code here is an adaptation with minimal changes from the following +files in SciPy's bundled Cephes library: + +https://github.com/scipy/scipy/blob/main/scipy/special/cephes/beta.c +https://github.com/scipy/scipy/blob/main/scipy/special/cephes/incbet.c +https://github.com/scipy/scipy/blob/main/scipy/special/cephes/incbi.c + +Cephes Math Library, Release 2.3: March, 1995 +Copyright 1984, 1995 by Stephen L. 
Moshier +""" + +from cupy import _core +from cupyx.scipy.special._digamma import polevl_definition +from cupyx.scipy.special._gamma import gamma_definition +from cupyx.scipy.special._gammainc import p1evl_definition + + +beta_preamble = """ +// include for CUDART_INF, CUDART_NAN +#include + +// defines from /scipy/special/cephes/const.c +// The ones chosen here are the IEEE variants from that file +__constant__ double MAXLOG = 7.09782712893383996732E2; +__constant__ double MINLOG = -7.08396418532264106224E2; +__constant__ double MACHEP = 1.11022302462515654042E-16; // 2**-53 + +// defines from npy_math.h +#define NPY_PI 3.141592653589793238462643383279502884 /* pi */ + +#define MAXGAM 171.624376956302725 +#define ASYMP_FACTOR 1e6 +""" + + +lgam_sgn_definition = """ + + +/* A[]: Stirling's formula expansion of log Gamma + * B[], C[]: log Gamma function between 2 and 3 + */ +__constant__ double A[] = { + 8.11614167470508450300E-4, + -5.95061904284301438324E-4, + 7.93650340457716943945E-4, + -2.77777777730099687205E-3, + 8.33333333333331927722E-2 +}; + +__constant__ double B[] = { + -1.37825152569120859100E3, + -3.88016315134637840924E4, + -3.31612992738871184744E5, + -1.16237097492762307383E6, + -1.72173700820839662146E6, + -8.53555664245765465627E5 +}; + +__constant__ double C[] = { + /* 1.00000000000000000000E0, */ + -3.51815701436523470549E2, + -1.70642106651881159223E4, + -2.20528590553854454839E5, + -1.13933444367982507207E6, + -2.53252307177582951285E6, + -2.01889141433532773231E6 +}; + +/* log( sqrt( 2*pi ) ) */ +__constant__ double LS2PI = 0.91893853320467274178; + +__constant__ double LOGPI = 1.14472988584940017414; + +#define MAXLGM 2.556348e305 + + +__noinline__ __device__ static double lgam_sgn(double x, int *sign) +{ + double p, q, u, w, z; + int i; + + *sign = 1; + + if (isinf(x)) { + return x; + } + + if (x < -34.0) { + q = -x; + w = lgam_sgn(q, sign); + p = floor(q); + if (p == q) { + return CUDART_INF; + } + i = p; + if ((i & 1) == 0) + *sign = -1; + 
else + *sign = 1; + z = q - p; + if (z > 0.5) { + p += 1.0; + z = p - q; + } + z = q * sin(NPY_PI * z); + if (z == 0.0){ + return CUDART_INF; + } + /* z = log(NPY_PI) - log( z ) - w; */ + z = LOGPI - log(z) - w; + return z; + } + + if (x < 13.0) { + z = 1.0; + p = 0.0; + u = x; + while (u >= 3.0) { + p -= 1.0; + u = x + p; + z *= u; + } + while (u < 2.0) { + if (u == 0.0) { + return CUDART_INF; + } + z /= u; + p += 1.0; + u = x + p; + } + if (z < 0.0) { + *sign = -1; + z = -z; + } + else { + *sign = 1; + } + if (u == 2.0) { + return log(z); + } + p -= 2.0; + x = x + p; + p = x * polevl<5>(x, B) / p1evl<6>(x, C); + return (log(z) + p); + } + + if (x > MAXLGM) { + return (*sign * CUDART_INF); + } + + q = (x - 0.5) * log(x) - x + LS2PI; + if (x > 1.0e8) { + return q; + } + + p = 1.0 / (x * x); + if (x >= 1000.0) { + q += ((7.9365079365079365079365e-4 * p + - 2.7777777777777777777778e-3) * p + + 0.0833333333333333333333) / x; + } else { + q += polevl<4>(p, A) / x; + } + return q; +} + + +__device__ static double lgam(double x) +{ + int sign; + return lgam_sgn(x, &sign); +} + +""" + +lbeta_symp_definition = """ +/* + * Asymptotic expansion for ln(|B(a, b)|) for a > ASYMP_FACTOR*max(|b|, 1). + */ +__noinline__ __device__ double lbeta_asymp(double a, double b, int *sgn) +{ + double r = lgam_sgn(b, sgn); + r -= b * log(a); + + r += b*(1-b)/(2*a); + r += b*(1-b)*(1-2*b)/(12*a*a); + r += - b*b*(1-b)*(1-b)/(12*a*a*a); + + return r; +} +""" + + +beta_definition = """ + +__noinline__ __device__ double beta(double, double); + + +/* + * Special case for a negative integer argument + */ + +__noinline__ __device__ static double beta_negint(int a, double b) +{ + int sgn; + if (b == (int)b && 1 - a - b > 0) { + sgn = ((int)b % 2 == 0) ? 
1 : -1; + return sgn * beta(1 - a - b, b); + } + else { + return CUDART_INF; + } +} + +__noinline__ __device__ double beta(double a, double b) +{ + double y; + int sign = 1; + + if (a <= 0.0) { + if (a == floor(a)) { + if (a == (int)a) { + return beta_negint((int)a, b); + } + else { + return CUDART_INF; + } + } + } + + if (b <= 0.0) { + if (b == floor(b)) { + if (b == (int)b) { + return beta_negint((int)b, a); + } + else { + return CUDART_INF; + } + } + } + + if (fabs(a) < fabs(b)) { + y = a; a = b; b = y; + } + + if (fabs(a) > ASYMP_FACTOR * fabs(b) && a > ASYMP_FACTOR) { + /* Avoid loss of precision in lgam(a + b) - lgam(a) */ + y = lbeta_asymp(a, b, &sign); + return sign * exp(y); + } + + y = a + b; + if (fabs(y) > MAXGAM || fabs(a) > MAXGAM || fabs(b) > MAXGAM) { + int sgngam; + y = lgam_sgn(y, &sgngam); + sign *= sgngam; /* keep track of the sign */ + y = lgam_sgn(b, &sgngam) - y; + sign *= sgngam; + y = lgam_sgn(a, &sgngam) + y; + sign *= sgngam; + if (y > MAXLOG) { + return sign * CUDART_INF; + } + return sign * exp(y); + } + + y = Gamma(y); + a = Gamma(a); + b = Gamma(b); + if (y == 0.0) { + return sign * CUDART_INF; + } + + if (fabs(fabs(a) - fabs(y)) > fabs(fabs(b) - fabs(y))) { + y = b / y; + y *= a; + } + else { + y = a / y; + y *= b; + } + + return (y); +} +""" + + +lbeta_definition = """ + +__noinline__ __device__ double lbeta(double, double); + + +/* + * Special case for a negative integer argument + */ + +__noinline__ __device__ static double lbeta_negint(int a, double b) +{ + double r; + if (b == (int)b && 1 - a - b > 0) { + r = lbeta(1 - a - b, b); + return r; + } + else { + return CUDART_INF; + } +} + +// Natural log of |beta| + +__noinline__ __device__ double lbeta(double a, double b) +{ + double y; + int sign; + + sign = 1; + + if (a <= 0.0) { + if (a == floor(a)) { + if (a == (int)a) { + return lbeta_negint((int)a, b); + } + else { + return CUDART_INF; + } + } + } + + if (b <= 0.0) { + if (b == floor(b)) { + if (b == (int)b) { + return 
lbeta_negint((int)b, a); + } + else { + return CUDART_INF; + } + } + } + + if (fabs(a) < fabs(b)) { + y = a; a = b; b = y; + } + + if (fabs(a) > ASYMP_FACTOR * fabs(b) && a > ASYMP_FACTOR) { + /* Avoid loss of precision in lgam(a + b) - lgam(a) */ + y = lbeta_asymp(a, b, &sign); + return y; + } + + y = a + b; + if (fabs(y) > MAXGAM || fabs(a) > MAXGAM || fabs(b) > MAXGAM) { + int sgngam; + y = lgam_sgn(y, &sgngam); + sign *= sgngam; /* keep track of the sign */ + y = lgam_sgn(b, &sgngam) - y; + sign *= sgngam; + y = lgam_sgn(a, &sgngam) + y; + sign *= sgngam; + return (y); + } + + y = Gamma(y); + a = Gamma(a); + b = Gamma(b); + if (y == 0.0) { + return (sign * CUDART_INF); + } + + if (fabs(fabs(a) - fabs(y)) > fabs(fabs(b) - fabs(y))) { + y = b / y; + y *= a; + } + else { + y = a / y; + y *= b; + } + + if (y < 0) { + y = -y; + } + return log(y); +} + +""" + + +beta = _core.create_ufunc( + "cupyx_scipy_beta", + ("ff->f", "dd->d"), + "out0 = out0_type(beta(in0, in1));", + preamble=( + beta_preamble + + gamma_definition + + polevl_definition + + p1evl_definition + + lgam_sgn_definition + + lbeta_symp_definition + + beta_definition + ), + doc="""Beta function. + + Parameters + ---------- + a, b : cupy.ndarray + Real-valued arguments + out : cupy.ndarray, optional + Optional output array for the function result + + Returns + ------- + scalar or ndarray + Value of the beta function + + See Also + -------- + :func:`scipy.special.beta` + + """, +) + + +betaln = _core.create_ufunc( + "cupyx_scipy_betaln", + ("ff->f", "dd->d"), + "out0 = out0_type(lbeta(in0, in1));", + preamble=( + beta_preamble + + gamma_definition + + polevl_definition + + p1evl_definition + + lgam_sgn_definition + + lbeta_symp_definition + + lbeta_definition + ), + doc="""Natural logarithm of absolute value of beta function. + + Computes ``ln(abs(beta(a, b)))``. 
+ + Parameters + ---------- + a, b : cupy.ndarray + Real-valued arguments + out : cupy.ndarray, optional + Optional output array for the function result + + Returns + ------- + scalar or ndarray + Value of the natural log of the magnitude of beta. + + See Also + -------- + :func:`scipy.special.betaln` + + """, +) + + +incbet_definition = """ + +__noinline__ __device__ static double incbd(double, double, double); +__noinline__ __device__ static double incbcf(double, double, double); +__noinline__ __device__ static double pseries(double, double, double); + +__constant__ double big = 4.503599627370496e15; +__constant__ double biginv = 2.22044604925031308085e-16; + +__noinline__ __device__ double incbet(double aa, double bb, double xx) +{ + double a, b, t, x, xc, w, y; + int flag; + + if (aa <= 0.0 || bb <= 0.0) + { + return CUDART_NAN; + } + + if ((xx <= 0.0) || (xx >= 1.0)) { + if (xx == 0.0) { + return 0.0; + } + if (xx == 1.0) { + return 1.0; + } + return CUDART_NAN; + } + + flag = 0; + if ((bb * xx) <= 1.0 && xx <= 0.95) { + t = pseries(aa, bb, xx); + goto done; + } + + w = 1.0 - xx; + + /* Reverse a and b if x is greater than the mean. */ + if (xx > (aa / (aa + bb))) { + flag = 1; + a = bb; + b = aa; + xc = xx; + x = w; + } else { + a = aa; + b = bb; + xc = w; + x = xx; + } + + if (flag == 1 && (b * x) <= 1.0 && x <= 0.95) { + t = pseries(a, b, x); + goto done; + } + + /* Choose expansion for better convergence. */ + y = x * (a + b - 2.0) - (a - 1.0); + if (y < 0.0) { + w = incbcf(a, b, x); + } else { + w = incbd(a, b, x) / xc; + } + + /* Multiply w by the factor + * a b _ _ _ + * x (1-x) | (a+b) / ( a | (a) | (b) ) . */ + + y = a * log(x); + t = b * log(xc); + if ((a + b) < MAXGAM && fabs(y) < MAXLOG && fabs(t) < MAXLOG) { + t = pow(xc, b); + t *= pow(x, a); + t /= a; + t *= w; + t *= 1.0 / beta(a, b); + goto done; + } + /* Resort to logarithms. 
*/ + y += t - lbeta(a,b); + y += log(w / a); + if (y < MINLOG) { + t = 0.0; + } else { + t = exp(y); + } + + done: + + if (flag == 1) { + if (t <= MACHEP) { + t = 1.0 - MACHEP; + } else { + t = 1.0 - t; + } + } + return t; +} + + +/* Continued fraction expansion #1 + * for incomplete beta integral + */ + +__noinline__ __device__ static double incbcf(double a, double b, double x) +{ + double xk, pk, pkm1, pkm2, qk, qkm1, qkm2; + double k1, k2, k3, k4, k5, k6, k7, k8; + double r, t, ans, thresh; + int n; + + k1 = a; + k2 = a + b; + k3 = a; + k4 = a + 1.0; + k5 = 1.0; + k6 = b - 1.0; + k7 = k4; + k8 = a + 2.0; + + pkm2 = 0.0; + qkm2 = 1.0; + pkm1 = 1.0; + qkm1 = 1.0; + ans = 1.0; + r = 1.0; + n = 0; + thresh = 3.0 * MACHEP; + do { + + xk = -(x * k1 * k2) / (k3 * k4); + pk = pkm1 + pkm2 * xk; + qk = qkm1 + qkm2 * xk; + pkm2 = pkm1; + pkm1 = pk; + qkm2 = qkm1; + qkm1 = qk; + + xk = (x * k5 * k6) / (k7 * k8); + pk = pkm1 + pkm2 * xk; + qk = qkm1 + qkm2 * xk; + pkm2 = pkm1; + pkm1 = pk; + qkm2 = qkm1; + qkm1 = qk; + + if (qk != 0) + r = pk / qk; + if (r != 0) { + t = fabs((ans - r) / r); + ans = r; + } else { + t = 1.0; + } + + if (t < thresh) { + return ans; + } + + k1 += 1.0; + k2 += 1.0; + k3 += 2.0; + k4 += 2.0; + k5 += 1.0; + k6 -= 1.0; + k7 += 2.0; + k8 += 2.0; + + if ((fabs(qk) + fabs(pk)) > big) { + pkm2 *= biginv; + pkm1 *= biginv; + qkm2 *= biginv; + qkm1 *= biginv; + } + if ((fabs(qk) < biginv) || (fabs(pk) < biginv)) { + pkm2 *= big; + pkm1 *= big; + qkm2 *= big; + qkm1 *= big; + } + } + while (++n < 300); + + return ans; +} + + + +/* Continued fraction expansion #2 + * for incomplete beta integral + */ + +__noinline__ __device__ static double incbd(double a, double b, double x) +{ + double xk, pk, pkm1, pkm2, qk, qkm1, qkm2; + double k1, k2, k3, k4, k5, k6, k7, k8; + double r, t, ans, z, thresh; + int n; + + k1 = a; + k2 = b - 1.0; + k3 = a; + k4 = a + 1.0; + k5 = 1.0; + k6 = a + b; + k7 = a + 1.0;; + k8 = a + 2.0; + + pkm2 = 0.0; + qkm2 = 1.0; + pkm1 = 1.0; 
+ qkm1 = 1.0; + z = x / (1.0 - x); + ans = 1.0; + r = 1.0; + n = 0; + thresh = 3.0 * MACHEP; + do { + + xk = -(z * k1 * k2) / (k3 * k4); + pk = pkm1 + pkm2 * xk; + qk = qkm1 + qkm2 * xk; + pkm2 = pkm1; + pkm1 = pk; + qkm2 = qkm1; + qkm1 = qk; + + xk = (z * k5 * k6) / (k7 * k8); + pk = pkm1 + pkm2 * xk; + qk = qkm1 + qkm2 * xk; + pkm2 = pkm1; + pkm1 = pk; + qkm2 = qkm1; + qkm1 = qk; + + if (qk != 0){ + r = pk / qk; + } + if (r != 0) { + t = fabs((ans - r) / r); + ans = r; + } else { + t = 1.0; + } + + if (t < thresh) { + return ans; + } + + k1 += 1.0; + k2 -= 1.0; + k3 += 2.0; + k4 += 2.0; + k5 += 1.0; + k6 += 1.0; + k7 += 2.0; + k8 += 2.0; + + if ((fabs(qk) + fabs(pk)) > big) { + pkm2 *= biginv; + pkm1 *= biginv; + qkm2 *= biginv; + qkm1 *= biginv; + } + if ((fabs(qk) < biginv) || (fabs(pk) < biginv)) { + pkm2 *= big; + pkm1 *= big; + qkm2 *= big; + qkm1 *= big; + } + } + while (++n < 300); + + return ans; +} + + +/* Power series for incomplete beta integral. + * Use when b*x is small and x not too close to 1. 
*/ + +__noinline__ __device__ static double pseries(double a, double b, double x) +{ + double s, t, u, v, n, t1, z, ai; + + ai = 1.0 / a; + u = (1.0 - b) * x; + v = u / (a + 1.0); + t1 = v; + t = u; + n = 2.0; + s = 0.0; + z = MACHEP * ai; + while (fabs(v) > z) { + u = (n - b) * x / n; + t *= u; + v = t / (a + n); + s += v; + n += 1.0; + } + s += t1; + s += ai; + + u = a * log(x); + if ((a + b) < MAXGAM && fabs(u) < MAXLOG) { + t = 1.0 / beta(a, b); + s = s * t * pow(x, a); + } else { + t = -lbeta(a,b) + u + log(s); + if (t < MINLOG) { + s = 0.0; + } else { + s = exp(t); + } + } + return (s); +} + + +""" + +incbet_preamble = ( + beta_preamble + + gamma_definition + + polevl_definition + + p1evl_definition + + lgam_sgn_definition + + lbeta_symp_definition + + beta_definition + + lbeta_definition + + incbet_definition +) + + +betainc = _core.create_ufunc( + "cupyx_scipy_betainc", + ("fff->f", "ddd->d"), + "out0 = out0_type(incbet(in0, in1, in2));", + preamble=incbet_preamble, + doc="""Incomplete beta function. + + Parameters + ---------- + a, b : cupy.ndarray + Positive, real-valued parameters + x : cupy.ndarray + Real-valued such that 0 <= x <= 1, the upper limit of integration. 
+ out : ndarray, optional + Optional output array for the function result + + Returns + ------- + scalar or ndarray + Value of the incomplete beta function + + See Also + -------- + :func:`scipy.special.betainc` + + """, +) + + +incbi_definition = """ + +__noinline__ __device__ double incbi(double aa, double bb, double yy0) +{ + double a, b, y0, d, y, x, x0, x1, lgm, yp, di, dithresh, yl, yh, xt; + int i, rflg, dir, nflg; + + i = 0; + if (yy0 <= 0) { + return 0.0; + } + if (yy0 >= 1.0) { + return 1.0; + } + x0 = 0.0; + yl = 0.0; + x1 = 1.0; + yh = 1.0; + nflg = 0; + + if (aa <= 1.0 || bb <= 1.0) { + dithresh = 1.0e-6; + rflg = 0; + a = aa; + b = bb; + y0 = yy0; + x = a / (a + b); + y = incbet(a, b, x); + goto ihalve; + } + else { + dithresh = 1.0e-4; + } + /* approximation to inverse function */ + + // normcdfinv is the CUDA Math API equivalent of cephes ndtri + yp = -normcdfinv(yy0); + + if (yy0 > 0.5) { + rflg = 1; + a = bb; + b = aa; + y0 = 1.0 - yy0; + yp = -yp; + } + else { + rflg = 0; + a = aa; + b = bb; + y0 = yy0; + } + + lgm = (yp * yp - 3.0) / 6.0; + x = 2.0 / (1.0 / (2.0 * a - 1.0) + 1.0 / (2.0 * b - 1.0)); + d = yp * sqrt(x + lgm) / x + - (1.0 / (2.0 * b - 1.0) - 1.0 / (2.0 * a - 1.0)) + * (lgm + 5.0 / 6.0 - 2.0 / (3.0 * x)); + d = 2.0 * d; + if (d < MINLOG) { + x = 1.0; + goto under; + } + x = a / (a + b * exp(d)); + y = incbet(a, b, x); + yp = (y - y0) / y0; + if (fabs(yp) < 0.2) { + goto newt; + } + + /* Resort to interval halving if not close enough. 
*/ +ihalve: + + dir = 0; + di = 0.5; + for (i = 0; i < 100; i++) { + if (i != 0) { + x = x0 + di * (x1 - x0); + if (x == 1.0) { + x = 1.0 - MACHEP; + } + if (x == 0.0) { + di = 0.5; + x = x0 + di * (x1 - x0); + if (x == 0.0) + goto under; + } + y = incbet(a, b, x); + yp = (x1 - x0) / (x1 + x0); + if (fabs(yp) < dithresh) { + goto newt; + } + yp = (y - y0) / y0; + if (fabs(yp) < dithresh) { + goto newt; + } + } + if (y < y0) { + x0 = x; + yl = y; + if (dir < 0) { + dir = 0; + di = 0.5; + } else if (dir > 3) { + di = 1.0 - (1.0 - di) * (1.0 - di); + } else if (dir > 1) { + di = 0.5 * di + 0.5; + } else { + di = (y0 - y) / (yh - yl); + } + dir += 1; + if (x0 > 0.75) { + if (rflg == 1) { + rflg = 0; + a = aa; + b = bb; + y0 = yy0; + } else { + rflg = 1; + a = bb; + b = aa; + y0 = 1.0 - yy0; + } + x = 1.0 - x; + y = incbet(a, b, x); + x0 = 0.0; + yl = 0.0; + x1 = 1.0; + yh = 1.0; + goto ihalve; + } + } + else { + x1 = x; + if (rflg == 1 && x1 < MACHEP) { + x = 0.0; + goto done; + } + yh = y; + if (dir > 0) { + dir = 0; + di = 0.5; + } else if (dir < -3) { + di = di * di; + } else if (dir < -1) { + di = 0.5 * di; + } else { + di = (y - y0) / (yh - yl); + } + dir -= 1; + } + } + if (x0 >= 1.0) { + x = 1.0 - MACHEP; + goto done; + } + if (x <= 0.0) { +under: + x = 0.0; + goto done; + } + +newt: + + if (nflg) { + goto done; + } + nflg = 1; + lgm = lgam(a + b) - lgam(a) - lgam(b); + + for (i = 0; i < 8; i++) { + /* Compute the function at this point. */ + if (i != 0) { + y = incbet(a, b, x); + } + if (y < yl) { + x = x0; + y = yl; + } else if (y > yh) { + x = x1; + y = yh; + } else if (y < y0) { + x0 = x; + yl = y; + } else { + x1 = x; + yh = y; + } + if (x == 1.0 || x == 0.0) + break; + /* Compute the derivative of the function at this point. */ + d = (a - 1.0) * log(x) + (b - 1.0) * log(1.0 - x) + lgm; + if (d < MINLOG) { + goto done; + } + if (d > MAXLOG) { + break; + } + d = exp(d); + /* Compute the step to the next approximation of x. 
*/ + d = (y - y0) / d; + xt = x - d; + if (xt <= x0) { + y = (x - x0) / (x1 - x0); + xt = x0 + 0.5 * y * (x - x0); + if (xt <= 0.0) { + break; + } + } + if (xt >= x1) { + y = (x1 - x) / (x1 - x0); + xt = x1 - 0.5 * y * (x1 - x); + if (xt >= 1.0) { + break; + } + } + x = xt; + if (fabs(d / x) < 128.0 * MACHEP) { + goto done; + } + } + /* Did not converge. */ + dithresh = 256.0 * MACHEP; + goto ihalve; + +done: + + if (rflg) { + if (x <= MACHEP) + x = 1.0 - MACHEP; + else + x = 1.0 - x; + } + return x; +} + +""" + +incbi_preamble = incbet_preamble + incbi_definition + + +betaincinv = _core.create_ufunc( + "cupyx_scipy_betaincinv", + ("fff->f", "ddd->d"), + "out0 = out0_type(incbi(in0, in1, in2));", + preamble=incbi_preamble, + doc="""Inverse of the incomplete beta function. + + Parameters + ---------- + a, b : cupy.ndarray + Positive, real-valued parameters + y : cupy.ndarray + Real-valued input. + out : ndarray, optional + Optional output array for the function result + + Returns + ------- + scalar or ndarray + Value of the inverse of the incomplete beta function + + See Also + -------- + :func:`scipy.special.betaincinv` + + """, +) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_complexstuff.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_complexstuff.py new file mode 100644 index 0000000000000000000000000000000000000000..41512fdf64dc279d6e68904a7d3d06a24f0e570c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_complexstuff.py @@ -0,0 +1,54 @@ +"""complex-valued functions adapted from SciPy's cython code: + +https://github.com/scipy/scipy/blob/master/scipy/special/_complexstuff.pxd + +Notes: +- instead of zabs, use thrust::abs +- instead of zarg, use thrust::arg +- instead of zcos, use thrust::cos +- instead of zexp, use thrust::exp +- instead of zisfinite, use isfinite defined in _core/include/cupy/complex.cuh +- instead of zisinf, use isinf defined in _core/include/cupy/complex.cuh +- instead of zisnan, use 
isnan defined in _core/include/cupy/complex.cuh +- instead of zpack, use complex(real, imag) +- instead of zpow, use thrust::pow +- instead of zreal, use z.real() +- instead of zsin, use thrust::sin +- instead of zsqrt, use thrust::sqrt + +""" + + +zlog1_definition = """ + +/* Compute log, paying special attention to accuracy around 1. We + * implement this ourselves because some systems (most notably the + * Travis CI machines) are weak in this regime. + */ + +#define TOL_ZLOG1 2.220446092504131e-16 + + +__device__ complex zlog1(complex z) +{ + complex coeff = -1.0; + complex res = 0.0; + + if (abs(z - 1.0) > 0.1) { + return log(z); // complex log via Thrust + } + z = z - 1.0; + if (z == 0.0) { + return 0; + } + for (int n=1; n<17; n++) + { + coeff *= -z; + res += coeff / complex(n, 0); + if (abs(res/coeff) < TOL_ZLOG1) { + break; + } + } + return res; +} +""" diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_convex_analysis.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_convex_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..5e60c30ac573a39d7b0bdb02a88bb817a8a61087 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_convex_analysis.py @@ -0,0 +1,120 @@ +from cupy import _core + + +_float_preamble = ''' +#include + +double __device__ entr(double x) { + if (isnan(x)) { + return CUDART_NAN; + } else if (x > 0){ + return -x * log(x); + } else if (x == 0){ + return 0; + } else { + return -CUDART_INF; + } +} + +double __device__ kl_div(double x, double y) { + if (isnan(x) || isnan(y)) { + return CUDART_NAN; + } else if (x > 0 && y > 0) { + return x * log(x / y) - x + y; + } else if (x == 0 && y >= 0) { + return y; + } else { + return CUDART_INF; + } +} + +double __device__ rel_entr(double x, double y) { + if (isnan(x) || isnan(y)) { + return CUDART_NAN; + } else if (x > 0 && y > 0) { + return x * log(x / y); + } else if (x == 0 && y >= 0) { + return 0; + } else { + return CUDART_INF; 
+ } +} + +double __device__ huber(double delta, double r) { + if (delta < 0) { + return CUDART_INF; + } else if (abs(r) <= delta) { + return 0.5 * r * r; + } else { + return delta * (abs(r) - 0.5 * delta); + } +} + +double __device__ pseudo_huber(double delta, double r) { + if (delta < 0) { + return CUDART_INF; + } else if (delta == 0 || r == 0) { + return 0; + } else { + double u = delta; + double v = r / delta; + return u * u * (sqrt(1 + v * v) - 1); + } +} + +''' + + +entr = _core.create_ufunc( + 'cupyx_scipy_special_entr', ('f->f', 'd->d'), + 'out0 = out0_type(entr(in0));', + preamble=_float_preamble, + doc='''Elementwise function for computing entropy. + + .. seealso:: :meth:`scipy.special.entr` + + ''') + + +kl_div = _core.create_ufunc( + 'cupyx_scipy_special_kl_div', ('ff->f', 'dd->d'), + 'out0 = out0_type(kl_div(in0, in1));', + preamble=_float_preamble, + doc='''Elementwise function for computing Kullback-Leibler divergence. + + .. seealso:: :meth:`scipy.special.kl_div` + + ''') + + +rel_entr = _core.create_ufunc( + 'cupyx_scipy_special_rel_entr', ('ff->f', 'dd->d'), + 'out0 = out0_type(rel_entr(in0, in1));', + preamble=_float_preamble, + doc='''Elementwise function for computing relative entropy. + + .. seealso:: :meth:`scipy.special.rel_entr` + + ''') + + +huber = _core.create_ufunc( + 'cupyx_scipy_special_huber', ('ff->f', 'dd->d'), + 'out0 = out0_type(huber(in0, in1));', + preamble=_float_preamble, + doc='''Elementwise function for computing the Huber loss. + + .. seealso:: :meth:`scipy.special.huber` + + ''') + + +pseudo_huber = _core.create_ufunc( + 'cupyx_scipy_special_pseudo_huber', ('ff->f', 'dd->d'), + 'out0 = out0_type(pseudo_huber(in0, in1));', + preamble=_float_preamble, + doc='''Elementwise function for computing the Pseudo-Huber loss. + + .. 
seealso:: :meth:`scipy.special.pseudo_huber` + + ''') diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_ellip.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_ellip.py new file mode 100644 index 0000000000000000000000000000000000000000..9f803065d6b337e95da5ddda409fbe065f0edd79 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_ellip.py @@ -0,0 +1,234 @@ +# This source code contains SciPy's code. +# https://github.com/scipy/scipy/blob/master/scipy/special/cephes/ellipk.c +# https://github.com/scipy/scipy/blob/master/scipy/special/cephes/ellipk.pxd +# +# +# Cephes Math Library Release 2.8: June, 2000 +# Copyright 1984, 1987, 1992, 2000 by Stephen L. Moshier + +from cupy import _core +from cupyx.scipy.special._digamma import polevl_definition + +ellpk_definition = """ +#include + +__constant__ double P[] = { + 1.37982864606273237150E-4, + 2.28025724005875567385E-3, + 7.97404013220415179367E-3, + 9.85821379021226008714E-3, + 6.87489687449949877925E-3, + 6.18901033637687613229E-3, + 8.79078273952743772254E-3, + 1.49380448916805252718E-2, + 3.08851465246711995998E-2, + 9.65735902811690126535E-2, + 1.38629436111989062502E0 +}; + +__constant__ double Q[] = { + 2.94078955048598507511E-5, + 9.14184723865917226571E-4, + 5.94058303753167793257E-3, + 1.54850516649762399335E-2, + 2.39089602715924892727E-2, + 3.01204715227604046988E-2, + 3.73774314173823228969E-2, + 4.88280347570998239232E-2, + 7.03124996963957469739E-2, + 1.24999999999870820058E-1, + 4.99999999999999999821E-1 +}; + +__constant__ double C1 = 1.3862943611198906188E0; /* log(4) */ + + +static __device__ double ellpk(double x) +{ + double MACHEP = 1.11022302462515654042E-16; /* 2**-53 */ + + + if (x < 0.0) { + return (CUDART_NAN); + } + + if (x > 1.0) { + if (isinf(x)) { + return 0.0; + } + return ellpk(1/x)/sqrt(x); + } + + if (x > MACHEP) { + return (polevl<10>(x, P) - log(x) * polevl<10>(x, Q)); + } + else { + if (x == 0.0) { + return (CUDART_INF); + } + else { 
+ return (C1 - 0.5 * log(x)); + } + } +} + + +static __device__ double ellpkm1(double x) +{ + return ellpk(1 - x); +} + +""" + +ellipkm1 = _core.create_ufunc( + 'cupyx_scipy_special_ellipk', + ('f->f', 'd->d'), + 'out0 = ellpk(in0)', + preamble=polevl_definition+ellpk_definition, + doc="""ellpkm1. + + Args: + x (cupy.ndarray): The input of digamma function. + + Returns: + cupy.ndarray: Computed value of digamma function. + + .. seealso:: :data:`scipy.special.digamma` + + """) + + +ellipk = _core.create_ufunc( + 'cupyx_scipy_special_ellipkm1', + ("f->f", "d->d"), + 'out0 = ellpkm1(in0)', + preamble=polevl_definition+ellpk_definition, + doc="""ellpk. + + Args: + x (cupy.ndarray): The input of digamma function. + + Returns: + cupy.ndarray: Computed value of digamma function. + + .. seealso:: :data:`scipy.special.digamma` + + """) + + +ellipj_preamble = """ +#include + +__constant__ double M_PI_2 = 1.57079632679489661923; + +static __device__ double ellipj(double u, double m, double* sn, + double* cn, double *dn, double *ph) +{ + + double MACHEP = 1.11022302462515654042E-16; /* 2**-53 */ + + double ai, b, phi, t, twon, dnfac; + double a[9], c[9]; + int i; + + /* Check for special cases */ + if (m < 0.0 || m > 1.0 || isnan(m)) { + *sn = CUDART_NAN; + *cn = CUDART_NAN; + *ph = CUDART_NAN; + *dn = CUDART_NAN; + return (-1); + } + if (m < 1.0e-9) { + t = sin(u); + b = cos(u); + ai = 0.25 * m * (u - t * b); + *sn = t - ai * b; + *cn = b + ai * t; + *ph = u - ai; + *dn = 1.0 - 0.5 * m * t * t; + return (0); + } + if (m >= 0.9999999999) { + ai = 0.25 * (1.0 - m); + b = cosh(u); + t = tanh(u); + phi = 1.0 / b; + twon = b * sinh(u); + *sn = t + ai * (twon - u) / (b * b); + *ph = 2.0 * atan(exp(u)) - M_PI_2 + ai * (twon - u) / b; + ai *= t * phi; + *cn = phi - ai * (twon - u); + *dn = phi + ai * (twon + u); + return (0); + } + + /* A. G. M. scale. 
See DLMF 22.20(ii) */ + a[0] = 1.0; + b = sqrt(1.0 - m); + c[0] = sqrt(m); + twon = 1.0; + i = 0; + + while (fabs(c[i] / a[i]) > MACHEP) { + if (i > 7) { + goto done; + } + ai = a[i]; + ++i; + c[i] = (ai - b) / 2.0; + t = sqrt(ai * b); + a[i] = (ai + b) / 2.0; + b = t; + twon *= 2.0; + } + + done: + /* backward recurrence */ + phi = twon * a[i] * u; + do { + t = c[i] * sin(phi) / a[i]; + b = phi; + phi = (asin(t) + phi) / 2.0; + } + while (--i); + + *sn = sin(phi); + t = cos(phi); + *cn = t; + dnfac = cos(phi - b); + /* See discussion after DLMF 22.20.5 */ + if (fabs(dnfac) < 0.1) { + *dn = sqrt(1 - m*(*sn)*(*sn)); + } + else { + *dn = t / dnfac; + } + *ph = phi; + return (0); +} + +""" + + +ellipj = _core.create_ufunc( + 'cupyx_scipy_special_ellipj', + ('ff->ffff', 'dd->dddd'), + ''' + double sn, cn, dn, ph; ellipj(in0, in1, &sn, &cn, &dn, &ph); + out0 = sn; out1 = cn; out2 = dn; out3 = ph; + ''', + preamble=ellipj_preamble, + doc="""ellipj + + Args: + u (cupy.ndarray): The input of ellipj function. + m (cupy.ndarray): The input of ellipj function. + + + Returns: + sn, cn, dn, ph: Computed values. + + .. seealso:: :data:`scipy.special.ellipj` + """ +) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_expn.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_expn.py new file mode 100644 index 0000000000000000000000000000000000000000..f30605d80d26e71c40b7b2bddd7033fbb33d38f8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_expn.py @@ -0,0 +1,359 @@ +# This source code contains SciPy's code. +# https://github.com/scipy/scipy/blob/main/scipy/special/cephes/expn.c +# +# +# Cephes Math Library Release 1.1: March, 1985 +# Copyright 1985 by Stephen L. 
Moshier +# Direct inquiries to 30 Frost Street, Cambridge, MA 02140 + +from cupy import _core + +from cupyx.scipy.special._gamma import gamma_definition + + +polevl_definition = ''' + +__device__ double polevl(double x, double coef[], int N) +{ + double ans; + double *p; + p = coef; + ans = *p++; + for (int i = 0; i < N; ++i){ + ans = ans * x + *p++; + } + return ans; +} + +''' + + +expn_large_n_definition = ''' + +__constant__ double EUL = 0.57721566490153286060; +__constant__ double BIG = 1.44115188075855872E+17; +__constant__ double MACHEP = 1.11022302462515654042E-16; +__constant__ double MAXLOG = 7.08396418532264106224E2; + +#define nA 13 + +__constant__ double _A0[] = { + 1.00000000000000000 +}; + +__constant__ double _A1[] = { + 1.00000000000000000 +}; + +__constant__ double _A2[] = { + -2.00000000000000000, + 1.00000000000000000 +}; + +__constant__ double _A3[] = { + 6.00000000000000000, + -8.00000000000000000, + 1.00000000000000000 +}; + +__constant__ double _A4[] = { + -24.0000000000000000, + 58.0000000000000000, + -22.0000000000000000, + 1.00000000000000000 +}; + +__constant__ double _A5[] = { + 120.000000000000000, + -444.000000000000000, + 328.000000000000000, + -52.0000000000000000, + 1.00000000000000000 +}; + +__constant__ double _A6[] = { + -720.000000000000000, + 3708.00000000000000, + -4400.00000000000000, + 1452.00000000000000, + -114.000000000000000, + 1.00000000000000000 +}; + +__constant__ double _A7[] = { + 5040.00000000000000, + -33984.0000000000000, + 58140.0000000000000, + -32120.0000000000000, + 5610.00000000000000, + -240.000000000000000, + 1.00000000000000000 +}; + +__constant__ double _A8[] = { + -40320.0000000000000, + 341136.000000000000, + -785304.000000000000, + 644020.000000000000, + -195800.000000000000, + 19950.0000000000000, + -494.000000000000000, + 1.00000000000000000 +}; + +__constant__ double _A9[] = { + 362880.000000000000, + -3733920.00000000000, + 11026296.0000000000, + -12440064.0000000000, + 5765500.00000000000, + 
-1062500.00000000000, + 67260.0000000000000, + -1004.00000000000000, + 1.00000000000000000 +}; + +__constant__ double _A10[] = { + -3628800.00000000000, + 44339040.0000000000, + -162186912.000000000, + 238904904.000000000, + -155357384.000000000, + 44765000.0000000000, + -5326160.00000000000, + 218848.000000000000, + -2026.00000000000000, + 1.00000000000000000 +}; + +__constant__ double _A11[] = { + 39916800.0000000000, + -568356480.000000000, + 2507481216.00000000, + -4642163952.00000000, + 4002695088.00000000, + -1648384304.00000000, + 314369720.000000000, + -25243904.0000000000, + 695038.000000000000, + -4072.00000000000000, + 1.00000000000000000 +}; + +__constant__ double _A12[] = { + -479001600.000000000, + 7827719040.00000000, + -40788301824.0000000, + 92199790224.0000000, + -101180433024.000000, + 56041398784.0000000, + -15548960784.0000000, + 2051482776.00000000, + -114876376.000000000, + 2170626.00000000000, + -8166.00000000000000, + 1.00000000000000000 +}; + +__constant__ double *A[] = { + _A0, _A1, _A2, + _A3, _A4, _A5, + _A6, _A7, _A8, + _A9, _A10, _A11, + _A12 +}; + +__constant__ int Adegs[] = { + 0, 0, 1, + 2, 3, 4, + 5, 6, 7, + 8, 9, 10, + 11 +}; + +/* Asymptotic expansion for large n, DLMF 8.20(ii) */ +__device__ double expn_large_n(int n, double x) +{ + int k; + double p = n; + double lambda = x/p; + double multiplier = 1/p/(lambda + 1)/(lambda + 1); + double fac = 1; + double res = 1; /* A[0] = 1 */ + double expfac, term; + + expfac = exp(-lambda*p)/(lambda + 1)/p; + if (expfac == 0) { + return 0; + } + + /* Do the k = 1 term outside the loop since A[1] = 1 */ + fac *= multiplier; + res += fac; + + for (k = 2; k < nA; k++) { + fac *= multiplier; + term = fac*polevl(lambda, A[k], Adegs[k]); + res += term; + if (fabs(term) < MACHEP*fabs(res)) { + break; + } + } + + return expfac*res; +} + +''' + + +expn_definition = ( + polevl_definition + + gamma_definition + + expn_large_n_definition + + ''' + +// include for CUDART_NAN, CUDART_INF +#include + 
+__device__ double expn(int n, double x) +{ + double ans, r, t, yk, xk; + double pk, pkm1, pkm2, qk, qkm1, qkm2; + double psi, z; + int i, k; + double big = BIG; + + if (isnan(x)) { + return CUDART_NAN; + } else if (n < 0 || x < 0) { + return CUDART_NAN; + } + + if (x > MAXLOG) { + return (0.0); + } + + if (x == 0.0) { + if (n < 2) { + return CUDART_INF; + } else { + return (1.0 / (n - 1.0)); + } + } + + if (n == 0) { + return (exp(-x) / x); + } + + /* Asymptotic expansion for large n, DLMF 8.20(ii) */ + if (n > 50) { + ans = expn_large_n(n, x); + return (ans); + } + + /* Continued fraction, DLMF 8.19.17 */ + if (x > 1.0) { + k = 1; + pkm2 = 1.0; + qkm2 = x; + pkm1 = 1.0; + qkm1 = x + n; + ans = pkm1 / qkm1; + + do { + k += 1; + if (k & 1) { + yk = 1.0; + xk = n + (k - 1) / 2; + } else { + yk = x; + xk = k / 2; + } + pk = pkm1 * yk + pkm2 * xk; + qk = qkm1 * yk + qkm2 * xk; + if (qk != 0) { + r = pk / qk; + t = fabs((ans - r) / r); + ans = r; + } else { + t = 1.0; + } + pkm2 = pkm1; + pkm1 = pk; + qkm2 = qkm1; + qkm1 = qk; + if (fabs(pk) > big) { + pkm2 /= big; + pkm1 /= big; + qkm2 /= big; + qkm1 /= big; + } + } while (t > MACHEP); + + ans *= exp(-x); + return ans; + } + + /* Power series expansion, DLMF 8.19.8 */ + psi = -EUL - log(x); + for (i = 1; i < n; i++) { + psi = psi + 1.0 / i; + } + + z = -x; + xk = 0.0; + yk = 1.0; + pk = 1.0 - n; + if (n == 1) { + ans = 0.0; + } else { + ans = 1.0 / pk; + } + do { + xk += 1.0; + yk *= z / xk; + pk += 1.0; + if (pk != 0.0) { + ans += yk / pk; + } + if (ans != 0.0) + t = fabs(yk / ans); + else + t = 1.0; + } while (t > MACHEP); + k = xk; + t = n; + r = n - 1; + ans = (pow(z, r) * psi / Gamma(t)) - ans; + return (ans); +} + +''' +) + + +expn = _core.create_ufunc( + 'cupyx_scipy_special_expn', + ('ff->f', 'dd->d'), + 'out0 = expn(in0, in1)', + preamble=expn_definition, + doc="""Generalized exponential integral En. 
+ + Parameters + ---------- + n : cupy.ndarray + Non-negative integers + x : cupy.ndarray + Real argument + + Returns + ------- + y : scalar or cupy.ndarray + Values of the generalized exponential integral + + See Also + -------- + :func:`scipy.special.expn` + + """, +) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_gammasgn.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_gammasgn.py new file mode 100644 index 0000000000000000000000000000000000000000..3f24e07f74c6c1babe162dc980f2332c1e918369 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_gammasgn.py @@ -0,0 +1,51 @@ +""" +The source code here is an adaptation with minimal changes from the following +file in SciPy's bundled Cephes library: + +https://github.com/scipy/scipy/blob/master/scipy/special/cephes/gammasgn.c + +Cephes Math Library Release 2.0: April, 1987 +Copyright 1984, 1987 by Stephen L. Moshier +Direct inquiries to 30 Frost Street, Cambridge, MA 02140 +""" + +from cupy import _core + + +gammasgn_definition = """ +__device__ double gammasgn(double x) +{ + double fx; + + if (isnan(x)) { + return x; + } + if (x > 0) { + return 1.0; + } + else { + fx = floor(x); + if (x - fx == 0.0) { + return 0.0; + } + else if ((int)fx % 2) { + return -1.0; + } + else { + return 1.0; + } + } +} +""" + +gammasgn = _core.create_ufunc( + "cupyx_scipy_gammasgn", + ("f->f", "d->d"), + "out0 = out0_type(gammasgn(in0));", + preamble=gammasgn_definition, + doc="""Elementwise function for scipy.special.gammasgn + + .. 
seealso:: :meth:`scipy.special.gammasgn` + + """, +) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_logsoftmax.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_logsoftmax.py new file mode 100644 index 0000000000000000000000000000000000000000..da582b80768b27eabad94b85f2519db301f9098d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_logsoftmax.py @@ -0,0 +1,53 @@ +import cupy as cp + + +_log_softmax_kernel = cp._core.ReductionKernel( + 'T x1', + 'T y', + 'exp(x1)', + 'a + b', + 'y = log(a)', + '0', + name='log_softmax' +) + + +def log_softmax(x, axis=None): + """Compute logarithm of softmax function + + Parameters + ---------- + x : array-like + Input array + axis : int or tuple of ints, optional + Axis to compute values along. Default is None and softmax + will be computed over the entire array `x` + + Returns + ------- + s : cupy.ndarry + An array with the same shape as `x`. Exponential of the + result will sum to 1 along the specified axis. 
If `x` is a + scalar, a scalar is returned + + """ + + x_max = cp.amax(x, axis=axis, keepdims=True) + + if x_max.ndim > 0: + x_max[~cp.isfinite(x_max)] = 0 + elif not cp.isfinite(x_max): + x_max = 0 + + tmp = x - x_max + + if tmp.dtype.kind in 'iu': + for out_dtype in [cp.float16, cp.float32, cp.float64]: + if cp.can_cast(tmp.dtype, out_dtype): + tmp = tmp.astype(out_dtype) + break + + out = _log_softmax_kernel(tmp, axis=axis, keepdims=True) + + out = tmp - out + return out diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_poch.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_poch.py new file mode 100644 index 0000000000000000000000000000000000000000..ef88b8f6aae0b5fd0fced7cf1872706831d11d2a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_poch.py @@ -0,0 +1,119 @@ +""" +The source code here is an adaptation with minimal changes from the following +files in SciPy's bundled Cephes library: + +https://github.com/scipy/scipy/blob/master/scipy/special/cephes/poch.c + +Cephes Math Library Release 2.0: April, 1987 +Copyright 1984, 1987 by Stephen L. Moshier +Direct inquiries to 30 Frost Street, Cambridge, MA 02140 +""" + +from cupy import _core + +from cupyx.scipy.special._gammasgn import gammasgn_definition +from cupyx.scipy.special._gammainc import lgam_definition + + +poch_definition = ( + lgam_definition + + gammasgn_definition + + """ +/* + * Pochhammer symbol (a)_m = gamma(a + m) / gamma(a) + */ + +// include for CUDART_NAN, CUDART_INF +#include + + +__device__ double is_nonpos_int(double x) +{ + return x <= 0 && x == ceil(x) && fabs(x) < 1e13; +} + +__device__ double poch(double a, double m) +{ + double r; + + r = 1.0; + + /* + * 1. Reduce magnitude of `m` to |m| < 1 by using recurrence relations. + * + * This may end up in over/underflow, but then the function itself either + * diverges or goes to zero. In case the remainder goes to the opposite + * direction, we end up returning 0*INF = NAN, which is OK. 
+ */ + + /* Recurse down */ + while (m >= 1.0) { + if (a + m == 1) { + break; + } + m -= 1.0; + r *= (a + m); + if (!isfinite(r) || r == 0) { + break; + } + } + + /* Recurse up */ + while (m <= -1.0) { + if (a + m == 0) { + break; + } + r /= (a + m); + m += 1.0; + if (!isfinite(r) || r == 0) { + break; + } + } + + /* + * 2. Evaluate function with reduced `m` + * + * Now either `m` is not big, or the `r` product has over/underflown. + * If so, the function itself does similarly. + */ + + if (m == 0) { + /* Easy case */ + return r; + } + else if (a > 1e4 && fabs(m) <= 1) { + /* Avoid loss of precision */ + return r * pow(a, m) * ( + 1 + + m*(m-1)/(2*a) + + m*(m-1)*(m-2)*(3*m-1)/(24*a*a) + + m*m*(m-1)*(m-1)*(m-2)*(m-3)/(48*a*a*a) + ); + } + + /* Check for infinity */ + if (is_nonpos_int(a + m) && !is_nonpos_int(a) && a + m != m) { + return CUDART_INF; + } + + /* Check for zero */ + if (!is_nonpos_int(a + m) && is_nonpos_int(a)) { + return 0; + } + + return r * exp(lgam(a + m) - lgam(a)) * gammasgn(a + m) * gammasgn(a); +} +""" +) + +poch = _core.create_ufunc( + "cupyx_scipy_poch", + ("ff->f", "dd->d"), + "out0 = out0_type(poch(in0, in1));", + preamble=poch_definition, + doc="""Elementwise function for scipy.special.poch (Pochhammer symbol) + + .. seealso:: :meth:`scipy.special.poch` + + """, +) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_polygamma.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_polygamma.py new file mode 100644 index 0000000000000000000000000000000000000000..44451a811cea35df10fc9c5edf73a44ed79eee55 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_polygamma.py @@ -0,0 +1,22 @@ +import cupy +from cupyx.scipy.special import _digamma +from cupyx.scipy.special import _gamma +from cupyx.scipy.special import _zeta + + +def polygamma(n, x): + """Polygamma function n. + + Args: + n (cupy.ndarray): The order of the derivative of `psi`. + x (cupy.ndarray): Where to evaluate the polygamma function. 
+ + Returns: + cupy.ndarray: The result. + + .. seealso:: :data:`scipy.special.polygamma` + + """ + n, x = cupy.broadcast_arrays(n, x) + fac2 = (-1.0)**(n+1) * _gamma.gamma(n+1.0) * _zeta.zeta(n+1.0, x) + return cupy.where(n == 0, _digamma.digamma(x), fac2) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_zeta.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_zeta.py new file mode 100644 index 0000000000000000000000000000000000000000..caff3b447b2869173ea448315bbd213fff3ad2ef --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_zeta.py @@ -0,0 +1,129 @@ +# This source code contains SciPy's code. +# https://github.com/scipy/scipy/blob/master/scipy/special/cephes/zeta.c +# +# +# Cephes Math Library Release 2.0: April, 1987 +# Copyright 1984, 1987 by Stephen L. Moshier +# Direct inquiries to 30 Frost Street, Cambridge, MA 02140 + +# TODO(YoshikawaMasashi): float implementation of zeta function + +from cupy import _core + + +zeta_definition = ''' +/* Expansion coefficients + * for Euler-Maclaurin summation formula + * (2k)! 
/ B2k + * where B2k are Bernoulli numbers + */ +__constant__ double A[] = { + 12.0, + -720.0, + 30240.0, + -1209600.0, + 47900160.0, + -1.8924375803183791606e9, /*1.307674368e12/691 */ + 7.47242496e10, + -2.950130727918164224e12, /*1.067062284288e16/3617 */ + 1.1646782814350067249e14, /*5.109094217170944e18/43867 */ + -4.5979787224074726105e15, /*8.028576626982912e20/174611 */ + 1.8152105401943546773e17, /*1.5511210043330985984e23/854513 */ + -7.1661652561756670113e18 /*1.6938241367317436694528e27/236364091 */ +}; + +__constant__ double MACHEP = 1.11022302462515654042E-16; + +/* 30 Nov 86 -- error in third coefficient fixed */ + + +double __device__ zeta(double x, double q) +{ + int i; + double a, b, k, s, t, w; + + if (x == 1.0){ + return 1.0/0.0; + } + + if (x < 1.0) { + return nan(""); + } + + if (q <= 0.0) { + if (q == floor(q)) { + return 1.0/0.0; + } + if (x != floor(x)){ + return nan(""); /* because q^-x not defined */ + } + } + + /* Asymptotic expansion + * http://dlmf.nist.gov/25.11#E43 + */ + if (q > 1e8) { + return (1/(x - 1) + 1/(2*q)) * pow(q, 1 - x); + } + + /* Euler-Maclaurin summation formula */ + + /* Permit negative q but continue sum until n+q > +9 . + * This case should be handled by a reflection formula. + * If q<0 and x is an integer, there is a relation to + * the polyGamma function. + */ + s = pow(q, -x); + a = q; + i = 0; + b = 0.0; + while ((i < 9) || (a <= 9.0)) { + i += 1; + a += 1.0; + b = pow(a, -x); + s += b; + if (fabs(b / s) < MACHEP){ + return s; + } + } + + w = a; + s += b * w / (x - 1.0); + s -= 0.5 * b; + a = 1.0; + k = 0.0; + for (i = 0; i < 12; i++) { + a *= x + k; + b /= w; + t = a * b / A[i]; + s = s + t; + t = fabs(t / s); + if (t < MACHEP){ + return s; + } + k += 1.0; + a *= x + k; + b /= w; + k += 1.0; + } + return s; +} +''' + + +zeta = _core.create_ufunc( + 'cupyx_scipy_special_zeta', ('ff->f', 'dd->d'), + 'out0 = zeta(in0, in1)', + preamble=zeta_definition, + doc="""Hurwitz zeta function. 
+ + Args: + x (cupy.ndarray): Input data, must be real. + q (cupy.ndarray): Input data, must be real. + + Returns: + cupy.ndarray: Values of zeta(x, q). + + .. seealso:: :data:`scipy.special.zeta` + + """) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_zetac.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_zetac.py new file mode 100644 index 0000000000000000000000000000000000000000..349631ca0cbb207a417b09ff6b1487209bb593a3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/special/_zetac.py @@ -0,0 +1,317 @@ +""" +The source code here is an adaptation with minimal changes from the following +files in SciPy's bundled Cephes library: + +https://github.com/scipy/scipy/blob/master/scipy/special/cephes/zetac.c + +Cephes Math Library Release 2.0: April, 1987 +Copyright 1984, 1987 by Stephen L. Moshier +Direct inquiries to 30 Frost Street, Cambridge, MA 02140 +""" + + +from cupy import _core + +from cupyx.scipy.special._digamma import polevl_definition +from cupyx.scipy.special._gammainc import _lanczos_preamble +from cupyx.scipy.special._gammainc import p1evl_definition +from cupyx.scipy.special._zeta import zeta_definition + +zetac_h = """ +#include + +__constant__ double azetac[] = { + -1.50000000000000000000E0, + 0.0, /* Not used; zetac(1.0) is infinity. 
*/ + 6.44934066848226436472E-1, + 2.02056903159594285400E-1, + 8.23232337111381915160E-2, + 3.69277551433699263314E-2, + 1.73430619844491397145E-2, + 8.34927738192282683980E-3, + 4.07735619794433937869E-3, + 2.00839282608221441785E-3, + 9.94575127818085337146E-4, + 4.94188604119464558702E-4, + 2.46086553308048298638E-4, + 1.22713347578489146752E-4, + 6.12481350587048292585E-5, + 3.05882363070204935517E-5, + 1.52822594086518717326E-5, + 7.63719763789976227360E-6, + 3.81729326499983985646E-6, + 1.90821271655393892566E-6, + 9.53962033872796113152E-7, + 4.76932986787806463117E-7, + 2.38450502727732990004E-7, + 1.19219925965311073068E-7, + 5.96081890512594796124E-8, + 2.98035035146522801861E-8, + 1.49015548283650412347E-8, + 7.45071178983542949198E-9, + 3.72533402478845705482E-9, + 1.86265972351304900640E-9, + 9.31327432419668182872E-10}; + +/* 2**x (1 - 1/x) (zeta(x) - 1) = P(1/x)/Q(1/x), 1 <= x <= 10 */ +__constant__ double P[9] = { + 5.85746514569725319540E11, + 2.57534127756102572888E11, + 4.87781159567948256438E10, + 5.15399538023885770696E9, + 3.41646073514754094281E8, + 1.60837006880656492731E7, + 5.92785467342109522998E5, + 1.51129169964938823117E4, + 2.01822444485997955865E2, +}; + +__constant__ double Q[8] = { + /* 1.00000000000000000000E0, */ + 3.90497676373371157516E11, + 5.22858235368272161797E10, + 5.64451517271280543351E9, + 3.39006746015350418834E8, + 1.79410371500126453702E7, + 5.66666825131384797029E5, + 1.60382976810944131506E4, + 1.96436237223387314144E2, +}; + +/* log(zeta(x) - 1 - 2**-x), 10 <= x <= 50 */ +__constant__ double Z[11] = { + 8.70728567484590192539E6, + 1.76506865670346462757E8, + 2.60889506707483264896E10, + 5.29806374009894791647E11, + 2.26888156119238241487E13, + 3.31884402932705083599E14, + 5.13778997975868230192E15, + -1.98123688133907171455E15, + -9.92763810039983572356E16, + 7.82905376180870586444E16, + 9.26786275768927717187E16, +}; + +__constant__ double B[10] = { + /* 1.00000000000000000000E0, */ + -7.92625410563741062861E6, + 
-1.60529969932920229676E8, + -2.37669260975543221788E10, + -4.80319584350455169857E11, + -2.07820961754173320170E13, + -2.96075404507272223680E14, + -4.86299103694609136686E15, + 5.34589509675789930199E15, + 5.71464111092297631292E16, + -1.79915597658676556828E16, +}; + +/* (1-x) (zeta(x) - 1), 0 <= x <= 1 */ +__constant__ double R[6] = { + -3.28717474506562731748E-1, + 1.55162528742623950834E1, + -2.48762831680821954401E2, + 1.01050368053237678329E3, + 1.26726061410235149405E4, + -1.11578094770515181334E5, +}; + +__constant__ double S[5] = { + /* 1.00000000000000000000E0, */ + 1.95107674914060531512E1, + 3.17710311750646984099E2, + 3.03835500874445748734E3, + 2.03665876435770579345E4, + 7.43853965136767874343E4, +}; + +__constant__ double TAYLOR0[10] = { + -1.0000000009110164892, + -1.0000000057646759799, + -9.9999983138417361078e-1, + -1.0000013011460139596, + -1.000001940896320456, + -9.9987929950057116496e-1, + -1.000785194477042408, + -1.0031782279542924256, + -9.1893853320467274178e-1, + -1.5, +}; + +#define MAXL2 127 +#define SQRT_2_PI 0.79788456080286535587989 +#define M_E 2.71828182845904523536028747135 /* e */ + +""" +zetac_positive_definition = """ +/* + * Compute zetac for positive arguments + */ +__device__ inline double zetac_positive(double x) +{ + int i; + double a, b, s, w; + + if (x == 1.0) + { + return CUDART_INF; + } + + if (x >= MAXL2) + { + /* because first term is 2**-x */ + return 0.0; + } + + /* Tabulated values for integer argument */ + w = floor(x); + if (w == x) + { + i = x; + if (i < 31) + { + return (azetac[i]); + } + } + + if (x < 1.0) + { + w = 1.0 - x; + a = polevl<5>(x, R) / (w * p1evl<5>(x, S)); + return a; + } + + if (x <= 10.0) + { + b = pow(2.0, x) * (x - 1.0); + w = 1.0 / x; + s = (x * polevl<8>(w, P)) / (b * p1evl<8>(w, Q)); + return s; + } + + if (x <= 50.0) + { + b = pow(2.0, -x); + w = polevl<10>(x, Z) / p1evl<10>(x, B); + w = exp(w) + b; + return w; + } + + /* Basic sum of inverse powers */ + s = 0.0; + a = 1.0; + do + { 
+ a += 2.0; + b = pow(a, -x); + s += b; + } while (b / s > MACHEP); + + b = pow(2.0, -x); + s = (s + b) / (1.0 - b); + return s; +} + +""" + +zetac_smallneg_definition = """ +__device__ inline double zetac_smallneg(double x) +{ + return polevl<9>(x, TAYLOR0); +} +""" + +zeta_reflection = """ +__device__ inline double zeta_reflection(double x) +{ + double base, large_term, small_term, hx, x_shift; + + hx = x / 2; + if (hx == floor(hx)) + { + /* Hit a zero of the sine factor */ + return 0; + } + + /* Reduce the argument to sine */ + x_shift = fmod(x, 4); + small_term = -SQRT_2_PI * sin(0.5 * M_PI * x_shift); + small_term *= lanczos_sum_expg_scaled(x + 1) * zeta(x + 1, 1); + + /* Group large terms together to prevent overflow */ + base = (x + lanczos_g + 0.5) / (2 * M_PI *M_E); + large_term = pow(base, x + 0.5); + if (isfinite(large_term)) + { + return large_term * small_term; + } + /* + * We overflowed, but we might be able to stave off overflow by + * factoring in the small term earlier. To do this we compute + * + * (sqrt(large_term) * small_term) * sqrt(large_term) + * + * Since we only call this method for negative x bounded away from + * zero, the small term can only be as small sine on that region; + * i.e. about machine epsilon. This means that if the above still + * overflows, then there was truly no avoiding it. 
+ */ + large_term = pow(base, 0.5 * x + 0.25); + return (large_term * small_term) * large_term; +} + +""" +zetac_definition = ( + zeta_definition + + zetac_h + + zetac_smallneg_definition + + zetac_positive_definition + + zeta_reflection + + """ + /* + * Riemann zeta function, minus one + */ +double __device__ zetac(double x) +{ + if (isnan(x)) + { + return x; + } + else if (x == -CUDART_INF) + { + return nan(""); + } + else if (x < 0.0 && x > -0.01) + { + return zetac_smallneg(x); + } + else if (x < 0.0) + { + return zeta_reflection(-x) - 1; + } + else + { + return zetac_positive(x); + } +} + """ +) +zetac_preamble = (polevl_definition+p1evl_definition + + _lanczos_preamble+zetac_definition) + +zetac = _core.create_ufunc( + 'cupyx_scipy_special_zetac', ('f->f', 'd->d'), + 'out0 = zetac(in0)', + preamble=zetac_preamble, + doc="""Riemann zeta function minus 1. + + Args: + x (cupy.ndarray): Input data, must be real. + + Returns: + cupy.ndarray: Values of zeta(x)-1. + + .. seealso:: :data:`scipy.special.zetac` + + """) diff --git a/vllm/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_api.cpython-310.pyc b/vllm/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37601247b4ee38e5524aae45c59bed6a06f12083 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_api.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f8ab8ef1cfb2e9d4dcc8ece2e51ee35ea22529e6a08e62c1f0600eefa4786a8 +size 358705