Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__init__.py +44 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_base.py +585 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_compressed.py +860 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_construct.py +582 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_coo.py +568 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_csc.py +413 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_csr.py +1242 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_data.py +398 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_dia.py +219 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_extract.py +81 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_index.py +703 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_sputils.py +169 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_util.py +26 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/__init__.py +4 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/__pycache__/_traversal.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/_traversal.py +119 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__init__.py +22 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_eigen.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_interface.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_iterative.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_lobpcg.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_norm.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_solve.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_eigen.py +430 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_interface.py +578 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_iterative.py +409 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_lobpcg.py +674 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_norm.py +111 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_solve.py +1036 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_basic.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_bessel.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_beta.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_binom.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_complexstuff.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_convex_analysis.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_digamma.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_ellip.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_erf.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_exp1.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_expi.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_expn.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gamma.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gammainc.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gammaln.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gammasgn.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_loggamma.cpython-310.pyc +0 -0
.gitattributes
CHANGED
|
@@ -1439,3 +1439,4 @@ parrot/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_function_ba
|
|
| 1439 |
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1440 |
vllm/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1441 |
vglm/bin/python filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 1439 |
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1440 |
vllm/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1441 |
vglm/bin/python filter=lfs diff=lfs merge=lfs -text
|
| 1442 |
+
vllm/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__init__.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from cupyx.scipy.sparse._base import issparse # NOQA
|
| 2 |
+
from cupyx.scipy.sparse._base import isspmatrix # NOQA
|
| 3 |
+
from cupyx.scipy.sparse._base import spmatrix # NOQA
|
| 4 |
+
from cupyx.scipy.sparse._base import SparseWarning # NOQA
|
| 5 |
+
from cupyx.scipy.sparse._base import SparseEfficiencyWarning # NOQA
|
| 6 |
+
from cupyx.scipy.sparse._coo import coo_matrix # NOQA
|
| 7 |
+
from cupyx.scipy.sparse._coo import isspmatrix_coo # NOQA
|
| 8 |
+
from cupyx.scipy.sparse._csc import csc_matrix # NOQA
|
| 9 |
+
from cupyx.scipy.sparse._csc import isspmatrix_csc # NOQA
|
| 10 |
+
from cupyx.scipy.sparse._csr import csr_matrix # NOQA
|
| 11 |
+
from cupyx.scipy.sparse._csr import isspmatrix_csr # NOQA
|
| 12 |
+
from cupyx.scipy.sparse._dia import dia_matrix # NOQA
|
| 13 |
+
from cupyx.scipy.sparse._dia import isspmatrix_dia # NOQA
|
| 14 |
+
|
| 15 |
+
from cupyx.scipy.sparse._construct import eye # NOQA
|
| 16 |
+
from cupyx.scipy.sparse._construct import identity # NOQA
|
| 17 |
+
from cupyx.scipy.sparse._construct import rand # NOQA
|
| 18 |
+
from cupyx.scipy.sparse._construct import random # NOQA
|
| 19 |
+
from cupyx.scipy.sparse._construct import spdiags # NOQA
|
| 20 |
+
from cupyx.scipy.sparse._construct import diags # NOQA
|
| 21 |
+
|
| 22 |
+
from cupyx.scipy.sparse._construct import bmat # NOQA
|
| 23 |
+
from cupyx.scipy.sparse._construct import hstack # NOQA
|
| 24 |
+
from cupyx.scipy.sparse._construct import vstack # NOQA
|
| 25 |
+
|
| 26 |
+
# TODO(unno): implement bsr_matrix
|
| 27 |
+
# TODO(unno): implement dok_matrix
|
| 28 |
+
# TODO(unno): implement lil_matrix
|
| 29 |
+
|
| 30 |
+
from cupyx.scipy.sparse._construct import kron # NOQA
|
| 31 |
+
from cupyx.scipy.sparse._construct import kronsum # NOQA
|
| 32 |
+
# TODO(unno): implement diags
|
| 33 |
+
# TODO(unno): implement block_diag
|
| 34 |
+
|
| 35 |
+
from cupyx.scipy.sparse._extract import find # NOQA
|
| 36 |
+
from cupyx.scipy.sparse._extract import tril # NOQA
|
| 37 |
+
from cupyx.scipy.sparse._extract import triu # NOQA
|
| 38 |
+
|
| 39 |
+
# TODO(unno): implement save_npz
|
| 40 |
+
# TODO(unno): implement load_npz
|
| 41 |
+
|
| 42 |
+
# TODO(unno): implement isspmatrix_bsr(x)
|
| 43 |
+
# TODO(unno): implement isspmatrix_lil(x)
|
| 44 |
+
# TODO(unno): implement isspmatrix_dok(x)
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_base.py
ADDED
|
@@ -0,0 +1,585 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy
|
| 2 |
+
|
| 3 |
+
import cupy
|
| 4 |
+
from cupy import _core
|
| 5 |
+
from cupyx.scipy.sparse import _util
|
| 6 |
+
from cupyx.scipy.sparse import _sputils
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
try:
|
| 10 |
+
import scipy.sparse as _sparse
|
| 11 |
+
SparseWarning = _sparse.SparseWarning
|
| 12 |
+
SparseEfficiencyWarning = _sparse.SparseEfficiencyWarning
|
| 13 |
+
except ImportError:
|
| 14 |
+
class SparseWarning(Warning): # type: ignore
|
| 15 |
+
pass
|
| 16 |
+
|
| 17 |
+
class SparseEfficiencyWarning(SparseWarning): # type: ignore
|
| 18 |
+
pass
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
# TODO(asi1024): Implement _spbase
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class spmatrix(object):
|
| 25 |
+
|
| 26 |
+
"""Base class of all sparse matrixes.
|
| 27 |
+
|
| 28 |
+
See :class:`scipy.sparse.spmatrix`
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
__array_priority__ = 101
|
| 32 |
+
|
| 33 |
+
def __init__(self, maxprint=50):
|
| 34 |
+
if self.__class__ == spmatrix:
|
| 35 |
+
raise ValueError(
|
| 36 |
+
'This class is not intended to be instantiated directly.')
|
| 37 |
+
self.maxprint = maxprint
|
| 38 |
+
|
| 39 |
+
@property
|
| 40 |
+
def device(self):
|
| 41 |
+
"""CUDA device on which this array resides."""
|
| 42 |
+
raise NotImplementedError
|
| 43 |
+
|
| 44 |
+
def get(self, stream=None):
|
| 45 |
+
"""Return a copy of the array on host memory.
|
| 46 |
+
|
| 47 |
+
Args:
|
| 48 |
+
stream (cupy.cuda.Stream): CUDA stream object. If it is given, the
|
| 49 |
+
copy runs asynchronously. Otherwise, the copy is synchronous.
|
| 50 |
+
|
| 51 |
+
Returns:
|
| 52 |
+
scipy.sparse.spmatrix: An array on host memory.
|
| 53 |
+
|
| 54 |
+
"""
|
| 55 |
+
raise NotImplementedError
|
| 56 |
+
|
| 57 |
+
def __len__(self):
|
| 58 |
+
raise TypeError('sparse matrix length is ambiguous; '
|
| 59 |
+
'use getnnz() or shape[0]')
|
| 60 |
+
|
| 61 |
+
def __str__(self):
|
| 62 |
+
# TODO(unno): Do not use get method which is only available when scipy
|
| 63 |
+
# is installed.
|
| 64 |
+
return str(self.get())
|
| 65 |
+
|
| 66 |
+
def __iter__(self):
|
| 67 |
+
for r in range(self.shape[0]):
|
| 68 |
+
yield self[r, :]
|
| 69 |
+
|
| 70 |
+
def __bool__(self):
|
| 71 |
+
if self.shape == (1, 1):
|
| 72 |
+
return self.nnz != 0
|
| 73 |
+
else:
|
| 74 |
+
raise ValueError('The truth value of an array with more than one '
|
| 75 |
+
'element is ambiguous. Use a.any() or a.all().')
|
| 76 |
+
|
| 77 |
+
__nonzero__ = __bool__
|
| 78 |
+
|
| 79 |
+
def __eq__(self, other):
|
| 80 |
+
return self.tocsr().__eq__(other)
|
| 81 |
+
|
| 82 |
+
def __ne__(self, other):
|
| 83 |
+
return self.tocsr().__ne__(other)
|
| 84 |
+
|
| 85 |
+
def __lt__(self, other):
|
| 86 |
+
return self.tocsr().__lt__(other)
|
| 87 |
+
|
| 88 |
+
def __gt__(self, other):
|
| 89 |
+
return self.tocsr().__gt__(other)
|
| 90 |
+
|
| 91 |
+
def __le__(self, other):
|
| 92 |
+
return self.tocsr().__le__(other)
|
| 93 |
+
|
| 94 |
+
def __ge__(self, other):
|
| 95 |
+
return self.tocsr().__ge__(other)
|
| 96 |
+
|
| 97 |
+
def __abs__(self):
|
| 98 |
+
return self.tocsr().__abs__()
|
| 99 |
+
|
| 100 |
+
def __add__(self, other):
|
| 101 |
+
return self.tocsr().__add__(other)
|
| 102 |
+
|
| 103 |
+
def __radd__(self, other):
|
| 104 |
+
return self.tocsr().__radd__(other)
|
| 105 |
+
|
| 106 |
+
def __sub__(self, other):
|
| 107 |
+
return self.tocsr().__sub__(other)
|
| 108 |
+
|
| 109 |
+
def __rsub__(self, other):
|
| 110 |
+
return self.tocsr().__rsub__(other)
|
| 111 |
+
|
| 112 |
+
def __mul__(self, other):
|
| 113 |
+
return self.tocsr().__mul__(other)
|
| 114 |
+
|
| 115 |
+
def __rmul__(self, other):
|
| 116 |
+
if cupy.isscalar(other) or isdense(other) and other.ndim == 0:
|
| 117 |
+
return self * other
|
| 118 |
+
else:
|
| 119 |
+
try:
|
| 120 |
+
tr = other.T
|
| 121 |
+
except AttributeError:
|
| 122 |
+
return NotImplemented
|
| 123 |
+
return (self.T * tr).T
|
| 124 |
+
|
| 125 |
+
# matmul (@) operator
|
| 126 |
+
def __matmul__(self, other):
|
| 127 |
+
if _util.isscalarlike(other):
|
| 128 |
+
raise ValueError('Scalar operands are not allowed, '
|
| 129 |
+
'use \'*\' instead')
|
| 130 |
+
return self.__mul__(other)
|
| 131 |
+
|
| 132 |
+
def __rmatmul__(self, other):
|
| 133 |
+
if _util.isscalarlike(other):
|
| 134 |
+
raise ValueError('Scalar operands are not allowed, '
|
| 135 |
+
'use \'*\' instead')
|
| 136 |
+
return self.__rmul__(other)
|
| 137 |
+
|
| 138 |
+
def __div__(self, other):
|
| 139 |
+
return self.tocsr().__div__(other)
|
| 140 |
+
|
| 141 |
+
def __rdiv__(self, other):
|
| 142 |
+
return self.tocsr().__rdiv__(other)
|
| 143 |
+
|
| 144 |
+
def __truediv__(self, other):
|
| 145 |
+
return self.tocsr().__truediv__(other)
|
| 146 |
+
|
| 147 |
+
def __rtruediv__(self, other):
|
| 148 |
+
return self.tocsr().__rtruediv__(other)
|
| 149 |
+
|
| 150 |
+
def __neg__(self):
|
| 151 |
+
return -self.tocsr()
|
| 152 |
+
|
| 153 |
+
def __iadd__(self, other):
|
| 154 |
+
return NotImplemented
|
| 155 |
+
|
| 156 |
+
def __isub__(self, other):
|
| 157 |
+
return NotImplemented
|
| 158 |
+
|
| 159 |
+
def __imul__(self, other):
|
| 160 |
+
return NotImplemented
|
| 161 |
+
|
| 162 |
+
def __idiv__(self, other):
|
| 163 |
+
return self.__itruediv__(other)
|
| 164 |
+
|
| 165 |
+
def __itruediv__(self, other):
|
| 166 |
+
return NotImplemented
|
| 167 |
+
|
| 168 |
+
def __pow__(self, other):
|
| 169 |
+
"""Calculates n-th power of the matrix.
|
| 170 |
+
|
| 171 |
+
This method calculates n-th power of a given matrix. The matrix must
|
| 172 |
+
be a squared matrix, and a given exponent must be an integer.
|
| 173 |
+
|
| 174 |
+
Args:
|
| 175 |
+
other (int): Exponent.
|
| 176 |
+
|
| 177 |
+
Returns:
|
| 178 |
+
cupyx.scipy.sparse.spmatrix: A sparse matrix representing n-th
|
| 179 |
+
power of this matrix.
|
| 180 |
+
|
| 181 |
+
"""
|
| 182 |
+
m, n = self.shape
|
| 183 |
+
if m != n:
|
| 184 |
+
raise TypeError('matrix is not square')
|
| 185 |
+
|
| 186 |
+
if _util.isintlike(other):
|
| 187 |
+
other = int(other)
|
| 188 |
+
if other < 0:
|
| 189 |
+
raise ValueError('exponent must be >= 0')
|
| 190 |
+
|
| 191 |
+
if other == 0:
|
| 192 |
+
import cupyx.scipy.sparse
|
| 193 |
+
return cupyx.scipy.sparse.identity(
|
| 194 |
+
m, dtype=self.dtype, format='csr')
|
| 195 |
+
elif other == 1:
|
| 196 |
+
return self.copy()
|
| 197 |
+
else:
|
| 198 |
+
tmp = self.__pow__(other // 2)
|
| 199 |
+
if other % 2:
|
| 200 |
+
return self * tmp * tmp
|
| 201 |
+
else:
|
| 202 |
+
return tmp * tmp
|
| 203 |
+
elif _util.isscalarlike(other):
|
| 204 |
+
raise ValueError('exponent must be an integer')
|
| 205 |
+
else:
|
| 206 |
+
return NotImplemented
|
| 207 |
+
|
| 208 |
+
@property
|
| 209 |
+
def A(self):
|
| 210 |
+
"""Dense ndarray representation of this matrix.
|
| 211 |
+
|
| 212 |
+
This property is equivalent to
|
| 213 |
+
:meth:`~cupyx.scipy.sparse.spmatrix.toarray` method.
|
| 214 |
+
|
| 215 |
+
"""
|
| 216 |
+
return self.toarray()
|
| 217 |
+
|
| 218 |
+
@property
|
| 219 |
+
def T(self):
|
| 220 |
+
return self.transpose()
|
| 221 |
+
|
| 222 |
+
@property
|
| 223 |
+
def H(self):
|
| 224 |
+
return self.getH()
|
| 225 |
+
|
| 226 |
+
@property
|
| 227 |
+
def ndim(self):
|
| 228 |
+
return 2
|
| 229 |
+
|
| 230 |
+
@property
|
| 231 |
+
def size(self):
|
| 232 |
+
return self.getnnz()
|
| 233 |
+
|
| 234 |
+
@property
|
| 235 |
+
def nnz(self):
|
| 236 |
+
return self.getnnz()
|
| 237 |
+
|
| 238 |
+
@property
|
| 239 |
+
def shape(self):
|
| 240 |
+
return self.get_shape()
|
| 241 |
+
|
| 242 |
+
@shape.setter
|
| 243 |
+
def shape(self, value):
|
| 244 |
+
self.set_shape(value)
|
| 245 |
+
|
| 246 |
+
def asformat(self, format):
|
| 247 |
+
"""Return this matrix in a given sparse format.
|
| 248 |
+
|
| 249 |
+
Args:
|
| 250 |
+
format (str or None): Format you need.
|
| 251 |
+
"""
|
| 252 |
+
if format is None or format == self.format:
|
| 253 |
+
return self
|
| 254 |
+
else:
|
| 255 |
+
return getattr(self, 'to' + format)()
|
| 256 |
+
|
| 257 |
+
def asfptype(self):
|
| 258 |
+
"""Upcasts matrix to a floating point format.
|
| 259 |
+
|
| 260 |
+
When the matrix has floating point type, the method returns itself.
|
| 261 |
+
Otherwise it makes a copy with floating point type and the same format.
|
| 262 |
+
|
| 263 |
+
Returns:
|
| 264 |
+
cupyx.scipy.sparse.spmatrix: A matrix with float type.
|
| 265 |
+
|
| 266 |
+
"""
|
| 267 |
+
if self.dtype.kind == 'f':
|
| 268 |
+
return self
|
| 269 |
+
else:
|
| 270 |
+
typ = numpy.promote_types(self.dtype, 'f')
|
| 271 |
+
return self.astype(typ)
|
| 272 |
+
|
| 273 |
+
def astype(self, t):
|
| 274 |
+
"""Casts the array to given data type.
|
| 275 |
+
|
| 276 |
+
Args:
|
| 277 |
+
t: Type specifier.
|
| 278 |
+
|
| 279 |
+
Returns:
|
| 280 |
+
cupyx.scipy.sparse.spmatrix:
|
| 281 |
+
A copy of the array with the given type and the same format.
|
| 282 |
+
|
| 283 |
+
"""
|
| 284 |
+
return self.tocsr().astype(t).asformat(self.format)
|
| 285 |
+
|
| 286 |
+
def conj(self, copy=True):
|
| 287 |
+
"""Element-wise complex conjugation.
|
| 288 |
+
|
| 289 |
+
If the matrix is of non-complex data type and `copy` is False,
|
| 290 |
+
this method does nothing and the data is not copied.
|
| 291 |
+
|
| 292 |
+
Args:
|
| 293 |
+
copy (bool):
|
| 294 |
+
If True, the result is guaranteed to not share data with self.
|
| 295 |
+
|
| 296 |
+
Returns:
|
| 297 |
+
cupyx.scipy.sparse.spmatrix : The element-wise complex conjugate.
|
| 298 |
+
|
| 299 |
+
"""
|
| 300 |
+
if self.dtype.kind == 'c':
|
| 301 |
+
return self.tocsr(copy=copy).conj(copy=False)
|
| 302 |
+
elif copy:
|
| 303 |
+
return self.copy()
|
| 304 |
+
else:
|
| 305 |
+
return self
|
| 306 |
+
|
| 307 |
+
def conjugate(self, copy=True):
|
| 308 |
+
return self.conj(copy=copy)
|
| 309 |
+
|
| 310 |
+
conjugate.__doc__ = conj.__doc__
|
| 311 |
+
|
| 312 |
+
def copy(self):
|
| 313 |
+
"""Returns a copy of this matrix.
|
| 314 |
+
|
| 315 |
+
No data/indices will be shared between the returned value and current
|
| 316 |
+
matrix.
|
| 317 |
+
"""
|
| 318 |
+
return self.__class__(self, copy=True)
|
| 319 |
+
|
| 320 |
+
def count_nonzero(self):
|
| 321 |
+
"""Number of non-zero entries, equivalent to"""
|
| 322 |
+
raise NotImplementedError
|
| 323 |
+
|
| 324 |
+
def diagonal(self, k=0):
|
| 325 |
+
"""Returns the k-th diagonal of the matrix.
|
| 326 |
+
|
| 327 |
+
Args:
|
| 328 |
+
k (int, optional): Which diagonal to get, corresponding to elements
|
| 329 |
+
a[i, i+k]. Default: 0 (the main diagonal).
|
| 330 |
+
|
| 331 |
+
Returns:
|
| 332 |
+
cupy.ndarray : The k-th diagonal.
|
| 333 |
+
"""
|
| 334 |
+
return self.tocsr().diagonal(k=k)
|
| 335 |
+
|
| 336 |
+
def dot(self, other):
|
| 337 |
+
"""Ordinary dot product"""
|
| 338 |
+
if numpy.isscalar(other):
|
| 339 |
+
return self * other
|
| 340 |
+
else:
|
| 341 |
+
return self @ other
|
| 342 |
+
|
| 343 |
+
def getH(self):
|
| 344 |
+
return self.transpose().conj()
|
| 345 |
+
|
| 346 |
+
def get_shape(self):
|
| 347 |
+
raise NotImplementedError
|
| 348 |
+
|
| 349 |
+
# TODO(unno): Implement getcol
|
| 350 |
+
|
| 351 |
+
def getformat(self):
|
| 352 |
+
return self.format
|
| 353 |
+
|
| 354 |
+
def getmaxprint(self):
|
| 355 |
+
return self.maxprint
|
| 356 |
+
|
| 357 |
+
def getnnz(self, axis=None):
|
| 358 |
+
"""Number of stored values, including explicit zeros."""
|
| 359 |
+
raise NotImplementedError
|
| 360 |
+
|
| 361 |
+
# TODO(unno): Implement getrow
|
| 362 |
+
|
| 363 |
+
def maximum(self, other):
|
| 364 |
+
return self.tocsr().maximum(other)
|
| 365 |
+
|
| 366 |
+
def mean(self, axis=None, dtype=None, out=None):
|
| 367 |
+
"""
|
| 368 |
+
Compute the arithmetic mean along the specified axis.
|
| 369 |
+
|
| 370 |
+
Returns the average of the matrix elements. The average is taken
|
| 371 |
+
over all elements in the matrix by default, otherwise over the
|
| 372 |
+
specified axis. `float64` intermediate and return values are used
|
| 373 |
+
for integer inputs.
|
| 374 |
+
|
| 375 |
+
Args:
|
| 376 |
+
axis {-2, -1, 0, 1, None}: optional
|
| 377 |
+
Axis along which the mean is computed. The default is to
|
| 378 |
+
compute the mean of all elements in the matrix
|
| 379 |
+
(i.e., `axis` = `None`).
|
| 380 |
+
dtype (dtype): optional
|
| 381 |
+
Type to use in computing the mean. For integer inputs, the
|
| 382 |
+
default is `float64`; for floating point inputs, it is the same
|
| 383 |
+
as the input dtype.
|
| 384 |
+
out (cupy.ndarray): optional
|
| 385 |
+
Alternative output matrix in which to place the result. It must
|
| 386 |
+
have the same shape as the expected output, but the type of the
|
| 387 |
+
output values will be cast if necessary.
|
| 388 |
+
|
| 389 |
+
Returns:
|
| 390 |
+
m (cupy.ndarray) : Output array of means
|
| 391 |
+
|
| 392 |
+
.. seealso::
|
| 393 |
+
:meth:`scipy.sparse.spmatrix.mean`
|
| 394 |
+
|
| 395 |
+
"""
|
| 396 |
+
def _is_integral(dtype):
|
| 397 |
+
return (cupy.issubdtype(dtype, cupy.integer) or
|
| 398 |
+
cupy.issubdtype(dtype, cupy.bool_))
|
| 399 |
+
|
| 400 |
+
_sputils.validateaxis(axis)
|
| 401 |
+
|
| 402 |
+
res_dtype = self.dtype.type
|
| 403 |
+
integral = _is_integral(self.dtype)
|
| 404 |
+
|
| 405 |
+
# output dtype
|
| 406 |
+
if dtype is None:
|
| 407 |
+
if integral:
|
| 408 |
+
res_dtype = cupy.float64
|
| 409 |
+
else:
|
| 410 |
+
res_dtype = cupy.dtype(dtype).type
|
| 411 |
+
|
| 412 |
+
# intermediate dtype for summation
|
| 413 |
+
inter_dtype = cupy.float64 if integral else res_dtype
|
| 414 |
+
inter_self = self.astype(inter_dtype)
|
| 415 |
+
|
| 416 |
+
if axis is None:
|
| 417 |
+
return (inter_self / cupy.array(
|
| 418 |
+
self.shape[0] * self.shape[1]))\
|
| 419 |
+
.sum(dtype=res_dtype, out=out)
|
| 420 |
+
|
| 421 |
+
if axis < 0:
|
| 422 |
+
axis += 2
|
| 423 |
+
|
| 424 |
+
# axis = 0 or 1 now
|
| 425 |
+
if axis == 0:
|
| 426 |
+
return (inter_self * (1.0 / self.shape[0])).sum(
|
| 427 |
+
axis=0, dtype=res_dtype, out=out)
|
| 428 |
+
else:
|
| 429 |
+
return (inter_self * (1.0 / self.shape[1])).sum(
|
| 430 |
+
axis=1, dtype=res_dtype, out=out)
|
| 431 |
+
|
| 432 |
+
def minimum(self, other):
|
| 433 |
+
return self.tocsr().minimum(other)
|
| 434 |
+
|
| 435 |
+
def multiply(self, other):
|
| 436 |
+
"""Point-wise multiplication by another matrix"""
|
| 437 |
+
return self.tocsr().multiply(other)
|
| 438 |
+
|
| 439 |
+
# TODO(unno): Implement nonzero
|
| 440 |
+
|
| 441 |
+
def power(self, n, dtype=None):
|
| 442 |
+
return self.tocsr().power(n, dtype=dtype)
|
| 443 |
+
|
| 444 |
+
def reshape(self, *shape, order='C'):
|
| 445 |
+
"""Gives a new shape to a sparse matrix without changing its data.
|
| 446 |
+
|
| 447 |
+
Args:
|
| 448 |
+
shape (tuple):
|
| 449 |
+
The new shape should be compatible with the original shape.
|
| 450 |
+
order: {'C', 'F'} (optional)
|
| 451 |
+
Read the elements using this index order. 'C' means to read and
|
| 452 |
+
write the elements using C-like index order. 'F' means to read
|
| 453 |
+
and write the elements using Fortran-like index order. Default:
|
| 454 |
+
C.
|
| 455 |
+
|
| 456 |
+
Returns:
|
| 457 |
+
cupyx.scipy.sparse.coo_matrix: sparse matrix
|
| 458 |
+
|
| 459 |
+
"""
|
| 460 |
+
shape = _sputils.check_shape(shape, self.shape)
|
| 461 |
+
|
| 462 |
+
if shape == self.shape:
|
| 463 |
+
return self
|
| 464 |
+
|
| 465 |
+
return self.tocoo().reshape(shape, order=order)
|
| 466 |
+
|
| 467 |
+
def set_shape(self, shape):
|
| 468 |
+
self.reshape(shape)
|
| 469 |
+
|
| 470 |
+
def setdiag(self, values, k=0):
|
| 471 |
+
"""Set diagonal or off-diagonal elements of the array.
|
| 472 |
+
|
| 473 |
+
Args:
|
| 474 |
+
values (cupy.ndarray): New values of the diagonal elements.
|
| 475 |
+
Values may have any length. If the diagonal is longer than
|
| 476 |
+
values, then the remaining diagonal entries will not be set.
|
| 477 |
+
If values is longer than the diagonal, then the remaining
|
| 478 |
+
values are ignored. If a scalar value is given, all of the
|
| 479 |
+
diagonal is set to it.
|
| 480 |
+
k (int, optional): Which diagonal to set, corresponding to elements
|
| 481 |
+
a[i, i+k]. Default: 0 (the main diagonal).
|
| 482 |
+
"""
|
| 483 |
+
raise NotImplementedError
|
| 484 |
+
|
| 485 |
+
def sum(self, axis=None, dtype=None, out=None):
|
| 486 |
+
"""Sums the matrix elements over a given axis.
|
| 487 |
+
|
| 488 |
+
Args:
|
| 489 |
+
axis (int or ``None``): Axis along which the sum is computed.
|
| 490 |
+
If it is ``None``, it computes the sum of all the elements.
|
| 491 |
+
Select from ``{None, 0, 1, -2, -1}``.
|
| 492 |
+
dtype: The type of returned matrix. If it is not specified, type
|
| 493 |
+
of the array is used.
|
| 494 |
+
out (cupy.ndarray): Output matrix.
|
| 495 |
+
|
| 496 |
+
Returns:
|
| 497 |
+
cupy.ndarray: Summed array.
|
| 498 |
+
|
| 499 |
+
.. seealso::
|
| 500 |
+
:meth:`scipy.sparse.spmatrix.sum`
|
| 501 |
+
|
| 502 |
+
"""
|
| 503 |
+
_sputils.validateaxis(axis)
|
| 504 |
+
|
| 505 |
+
# This implementation uses multiplication, though it is not efficient
|
| 506 |
+
# for some matrix types. These should override this function.
|
| 507 |
+
|
| 508 |
+
m, n = self.shape
|
| 509 |
+
|
| 510 |
+
if axis is None:
|
| 511 |
+
return self.dot(cupy.ones(n, dtype=self.dtype)).sum(
|
| 512 |
+
dtype=dtype, out=out)
|
| 513 |
+
|
| 514 |
+
if axis < 0:
|
| 515 |
+
axis += 2
|
| 516 |
+
|
| 517 |
+
if axis == 0:
|
| 518 |
+
ret = self.T.dot(cupy.ones(m, dtype=self.dtype)).reshape(1, n)
|
| 519 |
+
else: # axis == 1
|
| 520 |
+
ret = self.dot(cupy.ones(n, dtype=self.dtype)).reshape(m, 1)
|
| 521 |
+
|
| 522 |
+
if out is not None:
|
| 523 |
+
if out.shape != ret.shape:
|
| 524 |
+
raise ValueError('dimensions do not match')
|
| 525 |
+
_core.elementwise_copy(ret, out)
|
| 526 |
+
return out
|
| 527 |
+
elif dtype is not None:
|
| 528 |
+
return ret.astype(dtype, copy=False)
|
| 529 |
+
else:
|
| 530 |
+
return ret
|
| 531 |
+
|
| 532 |
+
def toarray(self, order=None, out=None):
|
| 533 |
+
"""Return a dense ndarray representation of this matrix."""
|
| 534 |
+
return self.tocsr().toarray(order=order, out=out)
|
| 535 |
+
|
| 536 |
+
def tobsr(self, blocksize=None, copy=False):
|
| 537 |
+
"""Convert this matrix to Block Sparse Row format."""
|
| 538 |
+
return self.tocsr(copy=copy).tobsr(copy=False)
|
| 539 |
+
|
| 540 |
+
def tocoo(self, copy=False):
|
| 541 |
+
"""Convert this matrix to COOrdinate format."""
|
| 542 |
+
return self.tocsr(copy=copy).tocoo(copy=False)
|
| 543 |
+
|
| 544 |
+
def tocsc(self, copy=False):
|
| 545 |
+
"""Convert this matrix to Compressed Sparse Column format."""
|
| 546 |
+
return self.tocsr(copy=copy).tocsc(copy=False)
|
| 547 |
+
|
| 548 |
+
def tocsr(self, copy=False):
|
| 549 |
+
"""Convert this matrix to Compressed Sparse Row format."""
|
| 550 |
+
raise NotImplementedError
|
| 551 |
+
|
| 552 |
+
def todense(self, order=None, out=None):
|
| 553 |
+
"""Return a dense matrix representation of this matrix."""
|
| 554 |
+
return self.toarray(order=order, out=out)
|
| 555 |
+
|
| 556 |
+
def todia(self, copy=False):
|
| 557 |
+
"""Convert this matrix to sparse DIAgonal format."""
|
| 558 |
+
return self.tocsr(copy=copy).todia(copy=False)
|
| 559 |
+
|
| 560 |
+
def todok(self, copy=False):
|
| 561 |
+
"""Convert this matrix to Dictionary Of Keys format."""
|
| 562 |
+
return self.tocsr(copy=copy).todok(copy=False)
|
| 563 |
+
|
| 564 |
+
def tolil(self, copy=False):
|
| 565 |
+
"""Convert this matrix to LInked List format."""
|
| 566 |
+
return self.tocsr(copy=copy).tolil(copy=False)
|
| 567 |
+
|
| 568 |
+
def transpose(self, axes=None, copy=False):
|
| 569 |
+
"""Reverses the dimensions of the sparse matrix."""
|
| 570 |
+
return self.tocsr(copy=copy).transpose(axes=axes, copy=False)
|
| 571 |
+
|
| 572 |
+
|
| 573 |
+
def issparse(x):
|
| 574 |
+
"""Checks if a given matrix is a sparse matrix.
|
| 575 |
+
|
| 576 |
+
Returns:
|
| 577 |
+
bool: Returns if ``x`` is :class:`cupyx.scipy.sparse.spmatrix` that is
|
| 578 |
+
a base class of all sparse matrix classes.
|
| 579 |
+
|
| 580 |
+
"""
|
| 581 |
+
return isinstance(x, spmatrix)
|
| 582 |
+
|
| 583 |
+
|
| 584 |
+
isdense = _util.isdense
|
| 585 |
+
isspmatrix = issparse
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_compressed.py
ADDED
|
@@ -0,0 +1,860 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import string
|
| 2 |
+
import warnings
|
| 3 |
+
|
| 4 |
+
import numpy
|
| 5 |
+
try:
|
| 6 |
+
import scipy.sparse
|
| 7 |
+
scipy_available = True
|
| 8 |
+
except ImportError:
|
| 9 |
+
scipy_available = False
|
| 10 |
+
|
| 11 |
+
import cupy
|
| 12 |
+
import cupyx
|
| 13 |
+
|
| 14 |
+
from cupy import _core
|
| 15 |
+
from cupy._core import _scalar
|
| 16 |
+
from cupy._creation import basic
|
| 17 |
+
from cupyx.scipy.sparse import _base
|
| 18 |
+
from cupyx.scipy.sparse import _coo
|
| 19 |
+
from cupyx.scipy.sparse import _data as sparse_data
|
| 20 |
+
from cupyx.scipy.sparse import _sputils
|
| 21 |
+
from cupyx.scipy.sparse import _util
|
| 22 |
+
|
| 23 |
+
from cupyx.scipy.sparse import _index
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class _compressed_sparse_matrix(sparse_data._data_matrix,
|
| 27 |
+
sparse_data._minmax_mixin,
|
| 28 |
+
_index.IndexMixin):
|
| 29 |
+
|
| 30 |
+
# CUDA C source template for per-major-axis max/min reductions over the
# values of a compressed sparse matrix.  ``${func}``, ``${op}`` and
# ``${cond}`` are substituted below to generate four concrete kernels.
# One thread handles one major-axis block delimited by x[tid]..y[tid].
_max_min_reduction_code = r'''
    extern "C" __global__
    void ${func}(double* data, int* x, int* y, int length,
                 double* z) {
        // Get the index of the block
        int tid = blockIdx.x * blockDim.x + threadIdx.x;

        // Calculate the block length
        int block_length = y[tid] - x[tid];

        // Select initial value based on the block density
        double running_value = 0;
        if (${cond}){
            running_value = data[x[tid]];
        } else {
            running_value = 0;
        }

        // Iterate over the block and update
        for (int entry = x[tid]; entry < y[tid]; entry++){
            if (data[entry] != data[entry]){
                // Check for NaN
                running_value = nan("");
                break;
            } else {
                // Check for a value update
                if (data[entry] ${op} running_value){
                    running_value = data[entry];
                }
            }
        }

        // Store in the return function
        z[tid] = running_value;
    }'''

# max over a block; the initial value is the first entry only when the
# block is fully dense (otherwise an implicit zero participates).
_max_reduction_kern = _core.RawKernel(
    string.Template(_max_min_reduction_code).substitute(
        func='max_reduction', op='>', cond='block_length == length'),
    'max_reduction')

# max over the stored (nonzero) entries only.
_max_nonzero_reduction_kern = _core.RawKernel(
    string.Template(_max_min_reduction_code).substitute(
        func='max_nonzero_reduction', op='>', cond='block_length > 0'),
    'max_nonzero_reduction')

# min over a block, with the same density rule as the max kernel.
_min_reduction_kern = _core.RawKernel(
    string.Template(_max_min_reduction_code).substitute(
        func='min_reduction', op='<', cond='block_length == length'),
    'min_reduction')

# min over the stored (nonzero) entries only.
_min_nonzero_reduction_kern = _core.RawKernel(
    string.Template(_max_min_reduction_code).substitute(
        func='min_nonzero_reduction', op='<', cond='block_length > 0'),
    'min_nonzero_reduction')

# For _max_arg_reduction_mod and _min_arg_reduction_mod below, we pick
# the right template specialization according to input dtypes at runtime.
# The distinction in int types (T2) is important for portability in OS.

# Templated CUDA C source for argmax/argmin per major-axis block.
# T1 is the data dtype (float/double), T2 the output index dtype.
_argmax_argmin_code = r'''
    template<typename T1, typename T2> __global__ void
    ${func}_arg_reduction(T1* data, int* indices, int* x, int* y,
                          int length, T2* z) {
        // Get the index of the block
        int tid = blockIdx.x * blockDim.x + threadIdx.x;

        // Calculate the block length
        int block_length = y[tid] - x[tid];

        // Select initial value based on the block density
        int data_index = 0;
        double data_value = 0;

        if (block_length == length){
            // Block is dense. Fill the first value
            data_value = data[x[tid]];
            data_index = indices[x[tid]];
        } else if (block_length > 0) {
            // Block has at least one zero. Assign first occurrence as the
            // starting reference
            data_value = 0;
            for (data_index = 0; data_index < length; data_index++){
                if (data_index != indices[x[tid] + data_index] ||
                    x[tid] + data_index >= y[tid]){
                    break;
                }
            }
        } else {
            // Zero valued array
            data_value = 0;
            data_index = 0;
        }

        // Iterate over the section of the sparse matrix
        for (int entry = x[tid]; entry < y[tid]; entry++){
            if (data[entry] != data[entry]){
                // Check for NaN
                data_value = nan("");
                data_index = 0;
                break;
            } else {
                // Check for a value update
                if (data[entry] ${op} data_value){
                    data_index = indices[entry];
                    data_value = data[entry];
                }
            }
        }

        // Store in the return function
        z[tid] = data_index;
    }'''

# Compiled argmax module; the concrete <T1, T2> specialization is selected
# at call time from the listed name_expressions.
_max_arg_reduction_mod = _core.RawModule(
    code=string.Template(_argmax_argmin_code).substitute(
        func='max', op='>'),
    options=('-std=c++11',),
    name_expressions=['max_arg_reduction<float, int>',
                      'max_arg_reduction<float, long long>',
                      'max_arg_reduction<double, int>',
                      'max_arg_reduction<double, long long>'])

# Compiled argmin module, mirroring the argmax module above.
_min_arg_reduction_mod = _core.RawModule(
    code=string.Template(_argmax_argmin_code).substitute(
        func='min', op='<'),
    options=('-std=c++11',),
    name_expressions=['min_arg_reduction<float, int>',
                      'min_arg_reduction<float, long long>',
                      'min_arg_reduction<double, int>',
                      'min_arg_reduction<double, long long>'])

# TODO(leofang): rewrite a more load-balanced approach than this naive one?
# One thread per major-axis slot: reports False if any adjacent index pair
# within the slot is out of order.
_has_sorted_indices_kern = _core.ElementwiseKernel(
    'raw T indptr, raw T indices',
    'bool diff',
    '''
    bool diff_out = true;
    for (T jj = indptr[i]; jj < indptr[i+1] - 1; jj++) {
      if (indices[jj] > indices[jj+1]){
        diff_out = false;
      }
    }
    diff = diff_out;
    ''', 'cupyx_scipy_sparse_has_sorted_indices')

# TODO(leofang): rewrite a more load-balanced approach than this naive one?
# Like the kernel above but strict (>=): duplicates also break canonical
# format; a decreasing indptr pair fails immediately.
_has_canonical_format_kern = _core.ElementwiseKernel(
    'raw T indptr, raw T indices',
    'bool diff',
    '''
    bool diff_out = true;
    if (indptr[i] > indptr[i+1]) {
      diff = false;
      return;
    }
    for (T jj = indptr[i]; jj < indptr[i+1] - 1; jj++) {
      if (indices[jj] >= indices[jj+1]) {
        diff_out = false;
      }
    }
    diff = diff_out;
    ''', 'cupyx_scipy_sparse_has_canonical_format')
def __init__(self, arg1, shape=None, dtype=None, copy=False):
    """Initialize a compressed (CSR/CSC) sparse matrix.

    Args:
        arg1: One of: another cupyx sparse matrix; a ``(m, n)`` shape
            tuple; a scipy.sparse matrix; a ``(data, (row, col))`` pair;
            a ``(data, indices, indptr)`` triple; or a dense array.
        shape (tuple): Optional 2-tuple overriding the inferred shape.
        dtype: Optional dtype; must be bool, float32, float64,
            complex64 or complex128.
        copy (bool): If ``True``, copy the input arrays when possible.

    Raises:
        ValueError: on invalid shape, malformed component arrays,
            unsupported dtype, or an unsupported initializer.
        TypeError: if a dense input has more than two dimensions.
    """
    from cupyx import cusparse

    if shape is not None:
        if not _util.isshape(shape):
            raise ValueError('invalid shape (must be a 2-tuple of int)')
        shape = int(shape[0]), int(shape[1])

    if _base.issparse(arg1):
        # Another cupyx sparse matrix: convert to this object's format.
        x = arg1.asformat(self.format)
        data = x.data
        indices = x.indices
        indptr = x.indptr

        if arg1.format != self.format:
            # When formats are different, all arrays are already copied
            copy = False

        if shape is None:
            shape = arg1.shape

    elif _util.isshape(arg1):
        # Shape tuple: build an empty matrix of that shape.
        m, n = arg1
        m, n = int(m), int(n)
        data = basic.zeros(0, dtype if dtype else 'd')
        indices = basic.zeros(0, 'i')
        indptr = basic.zeros(self._swap(m, n)[0] + 1, dtype='i')
        # shape and copy argument is ignored
        shape = (m, n)
        copy = False

    elif scipy_available and scipy.sparse.issparse(arg1):
        # Convert scipy.sparse to cupyx.scipy.sparse
        x = arg1.asformat(self.format)
        data = cupy.array(x.data)
        indices = cupy.array(x.indices, dtype='i')
        indptr = cupy.array(x.indptr, dtype='i')
        copy = False

        if shape is None:
            shape = arg1.shape

    elif isinstance(arg1, tuple) and len(arg1) == 2:
        # Note: This implementation is not efficient, as it first
        # constructs a sparse matrix with coo format, then converts it to
        # compressed format.
        sp_coo = _coo.coo_matrix(arg1, shape=shape, dtype=dtype, copy=copy)
        sp_compressed = sp_coo.asformat(self.format)
        data = sp_compressed.data
        indices = sp_compressed.indices
        indptr = sp_compressed.indptr

    elif isinstance(arg1, tuple) and len(arg1) == 3:
        # Raw (data, indices, indptr) triple: validate the components.
        data, indices, indptr = arg1
        if not (_base.isdense(data) and data.ndim == 1 and
                _base.isdense(indices) and indices.ndim == 1 and
                _base.isdense(indptr) and indptr.ndim == 1):
            raise ValueError(
                'data, indices, and indptr should be 1-D')

        if len(data) != len(indices):
            raise ValueError('indices and data should have the same size')

    elif _base.isdense(arg1):
        # Dense input: promote 0-d/1-d to 2-d, then let the subclass
        # extract the compressed components.
        if arg1.ndim > 2:
            raise TypeError('expected dimension <= 2 array or matrix')
        elif arg1.ndim == 1:
            arg1 = arg1[None]
        elif arg1.ndim == 0:
            arg1 = arg1[None, None]
        data, indices, indptr = self._convert_dense(arg1)
        copy = False
        if shape is None:
            shape = arg1.shape

    else:
        raise ValueError(
            'Unsupported initializer format')

    if dtype is None:
        dtype = data.dtype
    else:
        dtype = numpy.dtype(dtype)

    if dtype.char not in '?fdFD':
        raise ValueError(
            'Only bool, float32, float64, complex64 and complex128 '
            'are supported')

    data = data.astype(dtype, copy=copy)
    sparse_data._data_matrix.__init__(self, data)

    # Index arrays are normalized to 32-bit ints ('i').
    self.indices = indices.astype('i', copy=copy)
    self.indptr = indptr.astype('i', copy=copy)

    if shape is None:
        # Infer shape from the component arrays; NOTE this synchronizes
        # with the device (``indices.max()``).
        shape = self._swap(len(indptr) - 1, int(indices.max()) + 1)

    major, minor = self._swap(*shape)
    if len(indptr) != major + 1:
        raise ValueError('index pointer size (%d) should be (%d)'
                         % (len(indptr), major + 1))

    self._descr = cusparse.MatDescriptor.create()
    self._shape = shape
def _with_data(self, data, copy=True):
    """Build a matrix with the same sparsity structure but new ``data``.

    Args:
        data: Replacement value array.
        copy (bool): If ``True``, the index arrays are copied; otherwise
            they are shared with ``self``.
    """
    indices = self.indices.copy() if copy else self.indices
    indptr = self.indptr.copy() if copy else self.indptr
    return self.__class__(
        (data, indices, indptr), shape=self.shape, dtype=data.dtype)
def _convert_dense(self, x):
    # Convert a dense 2-D array into (data, indices, indptr) components.
    # Format-specific; implemented by the CSR/CSC subclasses.
    raise NotImplementedError

def _swap(self, x, y):
    # Return the pair reordered as (major, minor) for this format.
    # Implemented by subclasses (CSR keeps order, CSC swaps).
    raise NotImplementedError

def _add_sparse(self, other, alpha, beta):
    # Compute ``alpha * self + beta * other`` for sparse ``other``.
    # Implemented by subclasses.
    raise NotImplementedError
def _add(self, other, lhs_negative, rhs_negative):
    """Shared implementation behind ``+``/``-`` and their reflected forms.

    Args:
        other: Scalar, sparse matrix, or dense operand.
        lhs_negative (bool): Negate ``self`` before combining.
        rhs_negative (bool): Negate ``other`` before combining.
    """
    if cupy.isscalar(other):
        if other == 0:
            # Adding/subtracting zero only affects the sign of self.
            if lhs_negative:
                return -self
            else:
                return self.copy()
        else:
            raise NotImplementedError(
                'adding a nonzero scalar to a sparse matrix is not '
                'supported')
    elif _base.isspmatrix(other):
        # Sparse + sparse: encode the signs as +/-1 coefficients and
        # delegate to the format-specific kernel.
        alpha = -1 if lhs_negative else 1
        beta = -1 if rhs_negative else 1
        return self._add_sparse(other, alpha, beta)
    elif _base.isdense(other):
        # Sparse + dense densifies: the result is a dense matrix.
        if lhs_negative:
            if rhs_negative:
                return -self.todense() - other
            else:
                return other - self.todense()
        else:
            if rhs_negative:
                return self.todense() - other
            else:
                return self.todense() + other
    else:
        return NotImplemented
def __add__(self, other):
    # self + other
    return self._add(other, False, False)

def __radd__(self, other):
    # other + self: addition commutes, so reuse the same path.
    return self._add(other, False, False)

def __sub__(self, other):
    # self - other
    return self._add(other, False, True)

def __rsub__(self, other):
    # other - self
    return self._add(other, True, False)
def _get_intXint(self, row, col):
    """Return the single element at ``(row, col)`` as a 0-d array."""
    major, minor = self._swap(row, col)
    # Extract just the one major-axis slot that contains the element.
    data, indices, _ = _index._get_csr_submatrix_major_axis(
        self.data, self.indices, self.indptr, major, major + 1)
    dtype = data.dtype
    res = cupy.zeros((), dtype=dtype)
    if dtype.kind == 'c':
        # Complex values: the lookup kernel works on separate
        # real/imaginary views.
        _index._compress_getitem_complex_kern(
            data.real, data.imag, indices, minor, res.real, res.imag)
    else:
        _index._compress_getitem_kern(data, indices, minor, res)
    return res
def _get_sliceXslice(self, row, col):
    """Slice along both axes with Python slice objects."""
    major, minor = self._swap(row, col)
    # With a contiguous major step, the major slice may alias the
    # original arrays, so the minor slice is asked to copy instead.
    needs_copy = major.step in (1, None)
    major_view = self._major_slice(major)
    return major_view._minor_slice(minor, copy=needs_copy)
def _get_arrayXarray(self, row, col, not_found_val=0):
    # inner indexing
    idx_dtype = self.indices.dtype
    M, N = self._swap(*self.shape)
    major, minor = self._swap(row, col)
    major = major.astype(idx_dtype, copy=False)
    minor = minor.astype(idx_dtype, copy=False)

    # Sample the matrix at every (major, minor) pair; positions missing
    # from the sparsity structure yield ``not_found_val``.
    val = _index._csr_sample_values(
        M, N, self.indptr, self.indices, self.data,
        major.ravel(), minor.ravel(),
        not_found_val)

    if major.ndim == 1:
        # Scipy returns `matrix` here
        return cupy.expand_dims(val, 0)
    return self.__class__(val.reshape(major.shape))
def _get_columnXarray(self, row, col):
    """Outer indexing with integer arrays along both axes."""
    major, minor = self._swap(row, col)
    major_selected = self._major_index_fancy(major)
    return major_selected._minor_index_fancy(minor)
def _major_index_fancy(self, idx):
    """Index along the major axis where idx is an array of ints.
    """
    _, N = self._swap(*self.shape)
    M = idx.size
    new_shape = self._swap(M, N)
    if self.nnz == 0 or M == 0:
        # Nothing to gather: return an empty matrix of the target shape.
        return self.__class__(new_shape, dtype=self.dtype)

    # Gather the selected major-axis slots with a dedicated kernel.
    return self.__class__(
        _index._csr_row_index(self.data, self.indices, self.indptr, idx),
        shape=new_shape, copy=False)
def _minor_index_fancy(self, idx):
    """Index along the minor axis where idx is an array of ints.
    """
    M, _ = self._swap(*self.shape)
    N = idx.size
    new_shape = self._swap(M, N)
    if self.nnz == 0 or N == 0:
        return self.__class__(new_shape, dtype=self.dtype)

    if idx.size * M < self.nnz:
        # TODO (asi1024): Implement faster algorithm.
        pass

    # Convert to the transposed compressed format, index along what is
    # then the major axis, and convert back.
    return self._tocsx()._major_index_fancy(idx)._tocsx()
def _major_slice(self, idx, copy=False):
    """Index along the major axis where idx is a slice object.
    """
    M, N = self._swap(*self.shape)
    start, stop, step = idx.indices(M)

    if start == 0 and stop == M and step == 1:
        # Full slice: no extraction needed.
        return self.copy() if copy else self

    M = len(range(start, stop, step))
    new_shape = self._swap(M, N)

    if step == 1:
        # Contiguous slice: cut the submatrix directly.
        if M == 0 or self.nnz == 0:
            return self.__class__(new_shape, dtype=self.dtype)
        return self.__class__(
            _index._get_csr_submatrix_major_axis(
                self.data, self.indices, self.indptr, start, stop),
            shape=new_shape, copy=copy)
    # Strided slice: fall back to fancy indexing over the selected slots.
    rows = cupy.arange(start, stop, step, dtype=self.indptr.dtype)
    return self._major_index_fancy(rows)
def _minor_slice(self, idx, copy=False):
    """Index along the minor axis where idx is a slice object.
    """
    M, N = self._swap(*self.shape)
    start, stop, step = idx.indices(N)

    if start == 0 and stop == N and step == 1:
        # Full slice: no extraction needed.
        return self.copy() if copy else self

    N = len(range(start, stop, step))
    new_shape = self._swap(M, N)

    if N == 0 or self.nnz == 0:
        return self.__class__(new_shape, dtype=self.dtype)
    if step == 1:
        # Contiguous slice: cut the submatrix directly.
        return self.__class__(
            _index._get_csr_submatrix_minor_axis(
                self.data, self.indices, self.indptr, start, stop),
            shape=new_shape, copy=False)
    # Strided slice: fall back to fancy indexing over the selected slots.
    cols = cupy.arange(start, stop, step, dtype=self.indices.dtype)
    return self._minor_index_fancy(cols)
def _set_intXint(self, row, col, x):
    # Scalar assignment: reorder to (major, minor) and defer to the
    # generic multi-element setter.
    i, j = self._swap(row, col)
    self._set_many(i, j, x)

def _set_arrayXarray(self, row, col, x):
    # Array assignment with a dense right-hand side.
    i, j = self._swap(row, col)
    self._set_many(i, j, x)
def _set_arrayXarray_sparse(self, row, col, x):
    """Assign a sparse (COO-like) right-hand side into indexed positions."""
    # clear entries that will be overwritten
    self._zero_many(*self._swap(row, col))

    M, N = row.shape  # matches col.shape
    # Broadcast a 1-row / 1-column rhs across the indexed region.
    broadcast_row = M != 1 and x.shape[0] == 1
    broadcast_col = N != 1 and x.shape[1] == 1
    r, c = x.row, x.col
    x = cupy.asarray(x.data, dtype=self.dtype)
    if broadcast_row:
        r = cupy.repeat(cupy.arange(M), r.size)
        c = cupy.tile(c, M)
        x = cupy.tile(x, M)
    if broadcast_col:
        r = cupy.repeat(r, N)
        c = cupy.tile(cupy.arange(N), c.size)
        x = cupy.repeat(x, N)
    # only assign entries in the new sparsity structure
    i, j = self._swap(row[r, c], col[r, c])
    self._set_many(i, j, x)
def _prepare_indices(self, i, j):
    """Validate and normalize (major, minor) index arrays.

    Returns:
        tuple: ``(i, j, M, N)`` where ``i``/``j`` are flattened copies
        of the index arrays and ``M``/``N`` are the major/minor sizes.

    Raises:
        IndexError: if any index falls outside ``[-bound, bound)``.
    """
    M, N = self._swap(*self.shape)

    def check_bounds(indices, bound):
        # NOTE: ``.max()``/``.min()`` synchronize with the device.
        idx = indices.max()
        if idx >= bound:
            raise IndexError('index (%d) out of range (>= %d)' %
                             (idx, bound))
        idx = indices.min()
        if idx < -bound:
            raise IndexError('index (%d) out of range (< -%d)' %
                             (idx, bound))

    i = cupy.array(i, dtype=self.indptr.dtype,
                   copy=True, ndmin=1).ravel()
    j = cupy.array(j, dtype=self.indices.dtype,
                   copy=True, ndmin=1).ravel()
    check_bounds(i, M)
    check_bounds(j, N)
    return i, j, M, N
def _set_many(self, i, j, x):
    """Sets value at each (i, j) to x
    Here (i,j) index major and minor respectively, and must not contain
    duplicate entries.
    """
    i, j, M, N = self._prepare_indices(i, j)
    x = cupy.array(x, dtype=self.dtype, copy=True, ndmin=1).ravel()

    # Build a helper matrix with identical structure whose values are the
    # offsets of each stored element into ``self.data``.
    new_sp = cupyx.scipy.sparse.csr_matrix(
        (cupy.arange(self.nnz, dtype=cupy.float32),
         self.indices, self.indptr), shape=(M, N))

    # Sample it at (i, j): -1 marks positions absent from the structure.
    offsets = new_sp._get_arrayXarray(
        i, j, not_found_val=-1).astype(cupy.int32).ravel()

    # Overwrite values that already exist in the sparsity structure.
    mask = offsets > -1
    self.data[offsets[mask]] = x[mask]

    if mask.all():
        # only affects existing non-zero cells
        return

    # only insertions remain
    warnings.warn('Changing the sparsity structure of a '
                  '{}_matrix is expensive.'.format(self.format),
                  _base.SparseEfficiencyWarning)
    mask = ~mask
    i = i[mask]
    i[i < 0] += M
    j = j[mask]
    j[j < 0] += N
    self._insert_many(i, j, x[mask])
def _zero_many(self, i, j):
    """Sets value at each (i, j) to zero, preserving sparsity structure.
    Here (i,j) index major and minor respectively.
    """
    i, j, M, N = self._prepare_indices(i, j)

    # Helper matrix mapping each stored element to its offset in
    # ``self.data`` (same trick as in ``_set_many``).
    new_sp = cupyx.scipy.sparse.csr_matrix(
        (cupy.arange(self.nnz, dtype=cupy.float32),
         self.indices, self.indptr), shape=(M, N))

    offsets = new_sp._get_arrayXarray(
        i, j, not_found_val=-1).astype(cupy.int32).ravel()

    # only assign zeros to the existing sparsity structure
    self.data[offsets[offsets > -1]] = 0
def _perform_insert(self, indices_inserts, data_inserts,
                    rows, row_counts, idx_dtype):
    """Insert new elements into current sparse matrix in sorted order"""
    # New per-slot lengths: old lengths plus the number of insertions
    # landing in each affected major slot.
    indptr_diff = cupy.diff(self.indptr)
    indptr_diff[rows] += row_counts

    new_indptr = cupy.empty(self.indptr.shape, dtype=idx_dtype)
    new_indptr[0] = idx_dtype(0)
    new_indptr[1:] = indptr_diff

    # Build output arrays
    cupy.cumsum(new_indptr, out=new_indptr)
    out_nnz = int(new_indptr[-1])  # NOTE: synchronizes with the device

    new_indices = cupy.empty(out_nnz, dtype=idx_dtype)
    new_data = cupy.empty(out_nnz, dtype=self.data.dtype)

    # Build an indexed indptr that contains the offsets for each
    # row but only for in i, j, and x.
    new_indptr_lookup = cupy.zeros(new_indptr.size, dtype=idx_dtype)
    new_indptr_lookup[1:][rows] = row_counts
    cupy.cumsum(new_indptr_lookup, out=new_indptr_lookup)

    # Merge the existing arrays and the insertions into the new arrays.
    _index._insert_many_populate_arrays(
        indices_inserts, data_inserts, new_indptr_lookup,
        self.indptr, self.indices, self.data, new_indptr, new_indices,
        new_data, size=self.indptr.size-1)

    self.indptr = new_indptr
    self.indices = new_indices
    self.data = new_data
def _insert_many(self, i, j, x):
    """Inserts new nonzero at each (i, j) with value x
    Here (i,j) index major and minor respectively.
    i, j and x must be non-empty, 1d arrays.
    Inserts each major group (e.g. all entries per row) at a time.
    Maintains has_sorted_indices property.
    Modifies i, j, x in place.
    """

    # NOTE(review): the comment below assumes argsort keeps duplicates
    # in input order -- confirm this holds for cupy.argsort.
    order = cupy.argsort(i)  # stable for duplicates
    i = i.take(order)
    j = j.take(order)
    x = x.take(order)

    # Update index data type

    idx_dtype = _sputils.get_index_dtype(
        (self.indices, self.indptr), maxval=(
            self.nnz + x.size))

    self.indptr = self.indptr.astype(idx_dtype)
    self.indices = self.indices.astype(idx_dtype)
    self.data = self.data.astype(self.dtype)

    # Deduplicate: keep only the last value written to each (i, j).
    indptr_inserts, indices_inserts, data_inserts = \
        _index._select_last_indices(i, j, x, idx_dtype)

    rows, ui_indptr = cupy.unique(indptr_inserts, return_index=True)

    # Append a sentinel end offset so each group has a closing bound.
    to_add = cupy.empty(ui_indptr.size+1, ui_indptr.dtype)
    to_add[-1] = j.size
    to_add[:-1] = ui_indptr
    ui_indptr = to_add

    # Compute the counts for each row in the insertion array
    row_counts = cupy.zeros(ui_indptr.size-1, dtype=idx_dtype)
    cupy.add.at(row_counts, cupy.searchsorted(rows, indptr_inserts), 1)

    self._perform_insert(indices_inserts, data_inserts,
                         rows, row_counts, idx_dtype)
def __get_has_canonical_format(self):
    """Determine whether the matrix has sorted indices and no duplicates.

    Returns
        bool: ``True`` if the above applies, otherwise ``False``.

    .. note::
        :attr:`has_canonical_format` implies :attr:`has_sorted_indices`, so
        if the latter flag is ``False``, so will the former be; if the
        former is found ``True``, the latter flag is also set.

    .. warning::
        Getting this property might synchronize the device.

    """
    # Modified from the SciPy counterpart.

    # In CuPy the implemented conversions do not exactly match those of
    # SciPy's, so it's hard to put this exactly as where it is in SciPy,
    # but this should do the job.
    if self.data.size == 0:
        # An empty matrix is trivially canonical.
        self._has_canonical_format = True
    # check to see if result was cached
    elif not getattr(self, '_has_sorted_indices', True):
        # not sorted => not canonical
        self._has_canonical_format = False
    elif not hasattr(self, '_has_canonical_format'):
        # Not cached yet: run the per-slot kernel and reduce its result.
        is_canonical = self._has_canonical_format_kern(
            self.indptr, self.indices, size=self.indptr.size-1)
        self._has_canonical_format = bool(is_canonical.all())
    return self._has_canonical_format
def __set_has_canonical_format(self, val):
    """Taken from SciPy as is."""
    self._has_canonical_format = bool(val)
    if val:
        # Canonical format implies sorted indices.
        self.has_sorted_indices = True

# Property pairing the cached getter above with its setter.
has_canonical_format = property(fget=__get_has_canonical_format,
                                fset=__set_has_canonical_format)
def __get_sorted(self):
    """Determine whether the matrix has sorted indices.

    Returns
        bool:
            ``True`` if the indices of the matrix are in sorted order,
            otherwise ``False``.

    .. warning::
        Getting this property might synchronize the device.

    """
    # Modified from the SciPy counterpart. In CuPy the implemented
    # conversions do not exactly match SciPy's, so this placement is the
    # closest practical equivalent.
    if self.data.size == 0:
        # Nothing stored, so the (empty) indices are vacuously sorted.
        self._has_sorted_indices = True
        return True
    if not hasattr(self, '_has_sorted_indices'):
        # No cached answer yet: ask the device kernel (one flag per row).
        flags = self._has_sorted_indices_kern(
            self.indptr, self.indices, size=self.indptr.size - 1)
        self._has_sorted_indices = bool(flags.all())
    return self._has_sorted_indices
|
| 713 |
+
|
| 714 |
+
def __set_sorted(self, val):
    # Cache the sorted-indices flag, normalized to a plain bool.
    self._has_sorted_indices = bool(val)
|
| 716 |
+
|
| 717 |
+
# Public property exposing the cached sorted-indices flag (see SciPy's
# ``has_sorted_indices``).
has_sorted_indices = property(fget=__get_sorted, fset=__set_sorted)
|
| 718 |
+
|
| 719 |
+
def get_shape(self):
    """Returns the shape of the matrix.

    Returns:
        tuple: Shape of the matrix.

    """
    # The shape is stored privately on construction; this is a plain
    # accessor with no device synchronization.
    return self._shape
|
| 727 |
+
|
| 728 |
+
def getnnz(self, axis=None):
    """Returns the number of stored values, including explicit zeros.

    Args:
        axis: Not supported yet. Must be ``None``.

    Returns:
        int: The number of stored values.

    Raises:
        ValueError: If ``axis`` is not ``None`` (per-axis counts are not
            supported yet).

    """
    if axis is None:
        return self.data.size
    # Previously this raised a bare, message-less ValueError; keep the
    # exception type (callers may catch it) but explain the failure.
    raise ValueError('axis is not supported; only axis=None is accepted')
|
| 742 |
+
|
| 743 |
+
def sorted_indices(self):
    """Return a copy of this matrix with sorted indices

    .. warning::
        Calling this function might synchronize the device.
    """
    # Taken from SciPy as is: sort a copy so ``self`` stays untouched.
    result = self.copy()
    result.sort_indices()
    return result
|
| 753 |
+
|
| 754 |
+
def sort_indices(self):
    """Sort the indices of this matrix *in place*.

    Unlike in SciPy, this is implemented in child classes because each
    child needs to call its own sort function from cuSPARSE.
    """
    raise NotImplementedError
|
| 758 |
+
|
| 759 |
+
def sum_duplicates(self):
    """Eliminate duplicate matrix entries by adding them together.

    .. note::
        This is an *in place* operation.

    .. warning::
        Calling this function might synchronize the device.

    .. seealso::
        :meth:`scipy.sparse.csr_matrix.sum_duplicates`,
        :meth:`scipy.sparse.csc_matrix.sum_duplicates`
    """
    if self.has_canonical_format:
        # Already sorted with no duplicates; nothing to do.
        return
    # TODO(leofang): add a kernel for compressed sparse matrices without
    # converting to coo
    tmp = self.tocoo()
    tmp.sum_duplicates()
    # Rebuild this matrix in place from the deduplicated COO result.
    self.__init__(tmp.asformat(self.format))
    self.has_canonical_format = True
|
| 780 |
+
|
| 781 |
+
#####################
|
| 782 |
+
# Reduce operations #
|
| 783 |
+
#####################
|
| 784 |
+
|
| 785 |
+
def _minor_reduce(self, ufunc, axis, nonzero):
    """Reduce nonzeros with a ufunc over the minor axis when non-empty

    Warning: this does not call sum_duplicates()

    Args:
        ufunc (object): Function handle giving the operation to be
            conducted; either ``cupy.amax`` or ``cupy.amin`` (any other
            value raises ``KeyError`` from the kernel lookup below).
        axis (int): Matrix axis over which the reduction should be
            conducted.
        nonzero (bool): If ``True``, reduce over stored nonzeros only;
            otherwise implicit zeros participate in the reduction.

    Returns:
        (cupy.ndarray): Reduce result for nonzeros in each
            major_index.

    """
    out_shape = self.shape[1 - axis]
    # Output buffer; the reduction kernels write float64 results.
    out = cupy.zeros(out_shape).astype(cupy.float64)
    # Select the kernel matching the requested reduction variant.
    if nonzero:
        kerns = {cupy.amax: self._max_nonzero_reduction_kern,
                 cupy.amin: self._min_nonzero_reduction_kern}
    else:
        kerns = {cupy.amax: self._max_reduction_kern,
                 cupy.amin: self._min_reduction_kern}

    # Launch one block per output element; each kernel reads the
    # [indptr[i], indptr[i+1]) slice of data. Argument order must match
    # the kernel signature exactly.
    kerns[ufunc]((out_shape,), (1,),
                 (self.data.astype(cupy.float64),
                  self.indptr[:len(self.indptr) - 1],
                  self.indptr[1:], cupy.int64(self.shape[axis]),
                  out))

    return out
|
| 819 |
+
|
| 820 |
+
def _arg_minor_reduce(self, ufunc, axis):
    """Reduce nonzeros with an arg-ufunc over the minor axis when non-empty

    Warning: this does not call sum_duplicates()

    Args:
        ufunc (object): Either :func:`cupy.argmax` or :func:`cupy.argmin`.
        axis (int): Matrix axis over which the reduction should be
            conducted.

    Returns:
        (cupy.ndarray): Reduce result for nonzeros in each
            major_index

    Raises:
        ValueError: If ``ufunc`` is neither ``cupy.argmax`` nor
            ``cupy.argmin``.

    """
    # Create the vector to hold output
    # Note: it's important to set "int" here, following what SciPy
    # does, as the outcome dtype is platform dependent
    out_shape = self.shape[1 - axis]
    out = cupy.zeros(out_shape, dtype=int)

    # Resolve the templated kernel name for the (data, index) dtypes.
    ker_name = '_arg_reduction<{}, {}>'.format(
        _scalar.get_typename(self.data.dtype),
        _scalar.get_typename(out.dtype))

    if ufunc == cupy.argmax:
        ker = self._max_arg_reduction_mod.get_function('max' + ker_name)
    elif ufunc == cupy.argmin:
        ker = self._min_arg_reduction_mod.get_function('min' + ker_name)
    else:
        # Previously an unsupported ufunc fell through, leaving ``ker``
        # unbound and triggering an UnboundLocalError at the launch below;
        # fail with a clear message instead.
        raise ValueError('ufunc must be cupy.argmax or cupy.argmin')

    # One block per output element; argument order must match the kernel.
    ker((out_shape,), (1,),
        (self.data, self.indices,
         self.indptr[:len(self.indptr) - 1],
         self.indptr[1:], cupy.int64(self.shape[axis]),
         out))

    return out
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_construct.py
ADDED
|
@@ -0,0 +1,582 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy
|
| 2 |
+
import cupy
|
| 3 |
+
from cupyx.scipy.sparse import _coo
|
| 4 |
+
from cupyx.scipy.sparse import _csc
|
| 5 |
+
from cupyx.scipy.sparse import _csr
|
| 6 |
+
from cupyx.scipy.sparse import _dia
|
| 7 |
+
from cupyx.scipy.sparse import _sputils
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def eye(m, n=None, k=0, dtype='d', format=None):
    """Creates a sparse matrix with ones on diagonal.

    Args:
        m (int): Number of rows.
        n (int or None): Number of columns. If it is ``None``,
            it makes a square matrix.
        k (int): Diagonal to place ones on.
        dtype: Type of a matrix to create.
        format (str or None): Format of the result, e.g. ``format="csr"``.

    Returns:
        cupyx.scipy.sparse.spmatrix: Created sparse matrix.

    .. seealso:: :func:`scipy.sparse.eye`

    """
    if n is None:
        n = m
    m = int(m)
    n = int(n)

    # Fast paths: a square matrix with ones on the main diagonal can be
    # assembled directly in the requested format.
    if m == n and k == 0:
        if format in ('csr', 'csc'):
            indptr = cupy.arange(n + 1, dtype='i')
            indices = cupy.arange(n, dtype='i')
            data = cupy.ones(n, dtype=dtype)
            matrix_cls = (_csr.csr_matrix if format == 'csr'
                          else _csc.csc_matrix)
            return matrix_cls((data, indices, indptr), (n, n))

        if format == 'coo':
            row = cupy.arange(n, dtype='i')
            col = cupy.arange(n, dtype='i')
            data = cupy.ones(n, dtype=dtype)
            return _coo.coo_matrix((data, (row, col)), (n, n))

    # General case: build a single-diagonal DIA matrix and convert.
    diag_len = max(0, min(m + k, n))
    diag = cupy.ones((1, diag_len), dtype=dtype)
    return spdiags(diag, k, m, n).asformat(format)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def identity(n, dtype='d', format=None):
    """Creates an identity matrix in sparse format.

    .. note::
        Currently it only supports csr, csc and coo formats.

    Args:
        n (int): Number of rows and columns.
        dtype: Type of a matrix to create.
        format (str or None): Format of the result, e.g. ``format="csr"``.

    Returns:
        cupyx.scipy.sparse.spmatrix: Created identity matrix.

    .. seealso:: :func:`scipy.sparse.identity`

    """
    # An identity matrix is just a square ``eye`` on the main diagonal.
    return eye(n, n, dtype=dtype, format=format)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def spdiags(data, diags, m, n, format=None):
    """Creates a sparse matrix from diagonals.

    Args:
        data (cupy.ndarray): Matrix diagonals stored row-wise.
        diags (cupy.ndarray): Diagonals to set.
        m (int): Number of rows.
        n (int): Number of cols.
        format (str or None): Sparse format, e.g. ``format="csr"``.

    Returns:
        cupyx.scipy.sparse.spmatrix: Created sparse matrix.

    .. seealso:: :func:`scipy.sparse.spdiags`

    """
    # Build natively in DIA format, then convert to the requested one.
    result = _dia.dia_matrix((data, diags), shape=(m, n))
    return result.asformat(format)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def _compressed_sparse_stack(blocks, axis):
    """Fast path for stacking CSR/CSC matrices
    (i) vstack for CSR, (ii) hstack for CSC.
    """
    # The minor axis must be identical across all blocks.
    other_axis = 1 if axis == 0 else 0
    data = cupy.concatenate([b.data for b in blocks])
    constant_dim = blocks[0].shape[other_axis]
    # Pick an index dtype wide enough for both nnz and the minor dimension.
    idx_dtype = _sputils.get_index_dtype(arrays=[b.indptr for b in blocks],
                                         maxval=max(data.size, constant_dim))
    indices = cupy.empty(data.size, dtype=idx_dtype)
    indptr = cupy.empty(sum(b.shape[axis]
                            for b in blocks) + 1, dtype=idx_dtype)
    last_indptr = idx_dtype(0)
    sum_dim = 0      # running total of the stacked (major) dimension
    sum_indices = 0  # running total of copied index entries
    for b in blocks:
        if b.shape[other_axis] != constant_dim:
            raise ValueError(
                'incompatible dimensions for axis %d' % other_axis)
        indices[sum_indices:sum_indices+b.indices.size] = b.indices
        sum_indices += b.indices.size
        idxs = slice(sum_dim, sum_dim + b.shape[axis])
        # Copy this block's indptr (minus its final entry) and shift it by
        # the number of nonzeros accumulated so far.
        indptr[idxs] = b.indptr[:-1]
        indptr[idxs] += last_indptr
        sum_dim += b.shape[axis]
        last_indptr += b.indptr[-1]
    # Close the indptr with the grand-total nnz.
    indptr[-1] = last_indptr
    if axis == 0:
        return _csr.csr_matrix((data, indices, indptr),
                               shape=(sum_dim, constant_dim))
    else:
        return _csc.csc_matrix((data, indices, indptr),
                               shape=(constant_dim, sum_dim))
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def hstack(blocks, format=None, dtype=None):
    """Stacks sparse matrices horizontally (column wise)

    Args:
        blocks (sequence of cupyx.scipy.sparse.spmatrix):
            sparse matrices to stack

        format (str):
            sparse format of the result (e.g. "csr")
            by default an appropriate sparse matrix format is returned.
            This choice is subject to change.
        dtype (dtype, optional):
            The data-type of the output matrix. If not given, the dtype is
            determined from that of ``blocks``.

    Returns:
        cupyx.scipy.sparse.spmatrix: the stacked sparse matrix

    .. seealso:: :func:`scipy.sparse.hstack`

    Examples:
        >>> from cupy import array
        >>> from cupyx.scipy.sparse import csr_matrix, hstack
        >>> A = csr_matrix(array([[1., 2.], [3., 4.]]))
        >>> B = csr_matrix(array([[5.], [6.]]))
        >>> hstack([A, B]).toarray()
        array([[1., 2., 5.],
               [3., 4., 6.]])
    """
    # A horizontal stack is a block matrix with a single row of blocks.
    grid = [blocks]
    return bmat(grid, format=format, dtype=dtype)
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def vstack(blocks, format=None, dtype=None):
    """Stacks sparse matrices vertically (row wise)

    Args:
        blocks (sequence of cupyx.scipy.sparse.spmatrix)
            sparse matrices to stack
        format (str, optional):
            sparse format of the result (e.g. "csr")
            by default an appropriate sparse matrix format is returned.
            This choice is subject to change.
        dtype (dtype, optional):
            The data-type of the output matrix. If not given, the dtype is
            determined from that of `blocks`.

    Returns:
        cupyx.scipy.sparse.spmatrix: the stacked sparse matrix

    .. seealso:: :func:`scipy.sparse.vstack`

    Examples:
        >>> from cupy import array
        >>> from cupyx.scipy.sparse import csr_matrix, vstack
        >>> A = csr_matrix(array([[1., 2.], [3., 4.]]))
        >>> B = csr_matrix(array([[5., 6.]]))
        >>> vstack([A, B]).toarray()
        array([[1., 2.],
               [3., 4.],
               [5., 6.]])
    """
    # A vertical stack is a block matrix with one block per row.
    column = [[block] for block in blocks]
    return bmat(column, format=format, dtype=dtype)
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def bmat(blocks, format=None, dtype=None):
    """Builds a sparse matrix from sparse sub-blocks

    Args:
        blocks (array_like):
            Grid of sparse matrices with compatible shapes.
            An entry of None implies an all-zero matrix.
        format ({'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional):
            The sparse format of the result (e.g. "csr"). By default an
            appropriate sparse matrix format is returned.
            This choice is subject to change.
        dtype (dtype, optional):
            The data-type of the output matrix. If not given, the dtype is
            determined from that of `blocks`.
    Returns:
        bmat (sparse matrix)

    .. seealso:: :func:`scipy.sparse.bmat`

    Examples:
        >>> from cupy import array
        >>> from cupyx.scipy.sparse import csr_matrix, bmat
        >>> A = csr_matrix(array([[1., 2.], [3., 4.]]))
        >>> B = csr_matrix(array([[5.], [6.]]))
        >>> C = csr_matrix(array([[7.]]))
        >>> bmat([[A, B], [None, C]]).toarray()
        array([[1., 2., 5.],
               [3., 4., 6.],
               [0., 0., 7.]])
        >>> bmat([[A, None], [None, C]]).toarray()
        array([[1., 2., 0.],
               [3., 4., 0.],
               [0., 0., 7.]])

    """

    # We assume here that blocks will be 2-D so we need to look, at most,
    # 2 layers deep for the shape
    # TODO(Corey J. Nolet): Check this assumption and raise ValueError

    # NOTE: We can't follow scipy exactly here
    # since we don't have an `object` datatype
    M = len(blocks)
    N = len(blocks[0])

    blocks_flat = []
    for m in range(M):
        for n in range(N):
            if blocks[m][n] is not None:
                blocks_flat.append(blocks[m][n])

    if len(blocks_flat) == 0:
        return _coo.coo_matrix((0, 0), dtype=dtype)

    # check for fast path cases
    if (N == 1 and format in (None, 'csr') and
            all(isinstance(b, _csr.csr_matrix)
                for b in blocks_flat)):
        A = _compressed_sparse_stack(blocks_flat, 0)
        if dtype is not None:
            A = A.astype(dtype)
        return A
    elif (M == 1 and format in (None, 'csc')
          and all(isinstance(b, _csc.csc_matrix) for b in blocks_flat)):
        A = _compressed_sparse_stack(blocks_flat, 1)
        if dtype is not None:
            A = A.astype(dtype)
        return A

    block_mask = numpy.zeros((M, N), dtype=bool)
    # Entry i+1 holds block-row i's height; entry j+1 holds block-col j's
    # width (entry 0 stays 0 so cumsum yields offsets directly).
    brow_lengths = numpy.zeros(M+1, dtype=numpy.int64)
    bcol_lengths = numpy.zeros(N+1, dtype=numpy.int64)

    # convert everything to COO format
    for i in range(M):
        for j in range(N):
            if blocks[i][j] is not None:
                A = _coo.coo_matrix(blocks[i][j])
                blocks[i][j] = A
                block_mask[i][j] = True

                if brow_lengths[i+1] == 0:
                    brow_lengths[i+1] = A.shape[0]
                elif brow_lengths[i+1] != A.shape[0]:
                    msg = ('blocks[{i},:] has incompatible row dimensions. '
                           'Got blocks[{i},{j}].shape[0] == {got}, '
                           'expected {exp}.'.format(i=i, j=j,
                                                    exp=brow_lengths[i+1],
                                                    got=A.shape[0]))
                    raise ValueError(msg)

                if bcol_lengths[j+1] == 0:
                    bcol_lengths[j+1] = A.shape[1]
                elif bcol_lengths[j+1] != A.shape[1]:
                    # Fixed: this branch compares shape[1], so the message
                    # must say "column dimensions", not "row dimensions".
                    msg = ('blocks[:,{j}] has incompatible column '
                           'dimensions. '
                           'Got blocks[{i},{j}].shape[1] == {got}, '
                           'expected {exp}.'.format(i=i, j=j,
                                                    exp=bcol_lengths[j+1],
                                                    got=A.shape[1]))
                    raise ValueError(msg)

    nnz = sum(block.nnz for block in blocks_flat)
    if dtype is None:
        all_dtypes = [blk.dtype for blk in blocks_flat]
        dtype = _sputils.upcast(*all_dtypes) if all_dtypes else None

    row_offsets = numpy.cumsum(brow_lengths)
    col_offsets = numpy.cumsum(bcol_lengths)

    shape = (row_offsets[-1], col_offsets[-1])

    data = cupy.empty(nnz, dtype=dtype)
    idx_dtype = _sputils.get_index_dtype(maxval=max(shape))
    row = cupy.empty(nnz, dtype=idx_dtype)
    col = cupy.empty(nnz, dtype=idx_dtype)

    # Copy each block's COO triplets into the output, shifted by the
    # block-grid offsets computed above.
    nnz = 0
    ii, jj = numpy.nonzero(block_mask)
    for i, j in zip(ii, jj):
        B = blocks[int(i)][int(j)]
        idx = slice(nnz, nnz + B.nnz)
        data[idx] = B.data
        row[idx] = B.row + row_offsets[i]
        col[idx] = B.col + col_offsets[j]
        nnz += B.nnz

    return _coo.coo_matrix((data, (row, col)), shape=shape).asformat(format)
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
def random(m, n, density=0.01, format='coo', dtype=None,
           random_state=None, data_rvs=None):
    """Generates a random sparse matrix.

    This function generates a random sparse matrix. First it selects non-zero
    elements with given density ``density`` from ``(m, n)`` elements.
    So the number of non-zero elements ``k`` is ``k = m * n * density``.
    Value of each element is selected with ``data_rvs`` function.

    Args:
        m (int): Number of rows.
        n (int): Number of cols.
        density (float): Ratio of non-zero entries.
        format (str): Matrix format.
        dtype (~cupy.dtype): Type of the returned matrix values.
        random_state (cupy.random.RandomState or int):
            State of random number generator.
            If an integer is given, the method makes a new state for random
            number generator and uses it.
            If it is not given, the default state is used.
            This state is used to generate random indexes for nonzero entries.
        data_rvs (callable): A function to generate data for a random matrix.
            If it is not given, `random_state.rand` is used.

    Returns:
        cupyx.scipy.sparse.spmatrix: Generated matrix.

    .. seealso:: :func:`scipy.sparse.random`

    """
    if density < 0 or density > 1:
        raise ValueError('density expected to be 0 <= density <= 1')
    dtype = cupy.dtype(dtype)
    if dtype.char not in 'fd':
        raise NotImplementedError('type %s not supported' % dtype)

    total = m * n
    num_nonzero = int(density * m * n)

    if random_state is None:
        random_state = cupy.random
    elif isinstance(random_state, (int, cupy.integer)):
        random_state = cupy.random.RandomState(random_state)

    if data_rvs is None:
        data_rvs = random_state.rand

    # Draw distinct flat positions (column-major), then split into
    # (row, col) pairs.
    flat = random_state.choice(total, size=num_nonzero, replace=False)
    col = flat // m
    row = flat - col * m
    values = data_rvs(num_nonzero).astype(dtype)
    return _coo.coo_matrix(
        (values, (row, col)), shape=(m, n)).asformat(format)
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def rand(m, n, density=0.01, format='coo', dtype=None, random_state=None):
    """Generates a random sparse matrix.

    See :func:`cupyx.scipy.sparse.random` for detail.

    Args:
        m (int): Number of rows.
        n (int): Number of cols.
        density (float): Ratio of non-zero entries.
        format (str): Matrix format.
        dtype (~cupy.dtype): Type of the returned matrix values.
        random_state (cupy.random.RandomState or int):
            State of random number generator.
            If an integer is given, the method makes a new state for random
            number generator and uses it.
            If it is not given, the default state is used.
            This state is used to generate random indexes for nonzero entries.

    Returns:
        cupyx.scipy.sparse.spmatrix: Generated matrix.

    .. seealso:: :func:`scipy.sparse.rand`
    .. seealso:: :func:`cupyx.scipy.sparse.random`

    """
    # Thin alias of ``random`` with the default uniform value generator.
    return random(m, n, density=density, format=format, dtype=dtype,
                  random_state=random_state)
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
def diags(diagonals, offsets=0, shape=None, format=None, dtype=None):
    """Construct a sparse matrix from diagonals.

    Args:
        diagonals (sequence of array_like):
            Sequence of arrays containing the matrix diagonals, corresponding
            to `offsets`.
        offsets (sequence of int or an int):
            Diagonals to set:
                - k = 0 the main diagonal (default)
                - k > 0 the k-th upper diagonal
                - k < 0 the k-th lower diagonal
        shape (tuple of int):
            Shape of the result. If omitted, a square matrix large enough
            to contain the diagonals is returned.
        format ({"dia", "csr", "csc", "lil", ...}):
            Matrix format of the result. By default (format=None) an
            appropriate sparse matrix format is returned. This choice is
            subject to change.
        dtype (dtype): Data type of the matrix.

    Returns:
        cupyx.scipy.sparse.spmatrix: Generated matrix.

    Notes:
        This function differs from `spdiags` in the way it handles
        off-diagonals.

        The result from `diags` is the sparse equivalent of::

            cupy.diag(diagonals[0], offsets[0])
            + ...
            + cupy.diag(diagonals[k], offsets[k])

        Repeated diagonal offsets are disallowed.
    """
    # if offsets is not a sequence, assume that there's only one diagonal
    if _sputils.isscalarlike(offsets):
        # now check that there's actually only one diagonal
        if len(diagonals) == 0 or _sputils.isscalarlike(diagonals[0]):
            diagonals = [cupy.atleast_1d(diagonals)]
        else:
            raise ValueError('Different number of diagonals and offsets.')
    else:
        diagonals = list(map(cupy.atleast_1d, diagonals))

    # Offsets are kept on the host (NumPy) so they can drive Python-level
    # slicing below; device arrays are transferred first.
    if isinstance(offsets, cupy.ndarray):
        offsets = offsets.get()
    offsets = numpy.atleast_1d(offsets)

    # Basic check
    if len(diagonals) != len(offsets):
        raise ValueError('Different number of diagonals and offsets.')

    # Determine shape, if omitted
    if shape is None:
        m = len(diagonals[0]) + abs(int(offsets[0]))
        shape = (m, m)

    # Determine data type, if omitted
    if dtype is None:
        dtype = cupy.common_type(*diagonals)

    # Construct data array
    m, n = shape

    # M is the row width of the DIA data array: wide enough to hold every
    # requested diagonal at its padded position.
    M = max([min(m + offset, n - offset) + max(0, offset)
             for offset in offsets])
    M = max(0, M)
    data_arr = cupy.zeros((len(offsets), M), dtype=dtype)

    K = min(m, n)

    for j, diagonal in enumerate(diagonals):
        offset = offsets[j]
        # Upper diagonals are stored shifted right by ``offset`` columns
        # (DIA convention).
        k = max(0, offset)
        length = min(m + offset, n - offset, K)
        if length < 0:
            raise ValueError(
                'Offset %d (index %d) out of bounds' % (offset, j))
        try:
            data_arr[j, k:k+length] = diagonal[..., :length]
        except ValueError:
            # Re-raise with a clearer message when the diagonal length
            # cannot broadcast into the computed slot.
            if len(diagonal) != length and len(diagonal) != 1:
                raise ValueError(
                    'Diagonal length (index %d: %d at offset %d) does not '
                    'agree with matrix size (%d, %d).' % (
                        j, len(diagonal), offset, m, n))
            raise

    return _dia.dia_matrix((data_arr, offsets), shape=(m, n)).asformat(format)
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
def kron(A, B, format=None):
    """Kronecker product of sparse matrices A and B.

    Args:
        A (cupyx.scipy.sparse.spmatrix): a sparse matrix.
        B (cupyx.scipy.sparse.spmatrix): a sparse matrix.
        format (str): the format of the returned sparse matrix.

    Returns:
        cupyx.scipy.sparse.spmatrix:
            Generated sparse matrix with the specified ``format``.

    .. seealso:: :func:`scipy.sparse.kron`

    """
    # TODO(leofang): support BSR format when it's added to CuPy
    # TODO(leofang): investigate if possible to optimize performance by
    # starting with CSR instead of COO matrices

    A = _coo.coo_matrix(A)
    B = _coo.coo_matrix(B)
    out_shape = (A.shape[0] * B.shape[0], A.shape[1] * B.shape[1])

    if A.nnz == 0 or B.nnz == 0:
        # kronecker product is the zero matrix
        return _coo.coo_matrix(out_shape).asformat(format)

    # Choose an index dtype wide enough for the output coordinates.
    if max(out_shape[0], out_shape[1]) > cupy.iinfo('int32').max:
        dtype = cupy.int64
    else:
        dtype = cupy.int32

    # expand entries of A into blocks: each of A's nnz entries becomes a
    # B-sized block, so every coordinate/value is repeated B.nnz times.
    row = A.row.astype(dtype, copy=True) * B.shape[0]
    row = row.repeat(B.nnz)
    col = A.col.astype(dtype, copy=True) * B.shape[1]
    col = col.repeat(B.nnz)
    data = A.data.repeat(B.nnz)  # data's dtype follows that of A in SciPy

    # increment block indices: reshape to (A.nnz, B.nnz) so B's
    # coordinates broadcast across every block, then flatten back.
    row, col = row.reshape(-1, B.nnz), col.reshape(-1, B.nnz)
    row += B.row
    col += B.col
    row, col = row.ravel(), col.ravel()

    # compute block entries: elementwise product via the same broadcast.
    data = data.reshape(-1, B.nnz) * B.data
    data = data.ravel()

    return _coo.coo_matrix(
        (data, (row, col)), shape=out_shape).asformat(format)
|
| 547 |
+
|
| 548 |
+
|
| 549 |
+
def kronsum(A, B, format=None):
|
| 550 |
+
"""Kronecker sum of sparse matrices A and B.
|
| 551 |
+
|
| 552 |
+
Kronecker sum is the sum of two Kronecker products
|
| 553 |
+
``kron(I_n, A) + kron(B, I_m)``, where ``I_n`` and ``I_m`` are identity
|
| 554 |
+
matrices.
|
| 555 |
+
|
| 556 |
+
Args:
|
| 557 |
+
A (cupyx.scipy.sparse.spmatrix): a sparse matrix.
|
| 558 |
+
B (cupyx.scipy.sparse.spmatrix): a sparse matrix.
|
| 559 |
+
format (str): the format of the returned sparse matrix.
|
| 560 |
+
|
| 561 |
+
Returns:
|
| 562 |
+
cupyx.scipy.sparse.spmatrix:
|
| 563 |
+
Generated sparse matrix with the specified ``format``.
|
| 564 |
+
|
| 565 |
+
.. seealso:: :func:`scipy.sparse.kronsum`
|
| 566 |
+
|
| 567 |
+
"""
|
| 568 |
+
A = _coo.coo_matrix(A)
|
| 569 |
+
B = _coo.coo_matrix(B)
|
| 570 |
+
|
| 571 |
+
if A.shape[0] != A.shape[1]:
|
| 572 |
+
raise ValueError('A is not square matrix')
|
| 573 |
+
|
| 574 |
+
if B.shape[0] != B.shape[1]:
|
| 575 |
+
raise ValueError('B is not square matrix')
|
| 576 |
+
|
| 577 |
+
dtype = _sputils.upcast(A.dtype, B.dtype)
|
| 578 |
+
|
| 579 |
+
L = kron(eye(B.shape[0], dtype=dtype), A, format=format)
|
| 580 |
+
R = kron(B, eye(A.shape[0], dtype=dtype), format=format)
|
| 581 |
+
|
| 582 |
+
return (L + R).asformat(format)
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_coo.py
ADDED
|
@@ -0,0 +1,568 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy
|
| 2 |
+
try:
|
| 3 |
+
import scipy.sparse
|
| 4 |
+
_scipy_available = True
|
| 5 |
+
except ImportError:
|
| 6 |
+
_scipy_available = False
|
| 7 |
+
|
| 8 |
+
import cupy
|
| 9 |
+
from cupy import _core
|
| 10 |
+
from cupyx.scipy.sparse import _base
|
| 11 |
+
from cupyx.scipy.sparse import _csc
|
| 12 |
+
from cupyx.scipy.sparse import _csr
|
| 13 |
+
from cupyx.scipy.sparse import _data as sparse_data
|
| 14 |
+
from cupyx.scipy.sparse import _util
|
| 15 |
+
from cupyx.scipy.sparse import _sputils
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class coo_matrix(sparse_data._data_matrix):
|
| 19 |
+
|
| 20 |
+
"""COOrdinate format sparse matrix.
|
| 21 |
+
|
| 22 |
+
This can be instantiated in several ways.
|
| 23 |
+
|
| 24 |
+
``coo_matrix(D)``
|
| 25 |
+
``D`` is a rank-2 :class:`cupy.ndarray`.
|
| 26 |
+
|
| 27 |
+
``coo_matrix(S)``
|
| 28 |
+
``S`` is another sparse matrix. It is equivalent to ``S.tocoo()``.
|
| 29 |
+
|
| 30 |
+
``coo_matrix((M, N), [dtype])``
|
| 31 |
+
It constructs an empty matrix whose shape is ``(M, N)``. Default dtype
|
| 32 |
+
is float64.
|
| 33 |
+
|
| 34 |
+
``coo_matrix((data, (row, col)))``
|
| 35 |
+
All ``data``, ``row`` and ``col`` are one-dimenaional
|
| 36 |
+
:class:`cupy.ndarray`.
|
| 37 |
+
|
| 38 |
+
Args:
|
| 39 |
+
arg1: Arguments for the initializer.
|
| 40 |
+
shape (tuple): Shape of a matrix. Its length must be two.
|
| 41 |
+
dtype: Data type. It must be an argument of :class:`numpy.dtype`.
|
| 42 |
+
copy (bool): If ``True``, copies of given data are always used.
|
| 43 |
+
|
| 44 |
+
.. seealso::
|
| 45 |
+
:class:`scipy.sparse.coo_matrix`
|
| 46 |
+
|
| 47 |
+
"""
|
| 48 |
+
|
| 49 |
+
format = 'coo'
|
| 50 |
+
|
| 51 |
+
_sum_duplicates_diff = _core.ElementwiseKernel(
|
| 52 |
+
'raw T row, raw T col',
|
| 53 |
+
'T diff',
|
| 54 |
+
'''
|
| 55 |
+
T diff_out = 1;
|
| 56 |
+
if (i == 0 || row[i - 1] == row[i] && col[i - 1] == col[i]) {
|
| 57 |
+
diff_out = 0;
|
| 58 |
+
}
|
| 59 |
+
diff = diff_out;
|
| 60 |
+
''', 'cupyx_scipy_sparse_coo_sum_duplicates_diff')
|
| 61 |
+
|
| 62 |
+
def __init__(self, arg1, shape=None, dtype=None, copy=False):
|
| 63 |
+
if shape is not None and len(shape) != 2:
|
| 64 |
+
raise ValueError(
|
| 65 |
+
'Only two-dimensional sparse arrays are supported.')
|
| 66 |
+
|
| 67 |
+
if _base.issparse(arg1):
|
| 68 |
+
x = arg1.asformat(self.format)
|
| 69 |
+
data = x.data
|
| 70 |
+
row = x.row
|
| 71 |
+
col = x.col
|
| 72 |
+
|
| 73 |
+
if arg1.format != self.format:
|
| 74 |
+
# When formats are different, all arrays are already copied
|
| 75 |
+
copy = False
|
| 76 |
+
|
| 77 |
+
if shape is None:
|
| 78 |
+
shape = arg1.shape
|
| 79 |
+
|
| 80 |
+
self.has_canonical_format = x.has_canonical_format
|
| 81 |
+
|
| 82 |
+
elif _util.isshape(arg1):
|
| 83 |
+
m, n = arg1
|
| 84 |
+
m, n = int(m), int(n)
|
| 85 |
+
data = cupy.zeros(0, dtype if dtype else 'd')
|
| 86 |
+
row = cupy.zeros(0, dtype='i')
|
| 87 |
+
col = cupy.zeros(0, dtype='i')
|
| 88 |
+
# shape and copy argument is ignored
|
| 89 |
+
shape = (m, n)
|
| 90 |
+
copy = False
|
| 91 |
+
|
| 92 |
+
self.has_canonical_format = True
|
| 93 |
+
|
| 94 |
+
elif _scipy_available and scipy.sparse.issparse(arg1):
|
| 95 |
+
# Convert scipy.sparse to cupyx.scipy.sparse
|
| 96 |
+
x = arg1.tocoo()
|
| 97 |
+
data = cupy.array(x.data)
|
| 98 |
+
row = cupy.array(x.row, dtype='i')
|
| 99 |
+
col = cupy.array(x.col, dtype='i')
|
| 100 |
+
copy = False
|
| 101 |
+
if shape is None:
|
| 102 |
+
shape = arg1.shape
|
| 103 |
+
|
| 104 |
+
self.has_canonical_format = x.has_canonical_format
|
| 105 |
+
|
| 106 |
+
elif isinstance(arg1, tuple) and len(arg1) == 2:
|
| 107 |
+
try:
|
| 108 |
+
data, (row, col) = arg1
|
| 109 |
+
except (TypeError, ValueError):
|
| 110 |
+
raise TypeError('invalid input format')
|
| 111 |
+
|
| 112 |
+
if not (_base.isdense(data) and data.ndim == 1 and
|
| 113 |
+
_base.isdense(row) and row.ndim == 1 and
|
| 114 |
+
_base.isdense(col) and col.ndim == 1):
|
| 115 |
+
raise ValueError('row, column, and data arrays must be 1-D')
|
| 116 |
+
if not (len(data) == len(row) == len(col)):
|
| 117 |
+
raise ValueError(
|
| 118 |
+
'row, column, and data array must all be the same length')
|
| 119 |
+
|
| 120 |
+
self.has_canonical_format = False
|
| 121 |
+
|
| 122 |
+
elif _base.isdense(arg1):
|
| 123 |
+
if arg1.ndim > 2:
|
| 124 |
+
raise TypeError('expected dimension <= 2 array or matrix')
|
| 125 |
+
dense = cupy.atleast_2d(arg1)
|
| 126 |
+
row, col = dense.nonzero()
|
| 127 |
+
data = dense[row, col]
|
| 128 |
+
shape = dense.shape
|
| 129 |
+
|
| 130 |
+
self.has_canonical_format = True
|
| 131 |
+
|
| 132 |
+
else:
|
| 133 |
+
raise TypeError('invalid input format')
|
| 134 |
+
|
| 135 |
+
if dtype is None:
|
| 136 |
+
dtype = data.dtype
|
| 137 |
+
else:
|
| 138 |
+
dtype = numpy.dtype(dtype)
|
| 139 |
+
|
| 140 |
+
if dtype not in (numpy.bool_, numpy.float32, numpy.float64,
|
| 141 |
+
numpy.complex64, numpy.complex128):
|
| 142 |
+
raise ValueError(
|
| 143 |
+
'Only bool, float32, float64, complex64 and complex128'
|
| 144 |
+
' are supported')
|
| 145 |
+
|
| 146 |
+
data = data.astype(dtype, copy=copy)
|
| 147 |
+
row = row.astype('i', copy=copy)
|
| 148 |
+
col = col.astype('i', copy=copy)
|
| 149 |
+
|
| 150 |
+
if shape is None:
|
| 151 |
+
if len(row) == 0 or len(col) == 0:
|
| 152 |
+
raise ValueError(
|
| 153 |
+
'cannot infer dimensions from zero sized index arrays')
|
| 154 |
+
shape = (int(row.max()) + 1, int(col.max()) + 1)
|
| 155 |
+
|
| 156 |
+
if len(data) > 0:
|
| 157 |
+
if row.max() >= shape[0]:
|
| 158 |
+
raise ValueError('row index exceeds matrix dimensions')
|
| 159 |
+
if col.max() >= shape[1]:
|
| 160 |
+
raise ValueError('column index exceeds matrix dimensions')
|
| 161 |
+
if row.min() < 0:
|
| 162 |
+
raise ValueError('negative row index found')
|
| 163 |
+
if col.min() < 0:
|
| 164 |
+
raise ValueError('negative column index found')
|
| 165 |
+
|
| 166 |
+
sparse_data._data_matrix.__init__(self, data)
|
| 167 |
+
self.row = row
|
| 168 |
+
self.col = col
|
| 169 |
+
if not _util.isshape(shape):
|
| 170 |
+
raise ValueError('invalid shape (must be a 2-tuple of int)')
|
| 171 |
+
self._shape = int(shape[0]), int(shape[1])
|
| 172 |
+
|
| 173 |
+
def _with_data(self, data, copy=True):
|
| 174 |
+
"""Returns a matrix with the same sparsity structure as self,
|
| 175 |
+
but with different data. By default the index arrays
|
| 176 |
+
(i.e. .row and .col) are copied.
|
| 177 |
+
"""
|
| 178 |
+
if copy:
|
| 179 |
+
return coo_matrix(
|
| 180 |
+
(data, (self.row.copy(), self.col.copy())),
|
| 181 |
+
shape=self.shape, dtype=data.dtype)
|
| 182 |
+
else:
|
| 183 |
+
return coo_matrix(
|
| 184 |
+
(data, (self.row, self.col)), shape=self.shape,
|
| 185 |
+
dtype=data.dtype)
|
| 186 |
+
|
| 187 |
+
def diagonal(self, k=0):
|
| 188 |
+
"""Returns the k-th diagonal of the matrix.
|
| 189 |
+
|
| 190 |
+
Args:
|
| 191 |
+
k (int, optional): Which diagonal to get, corresponding to elements
|
| 192 |
+
a[i, i+k]. Default: 0 (the main diagonal).
|
| 193 |
+
|
| 194 |
+
Returns:
|
| 195 |
+
cupy.ndarray : The k-th diagonal.
|
| 196 |
+
"""
|
| 197 |
+
rows, cols = self.shape
|
| 198 |
+
if k <= -rows or k >= cols:
|
| 199 |
+
return cupy.empty(0, dtype=self.data.dtype)
|
| 200 |
+
diag = cupy.zeros(min(rows + min(k, 0), cols - max(k, 0)),
|
| 201 |
+
dtype=self.dtype)
|
| 202 |
+
diag_mask = (self.row + k) == self.col
|
| 203 |
+
|
| 204 |
+
if self.has_canonical_format:
|
| 205 |
+
row = self.row[diag_mask]
|
| 206 |
+
data = self.data[diag_mask]
|
| 207 |
+
else:
|
| 208 |
+
diag_coo = coo_matrix((self.data[diag_mask],
|
| 209 |
+
(self.row[diag_mask], self.col[diag_mask])),
|
| 210 |
+
shape=self.shape)
|
| 211 |
+
diag_coo.sum_duplicates()
|
| 212 |
+
row = diag_coo.row
|
| 213 |
+
data = diag_coo.data
|
| 214 |
+
diag[row + min(k, 0)] = data
|
| 215 |
+
|
| 216 |
+
return diag
|
| 217 |
+
|
| 218 |
+
def setdiag(self, values, k=0):
|
| 219 |
+
"""Set diagonal or off-diagonal elements of the array.
|
| 220 |
+
|
| 221 |
+
Args:
|
| 222 |
+
values (ndarray): New values of the diagonal elements. Values may
|
| 223 |
+
have any length. If the diagonal is longer than values, then
|
| 224 |
+
the remaining diagonal entries will not be set. If values are
|
| 225 |
+
longer than the diagonal, then the remaining values are
|
| 226 |
+
ignored. If a scalar value is given, all of the diagonal is set
|
| 227 |
+
to it.
|
| 228 |
+
k (int, optional): Which off-diagonal to set, corresponding to
|
| 229 |
+
elements a[i,i+k]. Default: 0 (the main diagonal).
|
| 230 |
+
|
| 231 |
+
"""
|
| 232 |
+
M, N = self.shape
|
| 233 |
+
if (k > 0 and k >= N) or (k < 0 and -k >= M):
|
| 234 |
+
raise ValueError("k exceeds matrix dimensions")
|
| 235 |
+
if values.ndim and not len(values):
|
| 236 |
+
return
|
| 237 |
+
idx_dtype = self.row.dtype
|
| 238 |
+
|
| 239 |
+
# Determine which triples to keep and where to put the new ones.
|
| 240 |
+
full_keep = self.col - self.row != k
|
| 241 |
+
if k < 0:
|
| 242 |
+
max_index = min(M + k, N)
|
| 243 |
+
if values.ndim:
|
| 244 |
+
max_index = min(max_index, len(values))
|
| 245 |
+
keep = cupy.logical_or(full_keep, self.col >= max_index)
|
| 246 |
+
new_row = cupy.arange(-k, -k + max_index, dtype=idx_dtype)
|
| 247 |
+
new_col = cupy.arange(max_index, dtype=idx_dtype)
|
| 248 |
+
else:
|
| 249 |
+
max_index = min(M, N - k)
|
| 250 |
+
if values.ndim:
|
| 251 |
+
max_index = min(max_index, len(values))
|
| 252 |
+
keep = cupy.logical_or(full_keep, self.row >= max_index)
|
| 253 |
+
new_row = cupy.arange(max_index, dtype=idx_dtype)
|
| 254 |
+
new_col = cupy.arange(k, k + max_index, dtype=idx_dtype)
|
| 255 |
+
|
| 256 |
+
# Define the array of data consisting of the entries to be added.
|
| 257 |
+
if values.ndim:
|
| 258 |
+
new_data = values[:max_index]
|
| 259 |
+
else:
|
| 260 |
+
new_data = cupy.full(max_index, values, dtype=self.dtype)
|
| 261 |
+
|
| 262 |
+
# Update the internal structure.
|
| 263 |
+
self.row = cupy.concatenate((self.row[keep], new_row))
|
| 264 |
+
self.col = cupy.concatenate((self.col[keep], new_col))
|
| 265 |
+
self.data = cupy.concatenate((self.data[keep], new_data))
|
| 266 |
+
self.has_canonical_format = False
|
| 267 |
+
|
| 268 |
+
def eliminate_zeros(self):
|
| 269 |
+
"""Removes zero entories in place."""
|
| 270 |
+
ind = self.data != 0
|
| 271 |
+
self.data = self.data[ind]
|
| 272 |
+
self.row = self.row[ind]
|
| 273 |
+
self.col = self.col[ind]
|
| 274 |
+
|
| 275 |
+
def get_shape(self):
|
| 276 |
+
"""Returns the shape of the matrix.
|
| 277 |
+
|
| 278 |
+
Returns:
|
| 279 |
+
tuple: Shape of the matrix.
|
| 280 |
+
"""
|
| 281 |
+
return self._shape
|
| 282 |
+
|
| 283 |
+
def getnnz(self, axis=None):
|
| 284 |
+
"""Returns the number of stored values, including explicit zeros."""
|
| 285 |
+
if axis is None:
|
| 286 |
+
return self.data.size
|
| 287 |
+
else:
|
| 288 |
+
raise ValueError
|
| 289 |
+
|
| 290 |
+
def get(self, stream=None):
|
| 291 |
+
"""Returns a copy of the array on host memory.
|
| 292 |
+
|
| 293 |
+
Args:
|
| 294 |
+
stream (cupy.cuda.Stream): CUDA stream object. If it is given, the
|
| 295 |
+
copy runs asynchronously. Otherwise, the copy is synchronous.
|
| 296 |
+
|
| 297 |
+
Returns:
|
| 298 |
+
scipy.sparse.coo_matrix: Copy of the array on host memory.
|
| 299 |
+
|
| 300 |
+
"""
|
| 301 |
+
if not _scipy_available:
|
| 302 |
+
raise RuntimeError('scipy is not available')
|
| 303 |
+
|
| 304 |
+
data = self.data.get(stream)
|
| 305 |
+
row = self.row.get(stream)
|
| 306 |
+
col = self.col.get(stream)
|
| 307 |
+
return scipy.sparse.coo_matrix(
|
| 308 |
+
(data, (row, col)), shape=self.shape)
|
| 309 |
+
|
| 310 |
+
def reshape(self, *shape, order='C'):
|
| 311 |
+
"""Gives a new shape to a sparse matrix without changing its data.
|
| 312 |
+
|
| 313 |
+
Args:
|
| 314 |
+
shape (tuple):
|
| 315 |
+
The new shape should be compatible with the original shape.
|
| 316 |
+
order: {'C', 'F'} (optional)
|
| 317 |
+
Read the elements using this index order. 'C' means to read and
|
| 318 |
+
write the elements using C-like index order. 'F' means to read
|
| 319 |
+
and write the elements using Fortran-like index order. Default:
|
| 320 |
+
C.
|
| 321 |
+
|
| 322 |
+
Returns:
|
| 323 |
+
cupyx.scipy.sparse.coo_matrix: sparse matrix
|
| 324 |
+
|
| 325 |
+
"""
|
| 326 |
+
|
| 327 |
+
shape = _sputils.check_shape(shape, self.shape)
|
| 328 |
+
|
| 329 |
+
if shape == self.shape:
|
| 330 |
+
return self
|
| 331 |
+
|
| 332 |
+
nrows, ncols = self.shape
|
| 333 |
+
|
| 334 |
+
if order == 'C': # C to represent matrix in row major format
|
| 335 |
+
dtype = _sputils.get_index_dtype(
|
| 336 |
+
maxval=(ncols * max(0, nrows - 1) + max(0, ncols - 1)))
|
| 337 |
+
flat_indices = cupy.multiply(ncols, self.row,
|
| 338 |
+
dtype=dtype) + self.col
|
| 339 |
+
new_row, new_col = divmod(flat_indices, shape[1])
|
| 340 |
+
elif order == 'F':
|
| 341 |
+
dtype = _sputils.get_index_dtype(
|
| 342 |
+
maxval=(ncols * max(0, nrows - 1) + max(0, ncols - 1)))
|
| 343 |
+
flat_indices = cupy.multiply(ncols, self.row,
|
| 344 |
+
dtype=dtype) + self.row
|
| 345 |
+
new_col, new_row = divmod(flat_indices, shape[0])
|
| 346 |
+
else:
|
| 347 |
+
raise ValueError("'order' must be 'C' or 'F'")
|
| 348 |
+
|
| 349 |
+
new_data = self.data
|
| 350 |
+
|
| 351 |
+
return coo_matrix((new_data, (new_row, new_col)), shape=shape,
|
| 352 |
+
copy=False)
|
| 353 |
+
|
| 354 |
+
def sum_duplicates(self):
|
| 355 |
+
"""Eliminate duplicate matrix entries by adding them together.
|
| 356 |
+
|
| 357 |
+
.. warning::
|
| 358 |
+
When sorting the indices, CuPy follows the convention of cuSPARSE,
|
| 359 |
+
which is different from that of SciPy. Therefore, the order of the
|
| 360 |
+
output indices may differ:
|
| 361 |
+
|
| 362 |
+
.. code-block:: python
|
| 363 |
+
|
| 364 |
+
>>> # 1 0 0
|
| 365 |
+
>>> # A = 1 1 0
|
| 366 |
+
>>> # 1 1 1
|
| 367 |
+
>>> data = cupy.array([1, 1, 1, 1, 1, 1], 'f')
|
| 368 |
+
>>> row = cupy.array([0, 1, 1, 2, 2, 2], 'i')
|
| 369 |
+
>>> col = cupy.array([0, 0, 1, 0, 1, 2], 'i')
|
| 370 |
+
>>> A = cupyx.scipy.sparse.coo_matrix((data, (row, col)),
|
| 371 |
+
... shape=(3, 3))
|
| 372 |
+
>>> a = A.get()
|
| 373 |
+
>>> A.sum_duplicates()
|
| 374 |
+
>>> a.sum_duplicates() # a is scipy.sparse.coo_matrix
|
| 375 |
+
>>> A.row
|
| 376 |
+
array([0, 1, 1, 2, 2, 2], dtype=int32)
|
| 377 |
+
>>> a.row
|
| 378 |
+
array([0, 1, 2, 1, 2, 2], dtype=int32)
|
| 379 |
+
>>> A.col
|
| 380 |
+
array([0, 0, 1, 0, 1, 2], dtype=int32)
|
| 381 |
+
>>> a.col
|
| 382 |
+
array([0, 0, 0, 1, 1, 2], dtype=int32)
|
| 383 |
+
|
| 384 |
+
.. warning::
|
| 385 |
+
Calling this function might synchronize the device.
|
| 386 |
+
|
| 387 |
+
.. seealso::
|
| 388 |
+
:meth:`scipy.sparse.coo_matrix.sum_duplicates`
|
| 389 |
+
|
| 390 |
+
"""
|
| 391 |
+
if self.has_canonical_format:
|
| 392 |
+
return
|
| 393 |
+
# Note: The sorting order below follows the cuSPARSE convention (first
|
| 394 |
+
# row then col, so-called row-major) and differs from that of SciPy, as
|
| 395 |
+
# the cuSPARSE functions such as cusparseSpMV() assume this sorting
|
| 396 |
+
# order.
|
| 397 |
+
# See https://docs.nvidia.com/cuda/cusparse/index.html#coo-format
|
| 398 |
+
keys = cupy.stack([self.col, self.row])
|
| 399 |
+
order = cupy.lexsort(keys)
|
| 400 |
+
src_data = self.data[order]
|
| 401 |
+
src_row = self.row[order]
|
| 402 |
+
src_col = self.col[order]
|
| 403 |
+
diff = self._sum_duplicates_diff(src_row, src_col, size=self.row.size)
|
| 404 |
+
|
| 405 |
+
if diff[1:].all():
|
| 406 |
+
# All elements have different indices.
|
| 407 |
+
data = src_data
|
| 408 |
+
row = src_row
|
| 409 |
+
col = src_col
|
| 410 |
+
else:
|
| 411 |
+
# TODO(leofang): move the kernels outside this method
|
| 412 |
+
index = cupy.cumsum(diff, dtype='i')
|
| 413 |
+
size = int(index[-1]) + 1
|
| 414 |
+
data = cupy.zeros(size, dtype=self.data.dtype)
|
| 415 |
+
row = cupy.empty(size, dtype='i')
|
| 416 |
+
col = cupy.empty(size, dtype='i')
|
| 417 |
+
if self.data.dtype.kind == 'b':
|
| 418 |
+
cupy.ElementwiseKernel(
|
| 419 |
+
'T src_data, int32 src_row, int32 src_col, int32 index',
|
| 420 |
+
'raw T data, raw int32 row, raw int32 col',
|
| 421 |
+
'''
|
| 422 |
+
if (src_data) data[index] = true;
|
| 423 |
+
row[index] = src_row;
|
| 424 |
+
col[index] = src_col;
|
| 425 |
+
''',
|
| 426 |
+
'cupyx_scipy_sparse_coo_sum_duplicates_assign'
|
| 427 |
+
)(src_data, src_row, src_col, index, data, row, col)
|
| 428 |
+
elif self.data.dtype.kind == 'f':
|
| 429 |
+
cupy.ElementwiseKernel(
|
| 430 |
+
'T src_data, int32 src_row, int32 src_col, int32 index',
|
| 431 |
+
'raw T data, raw int32 row, raw int32 col',
|
| 432 |
+
'''
|
| 433 |
+
atomicAdd(&data[index], src_data);
|
| 434 |
+
row[index] = src_row;
|
| 435 |
+
col[index] = src_col;
|
| 436 |
+
''',
|
| 437 |
+
'cupyx_scipy_sparse_coo_sum_duplicates_assign'
|
| 438 |
+
)(src_data, src_row, src_col, index, data, row, col)
|
| 439 |
+
elif self.data.dtype.kind == 'c':
|
| 440 |
+
cupy.ElementwiseKernel(
|
| 441 |
+
'T src_real, T src_imag, int32 src_row, int32 src_col, '
|
| 442 |
+
'int32 index',
|
| 443 |
+
'raw T real, raw T imag, raw int32 row, raw int32 col',
|
| 444 |
+
'''
|
| 445 |
+
atomicAdd(&real[index], src_real);
|
| 446 |
+
atomicAdd(&imag[index], src_imag);
|
| 447 |
+
row[index] = src_row;
|
| 448 |
+
col[index] = src_col;
|
| 449 |
+
''',
|
| 450 |
+
'cupyx_scipy_sparse_coo_sum_duplicates_assign_complex'
|
| 451 |
+
)(src_data.real, src_data.imag, src_row, src_col, index,
|
| 452 |
+
data.real, data.imag, row, col)
|
| 453 |
+
|
| 454 |
+
self.data = data
|
| 455 |
+
self.row = row
|
| 456 |
+
self.col = col
|
| 457 |
+
self.has_canonical_format = True
|
| 458 |
+
|
| 459 |
+
def toarray(self, order=None, out=None):
|
| 460 |
+
"""Returns a dense matrix representing the same value.
|
| 461 |
+
|
| 462 |
+
Args:
|
| 463 |
+
order (str): Not supported.
|
| 464 |
+
out: Not supported.
|
| 465 |
+
|
| 466 |
+
Returns:
|
| 467 |
+
cupy.ndarray: Dense array representing the same value.
|
| 468 |
+
|
| 469 |
+
.. seealso:: :meth:`scipy.sparse.coo_matrix.toarray`
|
| 470 |
+
|
| 471 |
+
"""
|
| 472 |
+
return self.tocsr().toarray(order=order, out=out)
|
| 473 |
+
|
| 474 |
+
def tocoo(self, copy=False):
|
| 475 |
+
"""Converts the matrix to COOrdinate format.
|
| 476 |
+
|
| 477 |
+
Args:
|
| 478 |
+
copy (bool): If ``False``, it shares data arrays as much as
|
| 479 |
+
possible.
|
| 480 |
+
|
| 481 |
+
Returns:
|
| 482 |
+
cupyx.scipy.sparse.coo_matrix: Converted matrix.
|
| 483 |
+
|
| 484 |
+
"""
|
| 485 |
+
if copy:
|
| 486 |
+
return self.copy()
|
| 487 |
+
else:
|
| 488 |
+
return self
|
| 489 |
+
|
| 490 |
+
def tocsc(self, copy=False):
|
| 491 |
+
"""Converts the matrix to Compressed Sparse Column format.
|
| 492 |
+
|
| 493 |
+
Args:
|
| 494 |
+
copy (bool): If ``False``, it shares data arrays as much as
|
| 495 |
+
possible. Actually this option is ignored because all
|
| 496 |
+
arrays in a matrix cannot be shared in coo to csc conversion.
|
| 497 |
+
|
| 498 |
+
Returns:
|
| 499 |
+
cupyx.scipy.sparse.csc_matrix: Converted matrix.
|
| 500 |
+
|
| 501 |
+
"""
|
| 502 |
+
from cupyx import cusparse
|
| 503 |
+
|
| 504 |
+
if self.nnz == 0:
|
| 505 |
+
return _csc.csc_matrix(self.shape, dtype=self.dtype)
|
| 506 |
+
# copy is silently ignored (in line with SciPy) because both
|
| 507 |
+
# sum_duplicates and coosort change the underlying data
|
| 508 |
+
x = self.copy()
|
| 509 |
+
x.sum_duplicates()
|
| 510 |
+
cusparse.coosort(x, 'c')
|
| 511 |
+
x = cusparse.coo2csc(x)
|
| 512 |
+
x.has_canonical_format = True
|
| 513 |
+
return x
|
| 514 |
+
|
| 515 |
+
def tocsr(self, copy=False):
|
| 516 |
+
"""Converts the matrix to Compressed Sparse Row format.
|
| 517 |
+
|
| 518 |
+
Args:
|
| 519 |
+
copy (bool): If ``False``, it shares data arrays as much as
|
| 520 |
+
possible. Actually this option is ignored because all
|
| 521 |
+
arrays in a matrix cannot be shared in coo to csr conversion.
|
| 522 |
+
|
| 523 |
+
Returns:
|
| 524 |
+
cupyx.scipy.sparse.csr_matrix: Converted matrix.
|
| 525 |
+
|
| 526 |
+
"""
|
| 527 |
+
from cupyx import cusparse
|
| 528 |
+
|
| 529 |
+
if self.nnz == 0:
|
| 530 |
+
return _csr.csr_matrix(self.shape, dtype=self.dtype)
|
| 531 |
+
# copy is silently ignored (in line with SciPy) because both
|
| 532 |
+
# sum_duplicates and coosort change the underlying data
|
| 533 |
+
x = self.copy()
|
| 534 |
+
x.sum_duplicates()
|
| 535 |
+
cusparse.coosort(x, 'r')
|
| 536 |
+
x = cusparse.coo2csr(x)
|
| 537 |
+
x.has_canonical_format = True
|
| 538 |
+
return x
|
| 539 |
+
|
| 540 |
+
def transpose(self, axes=None, copy=False):
|
| 541 |
+
"""Returns a transpose matrix.
|
| 542 |
+
|
| 543 |
+
Args:
|
| 544 |
+
axes: This option is not supported.
|
| 545 |
+
copy (bool): If ``True``, a returned matrix shares no data.
|
| 546 |
+
Otherwise, it shared data arrays as much as possible.
|
| 547 |
+
|
| 548 |
+
Returns:
|
| 549 |
+
cupyx.scipy.sparse.spmatrix: Transpose matrix.
|
| 550 |
+
|
| 551 |
+
"""
|
| 552 |
+
if axes is not None:
|
| 553 |
+
raise ValueError(
|
| 554 |
+
'Sparse matrices do not support an \'axes\' parameter because '
|
| 555 |
+
'swapping dimensions is the only logical permutation.')
|
| 556 |
+
shape = self.shape[1], self.shape[0]
|
| 557 |
+
return coo_matrix(
|
| 558 |
+
(self.data, (self.col, self.row)), shape=shape, copy=copy)
|
| 559 |
+
|
| 560 |
+
|
| 561 |
+
def isspmatrix_coo(x):
|
| 562 |
+
"""Checks if a given matrix is of COO format.
|
| 563 |
+
|
| 564 |
+
Returns:
|
| 565 |
+
bool: Returns if ``x`` is :class:`cupyx.scipy.sparse.coo_matrix`.
|
| 566 |
+
|
| 567 |
+
"""
|
| 568 |
+
return isinstance(x, coo_matrix)
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_csc.py
ADDED
|
@@ -0,0 +1,413 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
try:
|
| 2 |
+
import scipy.sparse
|
| 3 |
+
_scipy_available = True
|
| 4 |
+
except ImportError:
|
| 5 |
+
_scipy_available = False
|
| 6 |
+
|
| 7 |
+
import cupy
|
| 8 |
+
from cupy_backends.cuda.api import driver
|
| 9 |
+
from cupy_backends.cuda.api import runtime
|
| 10 |
+
import cupyx.scipy.sparse
|
| 11 |
+
from cupyx.scipy.sparse import _base
|
| 12 |
+
from cupyx.scipy.sparse import _compressed
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class csc_matrix(_compressed._compressed_sparse_matrix):
|
| 16 |
+
|
| 17 |
+
"""Compressed Sparse Column matrix.
|
| 18 |
+
|
| 19 |
+
This can be instantiated in several ways.
|
| 20 |
+
|
| 21 |
+
``csc_matrix(D)``
|
| 22 |
+
``D`` is a rank-2 :class:`cupy.ndarray`.
|
| 23 |
+
``csc_matrix(S)``
|
| 24 |
+
``S`` is another sparse matrix. It is equivalent to ``S.tocsc()``.
|
| 25 |
+
``csc_matrix((M, N), [dtype])``
|
| 26 |
+
It constructs an empty matrix whose shape is ``(M, N)``. Default dtype
|
| 27 |
+
is float64.
|
| 28 |
+
``csc_matrix((data, (row, col)))``
|
| 29 |
+
All ``data``, ``row`` and ``col`` are one-dimenaional
|
| 30 |
+
:class:`cupy.ndarray`.
|
| 31 |
+
``csc_matrix((data, indices, indptr))``
|
| 32 |
+
All ``data``, ``indices`` and ``indptr`` are one-dimenaional
|
| 33 |
+
:class:`cupy.ndarray`.
|
| 34 |
+
|
| 35 |
+
Args:
|
| 36 |
+
arg1: Arguments for the initializer.
|
| 37 |
+
shape (tuple): Shape of a matrix. Its length must be two.
|
| 38 |
+
dtype: Data type. It must be an argument of :class:`numpy.dtype`.
|
| 39 |
+
copy (bool): If ``True``, copies of given arrays are always used.
|
| 40 |
+
|
| 41 |
+
.. seealso::
|
| 42 |
+
:class:`scipy.sparse.csc_matrix`
|
| 43 |
+
|
| 44 |
+
"""
|
| 45 |
+
|
| 46 |
+
format = 'csc'
|
| 47 |
+
|
| 48 |
+
def get(self, stream=None):
|
| 49 |
+
"""Returns a copy of the array on host memory.
|
| 50 |
+
|
| 51 |
+
.. warning::
|
| 52 |
+
You need to install SciPy to use this method.
|
| 53 |
+
|
| 54 |
+
Args:
|
| 55 |
+
stream (cupy.cuda.Stream): CUDA stream object. If it is given, the
|
| 56 |
+
copy runs asynchronously. Otherwise, the copy is synchronous.
|
| 57 |
+
|
| 58 |
+
Returns:
|
| 59 |
+
scipy.sparse.csc_matrix: Copy of the array on host memory.
|
| 60 |
+
|
| 61 |
+
"""
|
| 62 |
+
if not _scipy_available:
|
| 63 |
+
raise RuntimeError('scipy is not available')
|
| 64 |
+
data = self.data.get(stream)
|
| 65 |
+
indices = self.indices.get(stream)
|
| 66 |
+
indptr = self.indptr.get(stream)
|
| 67 |
+
return scipy.sparse.csc_matrix(
|
| 68 |
+
(data, indices, indptr), shape=self._shape)
|
| 69 |
+
|
| 70 |
+
def _convert_dense(self, x):
|
| 71 |
+
from cupyx import cusparse
|
| 72 |
+
|
| 73 |
+
if cusparse.check_availability('denseToSparse'):
|
| 74 |
+
m = cusparse.denseToSparse(x, format='csc')
|
| 75 |
+
else:
|
| 76 |
+
m = cusparse.dense2csc(x)
|
| 77 |
+
return m.data, m.indices, m.indptr
|
| 78 |
+
|
| 79 |
+
def _swap(self, x, y):
    # CSC's major axis is columns, so a (row, col)-ordered pair is
    # returned in (major, minor) = (col, row) order.
    return y, x
|
| 81 |
+
|
| 82 |
+
def __mul__(self, other):
    """Computes ``self * other``: scalar scaling or matrix product.

    Scalars and 0-d arrays scale the stored entries; CSR/CSC/other
    sparse matrices and dense 1-d/2-d arrays dispatch to the best
    available cuSPARSE routine (availability differs across CUDA and
    ROCm/HIP versions, hence the fallback chains below).
    """
    from cupyx import cusparse

    if cupy.isscalar(other):
        self.sum_duplicates()
        return self._with_data(self.data * other)
    elif cupyx.scipy.sparse.isspmatrix_csr(other):
        self.sum_duplicates()
        other.sum_duplicates()
        if cusparse.check_availability('spgemm'):
            a = self.tocsr()
            a.sum_duplicates()
            return cusparse.spgemm(a, other)
        elif cusparse.check_availability('csrgemm') and not runtime.is_hip:
            # trans=True is still buggy as of ROCm 4.2.0
            a = self.T
            return cusparse.csrgemm(a, other, transa=True)
        elif cusparse.check_availability('csrgemm2'):
            a = self.tocsr()
            a.sum_duplicates()
            return cusparse.csrgemm2(a, other)
        else:
            raise AssertionError
    elif isspmatrix_csc(other):
        self.sum_duplicates()
        other.sum_duplicates()
        if cusparse.check_availability('csrgemm') and not runtime.is_hip:
            # trans=True is still buggy as of ROCm 4.2.0
            a = self.T
            b = other.T
            return cusparse.csrgemm(a, b, transa=True, transb=True)
        elif cusparse.check_availability('csrgemm2'):
            a = self.tocsr()
            b = other.tocsr()
            a.sum_duplicates()
            b.sum_duplicates()
            return cusparse.csrgemm2(a, b)
        elif cusparse.check_availability('spgemm'):
            a = self.tocsr()
            b = other.tocsr()
            a.sum_duplicates()
            b.sum_duplicates()
            return cusparse.spgemm(a, b)
        else:
            raise AssertionError
    elif cupyx.scipy.sparse.isspmatrix(other):
        # Any other sparse format: convert the operand and retry as a
        # CSC @ CSR product via the branch above.
        return self * other.tocsr()
    elif _base.isdense(other):
        if other.ndim == 0:
            # 0-d dense array behaves like a scalar.
            self.sum_duplicates()
            return self._with_data(self.data * other)
        elif other.ndim == 1:
            # Sparse matrix-vector product, computed through the CSR
            # view of the transpose (transa=True).
            self.sum_duplicates()
            if (
                cusparse.check_availability('csrmv')
                and (
                    not runtime.is_hip
                    or driver.get_build_version() >= 5_00_00000
                )
            ):
                # trans=True is buggy as of ROCm 4.2.0
                csrmv = cusparse.csrmv
            elif (cusparse.check_availability('spmv')
                    and not runtime.is_hip):
                # trans=True is buggy as of ROCm 4.2.0
                # (I got HIPSPARSE_STATUS_INTERNAL_ERROR...)
                csrmv = cusparse.spmv
            else:
                raise AssertionError
            return csrmv(self.T, cupy.asfortranarray(other), transa=True)
        elif other.ndim == 2:
            # Sparse matrix-dense matrix product, same transpose trick.
            self.sum_duplicates()
            if (
                cusparse.check_availability('csrmm2')
                and (
                    not runtime.is_hip
                    or driver.get_build_version() >= 5_00_00000
                )
            ):
                # trans=True is buggy as of ROCm 4.2.0
                csrmm = cusparse.csrmm2
            elif cusparse.check_availability('spmm'):
                csrmm = cusparse.spmm
            else:
                raise AssertionError
            return csrmm(self.T, cupy.asfortranarray(other), transa=True)
        else:
            raise ValueError('could not interpret dimensions')
    else:
        return NotImplemented
|
| 172 |
+
|
| 173 |
+
# TODO(unno): Implement check_format
|
| 174 |
+
# TODO(unno): Implement diagonal
|
| 175 |
+
|
| 176 |
+
def eliminate_zeros(self):
    """Removes zero entries in place."""
    # No CSC-native compression routine is used here: round-trip
    # through the transposed CSR view, which has eliminate_zeros,
    # then adopt the compressed component arrays.
    t = self.T
    t.eliminate_zeros()
    compress = t.T
    self.data = compress.data
    self.indices = compress.indices
    self.indptr = compress.indptr
|
| 184 |
+
|
| 185 |
+
# TODO(unno): Implement maximum
|
| 186 |
+
# TODO(unno): Implement minimum
|
| 187 |
+
# TODO(unno): Implement multiply
|
| 188 |
+
# TODO(unno): Implement prune
|
| 189 |
+
|
| 190 |
+
def sort_indices(self):
    """Sorts the indices of this matrix *in place*.

    .. warning::
        Calling this function might synchronize the device.

    """
    from cupyx import cusparse

    # Skip the sort when the flag says indices are already sorted.
    if not self.has_sorted_indices:
        cusparse.cscsort(self)
        self.has_sorted_indices = True
|
| 202 |
+
|
| 203 |
+
def toarray(self, order=None, out=None):
    """Returns a dense matrix representing the same value.

    Args:
        order ({'C', 'F', None}): Whether to store data in C (row-major)
            order or F (column-major) order. Default is C-order.
        out: Not supported.

    Returns:
        cupy.ndarray: Dense array representing the same matrix.

    Raises:
        ValueError: If ``order`` is not 'C', 'F' or None.

    .. seealso:: :meth:`scipy.sparse.csc_matrix.toarray`

    """
    from cupyx import cusparse

    if order is None:
        order = 'C'
    order = order.upper()
    if self.nnz == 0:
        # Shortcut: an all-zero dense array, no cuSPARSE call needed.
        return cupy.zeros(shape=self.shape, dtype=self.dtype, order=order)

    x = self.copy()
    x.has_canonical_format = False  # need to enforce sum_duplicates
    x.sum_duplicates()
    if (cusparse.check_availability('sparseToDense')
            and (not runtime.is_hip or x.nnz > 0)):
        # On HIP, nnz=0 is problematic as of ROCm 4.2.0
        y = cusparse.sparseToDense(x)
        if order == 'F':
            return y
        elif order == 'C':
            return cupy.ascontiguousarray(y)
        else:
            raise ValueError('order not understood')
    else:
        # csc2dense and csr2dense returns F-contiguous array.
        if order == 'C':
            # To return C-contiguous array, it uses transpose.
            return cusparse.csr2dense(x.T).T
        elif order == 'F':
            return cusparse.csc2dense(x)
        else:
            raise ValueError('order not understood')
|
| 247 |
+
|
| 248 |
+
def _add_sparse(self, other, alpha, beta):
    # Computes ``alpha * self + beta * other`` for a sparse operand.
    # cuSPARSE only provides the geam routine for CSR, so both
    # operands are fed through their transposed (CSR-like) views and
    # the result is transposed back to CSC.
    from cupyx import cusparse

    self.sum_duplicates()
    other = other.tocsc().T
    other.sum_duplicates()
    if cusparse.check_availability('csrgeam2'):
        csrgeam = cusparse.csrgeam2
    elif cusparse.check_availability('csrgeam'):
        csrgeam = cusparse.csrgeam
    else:
        raise NotImplementedError
    return csrgeam(self.T, other, alpha, beta).T
|
| 261 |
+
|
| 262 |
+
# TODO(unno): Implement tobsr
|
| 263 |
+
|
| 264 |
+
def tocoo(self, copy=False):
    """Converts the matrix to COOrdinate format.

    Args:
        copy (bool): If ``False``, it shares data arrays as much as
            possible.

    Returns:
        cupyx.scipy.sparse.coo_matrix: Converted matrix.

    """
    from cupyx import cusparse

    # Only data/indices can be shared; indptr is always expanded by
    # the csc2coo conversion.
    if copy:
        data = self.data.copy()
        indices = self.indices.copy()
    else:
        data = self.data
        indices = self.indices

    return cusparse.csc2coo(self, data, indices)
|
| 285 |
+
|
| 286 |
+
def tocsc(self, copy=None):
    """Converts the matrix to Compressed Sparse Column format.

    Args:
        copy (bool): If ``False``, the method returns itself.
            Otherwise it makes a copy of the matrix.

    Returns:
        cupyx.scipy.sparse.csc_matrix: Converted matrix.

    """
    # Already CSC: a copy is only made when explicitly requested.
    return self.copy() if copy else self
|
| 301 |
+
|
| 302 |
+
def tocsr(self, copy=False):
    """Converts the matrix to Compressed Sparse Row format.

    Args:
        copy (bool): If ``False``, it shares data arrays as much as
            possible. Actually this option is ignored because all
            arrays in a matrix cannot be shared in csc to csr
            conversion.

    Returns:
        cupyx.scipy.sparse.csr_matrix: Converted matrix.

    """
    from cupyx import cusparse

    # copy is ignored
    if cusparse.check_availability('csc2csr'):
        csc2csr = cusparse.csc2csr
    elif cusparse.check_availability('csc2csrEx2'):
        csc2csr = cusparse.csc2csrEx2
    else:
        raise NotImplementedError
    # don't touch has_sorted_indices, as cuSPARSE made no guarantee
    return csc2csr(self)
|
| 325 |
+
|
| 326 |
+
def _tocsx(self):
    """Inverts the compressed format: for CSC this yields CSR."""
    return self.tocsr()
|
| 330 |
+
|
| 331 |
+
# TODO(unno): Implement todia
|
| 332 |
+
# TODO(unno): Implement todok
|
| 333 |
+
# TODO(unno): Implement tolil
|
| 334 |
+
|
| 335 |
+
def transpose(self, axes=None, copy=False):
    """Returns a transpose matrix.

    Args:
        axes: This option is not supported.
        copy (bool): If ``True``, a returned matrix shares no data.
            Otherwise, it shares data arrays as much as possible.

    Returns:
        cupyx.scipy.sparse.csr_matrix: `self` with the dimensions reversed.

    Raises:
        ValueError: If ``axes`` is given.

    """
    if axes is not None:
        raise ValueError(
            'Sparse matrices do not support an \'axes\' parameter because '
            'swapping dimensions is the only logical permutation.')

    # A CSC matrix's arrays reinterpreted with swapped shape ARE the
    # CSR representation of its transpose; no data movement needed.
    shape = self.shape[1], self.shape[0]
    trans = cupyx.scipy.sparse.csr_matrix(
        (self.data, self.indices, self.indptr), shape=shape, copy=copy)
    trans.has_canonical_format = self.has_canonical_format
    return trans
|
| 357 |
+
|
| 358 |
+
def getrow(self, i):
    """Returns a copy of row i of the matrix, as a (1 x n)
    CSR matrix (row vector).

    Args:
        i (integer): Row

    Returns:
        cupyx.scipy.sparse.csr_matrix: Sparse matrix with single row
    """
    # Rows are the minor axis of CSC; the result is converted to CSR.
    return self._minor_slice(slice(i, i + 1), copy=True).tocsr()
|
| 369 |
+
|
| 370 |
+
def getcol(self, i):
    """Returns a copy of column i of the matrix, as a (m x 1)
    CSC matrix (column vector).

    Args:
        i (integer): Column

    Returns:
        cupyx.scipy.sparse.csc_matrix: Sparse matrix with single column
    """
    # Columns are the major axis of CSC, so a major slice suffices.
    return self._major_slice(slice(i, i + 1), copy=True)
|
| 381 |
+
|
| 382 |
+
def _get_intXarray(self, row, col):
    # Fancy-index the major axis (columns) first, then slice out the
    # single requested row on the minor axis.
    single_row = slice(row, row + 1)
    return self._major_index_fancy(col)._minor_slice(single_row)
|
| 385 |
+
|
| 386 |
+
def _get_intXslice(self, row, col):
    # A contiguous (step 1/None) column slice shares data with self,
    # so the minor slice is asked to copy in that case.
    single_row = slice(row, row + 1)
    needs_copy = col.step in (1, None)
    return self._major_slice(col)._minor_slice(single_row, copy=needs_copy)
|
| 390 |
+
|
| 391 |
+
def _get_sliceXint(self, row, col):
    # Single column on the major axis, row slice on the minor axis.
    single_col = slice(col, col + 1)
    return self._major_slice(single_col)._minor_slice(row, copy=True)
|
| 394 |
+
|
| 395 |
+
def _get_sliceXarray(self, row, col):
    # Columns are the major axis: fancy-index them, then slice rows.
    selected = self._major_index_fancy(col)
    return selected._minor_slice(row)
|
| 397 |
+
|
| 398 |
+
def _get_arrayXint(self, row, col):
    # Single column on the major axis, fancy row index on the minor.
    single_col = slice(col, col + 1)
    return self._major_slice(single_col)._minor_index_fancy(row)
|
| 401 |
+
|
| 402 |
+
def _get_arrayXslice(self, row, col):
    # Column slice on the major axis, fancy row index on the minor.
    sliced = self._major_slice(col)
    return sliced._minor_index_fancy(row)
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
def isspmatrix_csc(x):
    """Checks if a given matrix is of CSC format.

    Args:
        x: Object to check.

    Returns:
        bool: Returns if ``x`` is :class:`cupyx.scipy.sparse.csc_matrix`.

    """
    return isinstance(x, csc_matrix)
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_csr.py
ADDED
|
@@ -0,0 +1,1242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import operator
|
| 2 |
+
import warnings
|
| 3 |
+
|
| 4 |
+
import numpy
|
| 5 |
+
|
| 6 |
+
try:
|
| 7 |
+
import scipy.sparse
|
| 8 |
+
_scipy_available = True
|
| 9 |
+
except ImportError:
|
| 10 |
+
_scipy_available = False
|
| 11 |
+
|
| 12 |
+
import cupy
|
| 13 |
+
from cupy._core import _accelerator
|
| 14 |
+
from cupy.cuda import cub
|
| 15 |
+
from cupy.cuda import runtime
|
| 16 |
+
from cupyx.scipy.sparse import _base
|
| 17 |
+
from cupyx.scipy.sparse import _compressed
|
| 18 |
+
from cupyx.scipy.sparse import _csc
|
| 19 |
+
from cupyx.scipy.sparse import SparseEfficiencyWarning
|
| 20 |
+
from cupyx.scipy.sparse import _util
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class csr_matrix(_compressed._compressed_sparse_matrix):
|
| 24 |
+
|
| 25 |
+
"""Compressed Sparse Row matrix.
|
| 26 |
+
|
| 27 |
+
This can be instantiated in several ways.
|
| 28 |
+
|
| 29 |
+
``csr_matrix(D)``
|
| 30 |
+
``D`` is a rank-2 :class:`cupy.ndarray`.
|
| 31 |
+
``csr_matrix(S)``
|
| 32 |
+
``S`` is another sparse matrix. It is equivalent to ``S.tocsr()``.
|
| 33 |
+
``csr_matrix((M, N), [dtype])``
|
| 34 |
+
It constructs an empty matrix whose shape is ``(M, N)``. Default dtype
|
| 35 |
+
is float64.
|
| 36 |
+
``csr_matrix((data, (row, col)))``
|
| 37 |
+
All ``data``, ``row`` and ``col`` are one-dimensional
|
| 38 |
+
:class:`cupy.ndarray`.
|
| 39 |
+
``csr_matrix((data, indices, indptr))``
|
| 40 |
+
All ``data``, ``indices`` and ``indptr`` are one-dimensional
|
| 41 |
+
:class:`cupy.ndarray`.
|
| 42 |
+
|
| 43 |
+
Args:
|
| 44 |
+
arg1: Arguments for the initializer.
|
| 45 |
+
shape (tuple): Shape of a matrix. Its length must be two.
|
| 46 |
+
dtype: Data type. It must be an argument of :class:`numpy.dtype`.
|
| 47 |
+
copy (bool): If ``True``, copies of given arrays are always used.
|
| 48 |
+
|
| 49 |
+
.. seealso::
|
| 50 |
+
:class:`scipy.sparse.csr_matrix`
|
| 51 |
+
|
| 52 |
+
"""
|
| 53 |
+
|
| 54 |
+
format = 'csr'
|
| 55 |
+
|
| 56 |
+
def get(self, stream=None):
    """Returns a copy of the array on host memory.

    Args:
        stream (cupy.cuda.Stream): CUDA stream object. If it is given, the
            copy runs asynchronously. Otherwise, the copy is synchronous.

    Returns:
        scipy.sparse.csr_matrix: Copy of the array on host memory.

    Raises:
        RuntimeError: If SciPy is not installed.
    """
    if not _scipy_available:
        raise RuntimeError('scipy is not available')
    # Transfer the three CSR component arrays device -> host, then
    # rebuild the matrix with SciPy without copying them again.
    data = self.data.get(stream)
    indices = self.indices.get(stream)
    indptr = self.indptr.get(stream)
    return scipy.sparse.csr_matrix(
        (data, indices, indptr), shape=self._shape)
|
| 74 |
+
|
| 75 |
+
def _convert_dense(self, x):
    # Converts a dense array ``x`` into the three CSR component arrays.
    # NOTE(review): ``dense2csr`` is referenced unqualified here and is
    # not among the visible top-of-file imports — presumably a helper
    # defined later in this module; verify it resolves. The CSC
    # counterpart calls ``cupyx.cusparse`` explicitly and also checks
    # 'denseToSparse' availability; consider aligning the two.
    m = dense2csr(x)
    return m.data, m.indices, m.indptr
|
| 78 |
+
|
| 79 |
+
def _swap(self, x, y):
    # CSR's major axis is rows, so (row, col) order is already
    # (major, minor) order: return the pair unchanged.
    return x, y
|
| 81 |
+
|
| 82 |
+
def _add_sparse(self, other, alpha, beta):
    # Computes ``alpha * self + beta * other`` for a sparse operand,
    # using whichever cuSPARSE geam variant is available.
    from cupyx import cusparse

    self.sum_duplicates()
    other = other.tocsr()
    other.sum_duplicates()
    if cusparse.check_availability('csrgeam2'):
        csrgeam = cusparse.csrgeam2
    elif cusparse.check_availability('csrgeam'):
        csrgeam = cusparse.csrgeam
    else:
        raise NotImplementedError
    return csrgeam(self, other, alpha, beta)
|
| 95 |
+
|
| 96 |
+
def _comparison(self, other, op, op_name):
    """Shared driver behind the six rich-comparison dunder methods.

    Args:
        other: Scalar, dense array or CSR matrix to compare against.
        op: Python comparison operator; used only on the dense path.
        op_name (str): One of '_eq_', '_ne_', '_lt_', '_gt_', '_le_',
            '_ge_'. Only these six are passed by the callers; the
            if/elif chain below relies on that (no final else).

    Returns:
        csr_matrix or dense result of the comparison.
    """
    if _util.isscalarlike(other):
        data = cupy.asarray(other, dtype=self.dtype).reshape(1)
        if numpy.isnan(data[0]):
            # NaN compares unequal to everything: != is all-True,
            # every other comparison is all-False.
            if op_name == '_ne_':
                return csr_matrix(cupy.ones(self.shape, dtype=numpy.bool_))
            else:
                return csr_matrix(self.shape, dtype=numpy.bool_)
        # Wrap the scalar as a 1x1 CSR matrix so the sparse-sparse
        # binary kernel can broadcast it.
        indices = cupy.zeros((1,), dtype=numpy.int32)
        indptr = cupy.arange(2, dtype=numpy.int32)
        other = csr_matrix((data, indices, indptr), shape=(1, 1))
        return binopt_csr(self, other, op_name)
    elif _util.isdense(other):
        return op(self.todense(), other)
    elif isspmatrix_csr(other):
        self.sum_duplicates()
        other.sum_duplicates()
        if op_name in ('_ne_', '_lt_', '_gt_'):
            return binopt_csr(self, other, op_name)

        # ==, <= and >= produce mostly-True results, which are dense
        # in disguise: compute the sparse opposite and negate densely.
        warnings.warn(
            "Comparing sparse matrices using ==, <=, and >= is "
            "inefficient, try using !=, <, or > instead.",
            SparseEfficiencyWarning)
        if op_name == '_eq_':
            opposite_op_name = '_ne_'
        elif op_name == '_le_':
            opposite_op_name = '_gt_'
        elif op_name == '_ge_':
            opposite_op_name = '_lt_'
        res = binopt_csr(self, other, opposite_op_name)
        out = cupy.logical_not(res.toarray())
        return csr_matrix(out)
    raise NotImplementedError
|
| 130 |
+
|
| 131 |
+
def __eq__(self, other):
    # Element-wise equality; delegates to the shared comparison driver.
    return self._comparison(other, operator.eq, '_eq_')
|
| 133 |
+
|
| 134 |
+
def __ne__(self, other):
    # Element-wise inequality; delegates to the shared comparison driver.
    return self._comparison(other, operator.ne, '_ne_')
|
| 136 |
+
|
| 137 |
+
def __lt__(self, other):
    # Element-wise less-than; delegates to the shared comparison driver.
    return self._comparison(other, operator.lt, '_lt_')
|
| 139 |
+
|
| 140 |
+
def __gt__(self, other):
    # Element-wise greater-than; delegates to the shared comparison driver.
    return self._comparison(other, operator.gt, '_gt_')
|
| 142 |
+
|
| 143 |
+
def __le__(self, other):
    # Element-wise less-or-equal; delegates to the shared comparison driver.
    return self._comparison(other, operator.le, '_le_')
|
| 145 |
+
|
| 146 |
+
def __ge__(self, other):
    # Element-wise greater-or-equal; delegates to the shared driver.
    return self._comparison(other, operator.ge, '_ge_')
|
| 148 |
+
|
| 149 |
+
def __mul__(self, other):
    """Computes ``self * other``: scalar scaling or matrix product.

    Scalars and 0-d arrays scale the stored entries; CSR/CSC/other
    sparse matrices and dense 1-d/2-d arrays dispatch to the best
    available cuSPARSE (or CUB) routine, with fallbacks because
    routine availability differs across CUDA/HIP versions.
    """
    from cupyx import cusparse

    if cupy.isscalar(other):
        self.sum_duplicates()
        return self._with_data(self.data * other)
    elif isspmatrix_csr(other):
        self.sum_duplicates()
        other.sum_duplicates()
        if cusparse.check_availability('spgemm'):
            return cusparse.spgemm(self, other)
        elif cusparse.check_availability('csrgemm2'):
            return cusparse.csrgemm2(self, other)
        elif cusparse.check_availability('csrgemm'):
            return cusparse.csrgemm(self, other)
        else:
            raise AssertionError
    elif _csc.isspmatrix_csc(other):
        self.sum_duplicates()
        other.sum_duplicates()
        if cusparse.check_availability('csrgemm') and not runtime.is_hip:
            # trans=True is still buggy as of ROCm 4.2.0
            return cusparse.csrgemm(self, other.T, transb=True)
        elif cusparse.check_availability('spgemm'):
            b = other.tocsr()
            b.sum_duplicates()
            return cusparse.spgemm(self, b)
        elif cusparse.check_availability('csrgemm2'):
            b = other.tocsr()
            b.sum_duplicates()
            return cusparse.csrgemm2(self, b)
        else:
            raise AssertionError
    elif _base.isspmatrix(other):
        # Any other sparse format: convert and retry as CSR @ CSR.
        return self * other.tocsr()
    elif _base.isdense(other):
        if other.ndim == 0:
            # 0-d dense array behaves like a scalar.
            self.sum_duplicates()
            return self._with_data(self.data * other)
        elif other.ndim == 1:
            # Sparse matrix-vector product.
            self.sum_duplicates()
            other = cupy.asfortranarray(other)
            # need extra padding to ensure not stepping on the CUB bug,
            # see cupy/cupy#3679 for discussion
            is_cub_safe = (self.indptr.data.mem.size
                           > self.indptr.size * self.indptr.dtype.itemsize)
            # CUB spmv is buggy since CUDA 11.0, see
            # https://github.com/cupy/cupy/issues/3822#issuecomment-782607637
            is_cub_safe &= (cub._get_cuda_build_version() < 11000)
            for accelerator in _accelerator.get_routine_accelerators():
                if (accelerator == _accelerator.ACCELERATOR_CUB
                        and not runtime.is_hip
                        and is_cub_safe and other.flags.c_contiguous):
                    return cub.device_csrmv(
                        self.shape[0], self.shape[1], self.nnz,
                        self.data, self.indptr, self.indices, other)
            if (cusparse.check_availability('csrmvEx') and self.nnz > 0 and
                    cusparse.csrmvExIsAligned(self, other)):
                # csrmvEx does not work if nnz == 0
                csrmv = cusparse.csrmvEx
            elif cusparse.check_availability('csrmv'):
                csrmv = cusparse.csrmv
            elif cusparse.check_availability('spmv'):
                csrmv = cusparse.spmv
            else:
                raise AssertionError
            return csrmv(self, other)
        elif other.ndim == 2:
            # Sparse matrix-dense matrix product.
            self.sum_duplicates()
            if cusparse.check_availability('csrmm2'):
                csrmm = cusparse.csrmm2
            elif cusparse.check_availability('spmm'):
                csrmm = cusparse.spmm
            else:
                raise AssertionError
            return csrmm(self, cupy.asfortranarray(other))
        else:
            raise ValueError('could not interpret dimensions')
    else:
        return NotImplemented
|
| 229 |
+
|
| 230 |
+
def __div__(self, other):
    # Python 2 division protocol; intentionally unsupported.
    raise NotImplementedError
|
| 232 |
+
|
| 233 |
+
def __rdiv__(self, other):
    # Python 2 reflected division protocol; intentionally unsupported.
    raise NotImplementedError
|
| 235 |
+
|
| 236 |
+
def __truediv__(self, other):
    """Point-wise division by another matrix, vector or scalar"""
    if _util.isscalarlike(other):
        dtype = self.dtype
        if dtype == numpy.float32:
            # Note: This is a work-around to make the output dtype the same
            # as SciPy. It might be SciPy version dependent.
            dtype = numpy.float64
        dtype = cupy.result_type(dtype, other)
        # Multiply by the reciprocal instead of dividing each entry.
        d = cupy.reciprocal(other, dtype=dtype)
        return multiply_by_scalar(self, d)
    elif _util.isdense(other):
        # Dense divisor: broadcast it to self's shape and divide only
        # the stored (COO) entries with a custom kernel.
        other = cupy.atleast_2d(other)
        other = cupy.broadcast_to(other, self.shape)
        check_shape_for_pointwise_op(self.shape, other.shape)
        ret = self.tocoo()
        ret.data = _cupy_divide_by_dense()(
            ret.data, ret.row, ret.col, ret.shape[1], other)
        return ret
    elif _base.isspmatrix(other):
        # Note: If broadcasting is needed, an exception is raised here for
        # compatibility with SciPy, as SciPy does not support broadcasting
        # in the "sparse / sparse" case.
        check_shape_for_pointwise_op(self.shape, other.shape,
                                     allow_broadcasting=False)
        dtype = numpy.promote_types(self.dtype, other.dtype)
        if dtype.char not in 'FD':
            dtype = numpy.promote_types(numpy.float64, dtype)
        # Note: The following implementation converts two sparse matrices
        # into dense matrices and then performs a point-wise division,
        # which can use lots of memory.
        self_dense = self.todense().astype(dtype, copy=False)
        return self_dense / other.todense()
    return NotImplemented
|
| 270 |
+
|
| 271 |
+
def __rtruediv__(self, other):
    # Reflected division (``other / self``) is not supported; let
    # Python fall back to the other operand or raise TypeError.
    return NotImplemented
|
| 273 |
+
|
| 274 |
+
# TODO(unno): Implement check_format
|
| 275 |
+
|
| 276 |
+
def diagonal(self, k=0):
    """Returns the k-th diagonal of the matrix.

    Args:
        k (int): Diagonal offset: 0 for the main diagonal, positive for
            diagonals above it, negative for diagonals below.

    Returns:
        cupy.ndarray: 1-d array holding the requested diagonal (empty
        when the offset lies outside the matrix).
    """
    rows, cols = self.shape
    # Length of diagonal k for an (rows x cols) matrix.
    ylen = min(rows + min(k, 0), cols - max(k, 0))
    if ylen <= 0:
        return cupy.empty(0, dtype=self.dtype)
    self.sum_duplicates()
    y = cupy.empty(ylen, dtype=self.dtype)
    # Custom kernel gathers diagonal entries from the CSR arrays.
    _cupy_csr_diagonal()(k, rows, cols, self.data, self.indptr,
                         self.indices, y)
    return y
|
| 286 |
+
|
| 287 |
+
def eliminate_zeros(self):
    """Removes zero entries in place."""
    from cupyx import cusparse

    # csr2csr_compress with tolerance 0 drops exactly-zero entries;
    # adopt its compressed component arrays.
    compress = cusparse.csr2csr_compress(self, 0)
    self.data = compress.data
    self.indices = compress.indices
    self.indptr = compress.indptr
|
| 295 |
+
|
| 296 |
+
def _maximum_minimum(self, other, cupy_op, op_name, dense_check):
    """Shared implementation backing :meth:`maximum` and :meth:`minimum`.

    Args:
        other: Scalar-like, dense array, or CSR matrix operand.
        cupy_op: Element-wise function (``cupy.maximum`` or ``cupy.minimum``).
        op_name (str): Kernel operation name forwarded to :func:`binopt_csr`.
        dense_check: Predicate on a scalar operand; when it holds, the
            result is dense and the computation goes through ``todense()``.
    """
    if _util.isscalarlike(other):
        other = cupy.asarray(other, dtype=self.dtype)
        if not dense_check(other):
            # Zero-preserving case: only the stored entries can change,
            # so the sparsity structure is reused as-is.
            self.sum_duplicates()
            data = cupy_op(self.data, other)
            return csr_matrix((data, self.indices, self.indptr),
                              shape=self.shape, dtype=self.dtype)
        # Note: This is a work-around to make the output dtype the same
        # as SciPy. It might be SciPy version dependent.
        dtype = self.dtype
        if dtype == numpy.float32:
            dtype = numpy.float64
        elif dtype == numpy.complex64:
            dtype = numpy.complex128
        dtype = cupy.result_type(dtype, other)
        other = other.astype(dtype, copy=False)
        # Note: The computation steps below are different from SciPy.
        return csr_matrix(cupy_op(self.todense(), other))
    if _util.isdense(other):
        self.sum_duplicates()
        return cupy_op(self.todense(), cupy.atleast_2d(other))
    if isspmatrix_csr(other):
        self.sum_duplicates()
        other.sum_duplicates()
        return binopt_csr(self, other, op_name)
    raise NotImplementedError
|
| 326 |
+
|
| 327 |
+
def maximum(self, other):
    """Element-wise maximum between this matrix and another."""
    # x > 0 on the scalar operand means the result would be dense.
    return self._maximum_minimum(other, cupy.maximum, '_maximum_',
                                 lambda x: x > 0)
|
| 330 |
+
|
| 331 |
+
def minimum(self, other):
    """Element-wise minimum between this matrix and another."""
    # x < 0 on the scalar operand means the result would be dense.
    return self._maximum_minimum(other, cupy.minimum, '_minimum_',
                                 lambda x: x < 0)
|
| 334 |
+
|
| 335 |
+
def multiply(self, other):
    """Point-wise multiplication by another matrix, vector or scalar"""
    if cupy.isscalar(other):
        return multiply_by_scalar(self, other)
    if _util.isdense(other):
        self.sum_duplicates()
        return multiply_by_dense(self, cupy.atleast_2d(other))
    if isspmatrix_csr(other):
        self.sum_duplicates()
        other.sum_duplicates()
        return multiply_by_csr(self, other)
    raise TypeError('expected scalar, dense matrix/vector or csr matrix')
|
| 350 |
+
|
| 351 |
+
# TODO(unno): Implement prune
|
| 352 |
+
|
| 353 |
+
def setdiag(self, values, k=0):
    """Set diagonal or off-diagonal elements of the array.

    Args:
        values: New diagonal values; a 0-d value is broadcast along the
            whole diagonal.
            # assumes ``values`` already exposes ``astype``/``ndim``
            # (i.e. is array-like on device) -- TODO confirm callers
        k (int, optional): Which diagonal to set.

    Raises:
        ValueError: If ``k`` lies outside the matrix.
    """
    rows, cols = self.shape
    row0, col0 = max(0, -k), max(0, k)
    diag_len = min(rows - row0, cols - col0)
    if diag_len <= 0:
        raise ValueError('k exceeds matrix dimensions')
    values = values.astype(self.dtype)
    if values.ndim == 0:
        # broadcast
        diag_data = cupy.full((diag_len,), values, dtype=self.dtype)
    else:
        diag_len = min(diag_len, values.size)
        diag_data = values[:diag_len]
    # Build a CSR matrix holding only the target diagonal positions.
    diag_indices = cupy.arange(col0, col0 + diag_len, dtype='i')
    diag_indptr = cupy.zeros((rows + 1,), dtype='i')
    diag_indptr[row0:row0 + diag_len + 1] = cupy.arange(diag_len + 1,
                                                        dtype='i')
    diag_indptr[row0 + diag_len + 1:] = diag_len
    # Adding (new - old) on the diagonal leaves other entries intact.
    diag_data -= self.diagonal(k=k)[:diag_len]
    result = self + csr_matrix(
        (diag_data, diag_indices, diag_indptr), shape=self.shape)
    self.data = result.data
    self.indices = result.indices
    self.indptr = result.indptr
|
| 376 |
+
|
| 377 |
+
def sort_indices(self):
    """Sorts the indices of this matrix *in place*.

    .. warning::
        Calling this function might synchronize the device.

    """
    from cupyx import cusparse

    if self.has_sorted_indices:
        return  # already sorted; nothing to do
    cusparse.csrsort(self)
    self.has_sorted_indices = True
|
| 389 |
+
|
| 390 |
+
def toarray(self, order=None, out=None):
    """Returns a dense matrix representing the same value.

    Args:
        order ({'C', 'F', None}): Whether to store data in C (row-major)
            order or F (column-major) order. Default is C-order.
        out: Not supported.

    Returns:
        cupy.ndarray: Dense array representing the same matrix.

    .. seealso:: :meth:`scipy.sparse.csr_matrix.toarray`

    """
    from cupyx import cusparse

    order = 'C' if order is None else order.upper()
    if self.nnz == 0:
        return cupy.zeros(shape=self.shape, dtype=self.dtype, order=order)

    if self.dtype.char not in 'fdFD':
        # Non-float dtypes are handled by the custom kernel path.
        return csr2dense(self, order)

    x = self.copy()
    x.has_canonical_format = False  # need to enforce sum_duplicates
    x.sum_duplicates()
    # On HIP, nnz=0 is problematic as of ROCm 4.2.0
    use_generic = (cusparse.check_availability('sparseToDense')
                   and (not runtime.is_hip or (x.nnz > 0)))
    if use_generic:
        dense = cusparse.sparseToDense(x)
        if order == 'F':
            return dense
        if order == 'C':
            return cupy.ascontiguousarray(dense)
        raise ValueError('order not understood')
    # csr2dense returns F-contiguous array.
    if order == 'C':
        # To return C-contiguous array, it uses transpose.
        return cusparse.csc2dense(x.T).T
    if order == 'F':
        return cusparse.csr2dense(x)
    raise ValueError('order not understood')
|
| 435 |
+
|
| 436 |
+
def tobsr(self, blocksize=None, copy=False):
    """Converts the matrix to BSR format (not implemented yet)."""
    # TODO(unno): Implement tobsr
    raise NotImplementedError
|
| 439 |
+
|
| 440 |
+
def tocoo(self, copy=False):
    """Converts the matrix to COOrdinate format.

    Args:
        copy (bool): If ``False``, it shares data arrays as much as
            possible.

    Returns:
        cupyx.scipy.sparse.coo_matrix: Converted matrix.

    """
    from cupyx import cusparse

    if copy:
        data, indices = self.data.copy(), self.indices.copy()
    else:
        data, indices = self.data, self.indices

    return cusparse.csr2coo(self, data, indices)
|
| 461 |
+
|
| 462 |
+
def tocsc(self, copy=False):
    """Converts the matrix to Compressed Sparse Column format.

    Args:
        copy (bool): If ``False``, it shares data arrays as much as
            possible. Actually this option is ignored because all
            arrays in a matrix cannot be shared in csr to csc conversion.

    Returns:
        cupyx.scipy.sparse.csc_matrix: Converted matrix.

    """
    from cupyx import cusparse

    # copy is ignored
    if cusparse.check_availability('csr2csc'):
        converter = cusparse.csr2csc
    elif cusparse.check_availability('csr2cscEx2'):
        converter = cusparse.csr2cscEx2
    else:
        raise NotImplementedError
    # don't touch has_sorted_indices, as cuSPARSE made no guarantee
    return converter(self)
|
| 485 |
+
|
| 486 |
+
def tocsr(self, copy=False):
    """Converts the matrix to Compressed Sparse Row format.

    Args:
        copy (bool): If ``False``, the method returns itself.
            Otherwise it makes a copy of the matrix.

    Returns:
        cupyx.scipy.sparse.csr_matrix: Converted matrix.

    """
    # Already CSR; only duplicate storage when asked to.
    return self.copy() if copy else self
|
| 501 |
+
|
| 502 |
+
def _tocsx(self):
    """Inverts the format.

    For a CSR matrix the "other" compressed format is CSC.
    """
    return self.tocsc()
|
| 506 |
+
|
| 507 |
+
def todia(self, copy=False):
    """Converts the matrix to DIAgonal format (not implemented yet)."""
    # TODO(unno): Implement todia
    raise NotImplementedError
|
| 510 |
+
|
| 511 |
+
def todok(self, copy=False):
    """Converts the matrix to DOK format (not implemented yet)."""
    # TODO(unno): Implement todok
    raise NotImplementedError
|
| 514 |
+
|
| 515 |
+
def tolil(self, copy=False):
    """Converts the matrix to LIL format (not implemented yet)."""
    # TODO(unno): Implement tolil
    raise NotImplementedError
|
| 518 |
+
|
| 519 |
+
def transpose(self, axes=None, copy=False):
    """Returns a transpose matrix.

    Args:
        axes: This option is not supported.
        copy (bool): If ``True``, a returned matrix shares no data.
            Otherwise, it shared data arrays as much as possible.

    Returns:
        cupyx.scipy.sparse.csc_matrix: `self` with the dimensions reversed.

    """
    if axes is not None:
        raise ValueError(
            'Sparse matrices do not support an \'axes\' parameter because '
            'swapping dimensions is the only logical permutation.')

    # CSR transposed is exactly CSC with the same underlying arrays.
    rows, cols = self.shape
    transposed = _csc.csc_matrix(
        (self.data, self.indices, self.indptr), shape=(cols, rows),
        copy=copy)
    transposed.has_canonical_format = self.has_canonical_format
    return transposed
|
| 541 |
+
|
| 542 |
+
def getrow(self, i):
    """Returns a copy of row i of the matrix, as a (1 x n)
    CSR matrix (row vector).

    Args:
        i (integer): Row

    Returns:
        cupyx.scipy.sparse.csr_matrix: Sparse matrix with single row
    """
    # Rows are the major axis of CSR, so a row is a major slice.
    return self._major_slice(slice(i, i + 1), copy=True)
|
| 553 |
+
|
| 554 |
+
def getcol(self, i):
    """Returns a copy of column i of the matrix, as a (m x 1)
    CSR matrix (column vector).

    Args:
        i (integer): Column

    Returns:
        cupyx.scipy.sparse.csr_matrix: Sparse matrix with single column
    """
    # Columns are the minor axis of CSR, so a column is a minor slice.
    return self._minor_slice(slice(i, i + 1), copy=True)
|
| 565 |
+
|
| 566 |
+
def _get_intXarray(self, row, col):
    # Single row selected by integer, columns by fancy index array.
    single_row = slice(row, row + 1)
    return self._major_slice(single_row)._minor_index_fancy(col)
|
| 569 |
+
|
| 570 |
+
def _get_intXslice(self, row, col):
    # Single row selected by integer, columns by slice.
    single_row = slice(row, row + 1)
    return self._major_slice(single_row)._minor_slice(col, copy=True)
|
| 573 |
+
|
| 574 |
+
def _get_sliceXint(self, row, col):
    # Rows selected by slice, single column selected by integer.
    single_col = slice(col, col + 1)
    # Contiguous row slices share data, so the minor slice must copy.
    needs_copy = row.step in (1, None)
    return self._major_slice(row)._minor_slice(single_col, copy=needs_copy)
|
| 578 |
+
|
| 579 |
+
def _get_sliceXarray(self, row, col):
    # Rows selected by slice, columns by fancy index array.
    return self._major_slice(row)._minor_index_fancy(col)
|
| 581 |
+
|
| 582 |
+
def _get_arrayXint(self, row, col):
    # Rows selected by fancy index array, single column by integer.
    single_col = slice(col, col + 1)
    return self._major_index_fancy(row)._minor_slice(single_col)
|
| 585 |
+
|
| 586 |
+
def _get_arrayXslice(self, row, col):
    # A non-unit column step cannot be expressed as a plain minor slice;
    # expand it into explicit column indices and use fancy indexing.
    if col.step not in (1, None):
        start, stop, step = col.indices(self.shape[1])
        col_indices = cupy.arange(start, stop, step, self.indices.dtype)
        return self._get_arrayXarray(row, col_indices)
    return self._major_index_fancy(row)._minor_slice(col)
|
| 592 |
+
|
| 593 |
+
|
| 594 |
+
def isspmatrix_csr(x):
    """Checks if a given matrix is of CSR format.

    Returns:
        bool: Returns if ``x`` is :class:`cupyx.scipy.sparse.csr_matrix`.

    """
    # Subclasses of csr_matrix count as CSR too.
    return isinstance(x, csr_matrix)
|
| 602 |
+
|
| 603 |
+
|
| 604 |
+
def check_shape_for_pointwise_op(a_shape, b_shape, allow_broadcasting=True):
    """Validates operand shapes for a point-wise operation.

    Args:
        a_shape (tuple): ``(rows, cols)`` of the first operand.
        b_shape (tuple): ``(rows, cols)`` of the second operand.
        allow_broadcasting (bool): When ``True``, a dimension also
            matches if either operand's extent along it is 1.

    Raises:
        ValueError: If the shapes are not compatible.
    """
    if not allow_broadcasting:
        if a_shape != b_shape:
            raise ValueError('inconsistent shape')
        return
    a_m, a_n = a_shape
    b_m, b_n = b_shape
    for a_dim, b_dim in ((a_m, b_m), (a_n, b_n)):
        if not (a_dim == b_dim or a_dim == 1 or b_dim == 1):
            raise ValueError('inconsistent shape')
|
| 615 |
+
|
| 616 |
+
|
| 617 |
+
def multiply_by_scalar(sp, a):
    """Returns ``sp * a`` for a CSR matrix ``sp`` and scalar ``a``.

    The sparsity structure is copied; only the data is scaled.
    """
    scaled = sp.data * a
    return csr_matrix((scaled, sp.indices.copy(), sp.indptr.copy()),
                      shape=sp.shape)
|
| 622 |
+
|
| 623 |
+
|
| 624 |
+
def multiply_by_dense(sp, dn):
    """Point-wise product of CSR matrix ``sp`` and dense matrix ``dn``.

    Supports broadcasting along a dimension where either operand has
    extent 1. Returns a new :class:`csr_matrix` of the broadcast shape.
    """
    check_shape_for_pointwise_op(sp.shape, dn.shape)
    sp_m, sp_n = sp.shape
    dn_m, dn_n = dn.shape
    m, n = max(sp_m, dn_m), max(sp_n, dn_n)
    # Each broadcast replication multiplies the output nnz.
    nnz = sp.nnz * (m // sp_m) * (n // sp_n)
    dtype = numpy.promote_types(sp.dtype, dn.dtype)
    data = cupy.empty(nnz, dtype=dtype)
    indices = cupy.empty(nnz, dtype=sp.indices.dtype)
    if m > sp_m:
        # sp is a single row broadcast over m rows.
        if n > sp_n:
            indptr = cupy.arange(0, nnz + 1, n, dtype=sp.indptr.dtype)
        else:
            indptr = cupy.arange(0, nnz + 1, sp.nnz, dtype=sp.indptr.dtype)
    else:
        indptr = sp.indptr.copy()
        if n > sp_n:
            # Each row's nnz grows by the column-broadcast factor.
            indptr *= n

    # out = sp * dn
    cupy_multiply_by_dense()(sp.data, sp.indptr, sp.indices, sp_m, sp_n,
                             dn, dn_m, dn_n, indptr, m, n, data, indices)

    return csr_matrix((data, indices, indptr), shape=(m, n))
|
| 648 |
+
|
| 649 |
+
|
| 650 |
+
# CUDA preamble: binary-search the row owning flattened nnz index ``i``
# within ``indptr`` (CSR row-pointer array).
_GET_ROW_ID_ = '''
__device__ inline int get_row_id(int i, int min, int max, const int *indptr) {
    int row = (min + max) / 2;
    while (min < max) {
        if (i < indptr[row]) {
            max = row - 1;
        } else if (i >= indptr[row + 1]) {
            min = row + 1;
        } else {
            break;
        }
        row = (min + max) / 2;
    }
    return row;
}
'''

# CUDA preamble: binary-search a row's sorted column indices for ``col``;
# returns the data index, or -1 when the column is not stored.
_FIND_INDEX_HOLDING_COL_IN_ROW_ = '''
__device__ inline int find_index_holding_col_in_row(
        int row, int col, const int *indptr, const int *indices) {
    int j_min = indptr[row];
    int j_max = indptr[row+1] - 1;
    while (j_min <= j_max) {
        int j = (j_min + j_max) / 2;
        int j_col = indices[j];
        if (j_col == col) {
            return j;
        } else if (j_col < col) {
            j_min = j + 1;
        } else {
            j_max = j - 1;
        }
    }
    return -1;
}
'''
|
| 686 |
+
|
| 687 |
+
|
| 688 |
+
@cupy._util.memoize(for_each_device=True)
def cupy_multiply_by_dense():
    """Returns the kernel computing ``OUT = SP * DN`` with broadcasting.

    One thread per output nnz; maps the output position back to the
    (possibly broadcast) source positions in the sparse and dense inputs.
    """
    return cupy.ElementwiseKernel(
        '''
        raw S SP_DATA, raw I SP_INDPTR, raw I SP_INDICES,
        int32 SP_M, int32 SP_N,
        raw D DN_DATA, int32 DN_M, int32 DN_N,
        raw I OUT_INDPTR, int32 OUT_M, int32 OUT_N
        ''',
        'O OUT_DATA, I OUT_INDICES',
        '''
        int i_out = i;
        int m_out = get_row_id(i_out, 0, OUT_M - 1, &(OUT_INDPTR[0]));
        int i_sp = i_out;
        if (OUT_M > SP_M && SP_M == 1) {
            i_sp -= OUT_INDPTR[m_out];
        }
        if (OUT_N > SP_N && SP_N == 1) {
            i_sp /= OUT_N;
        }
        int n_out = SP_INDICES[i_sp];
        if (OUT_N > SP_N && SP_N == 1) {
            n_out = i_out - OUT_INDPTR[m_out];
        }
        int m_dn = m_out;
        if (OUT_M > DN_M && DN_M == 1) {
            m_dn = 0;
        }
        int n_dn = n_out;
        if (OUT_N > DN_N && DN_N == 1) {
            n_dn = 0;
        }
        OUT_DATA = (O)(SP_DATA[i_sp] * DN_DATA[n_dn + (DN_N * m_dn)]);
        OUT_INDICES = n_out;
        ''',
        'cupyx_scipy_sparse_csr_multiply_by_dense',
        preamble=_GET_ROW_ID_
    )
|
| 726 |
+
|
| 727 |
+
|
| 728 |
+
@cupy._util.memoize(for_each_device=True)
def _cupy_divide_by_dense():
    """Returns the kernel dividing COO entries by a dense matrix.

    Used by the sparse/dense division path, which first converts the
    matrix to COO (hence the row/col inputs and the kernel name).
    """
    return cupy.ElementwiseKernel(
        'T data, I row, I col, I width, raw T other',
        'T res',
        '''
        res = data / other[row * width + col]
        ''',
        'cupyx_scipy_sparse_coo_divide_dense',
    )
|
| 738 |
+
|
| 739 |
+
|
| 740 |
+
def multiply_by_csr(a, b):
    """Point-wise product of two CSR matrices, with broadcasting.

    Computes in two kernel passes: the first evaluates products where
    both operands store an entry and records the sparsity pattern; the
    second compacts the result, dropping positions with no match.
    """
    check_shape_for_pointwise_op(a.shape, b.shape)
    a_m, a_n = a.shape
    b_m, b_n = b.shape
    m, n = max(a_m, b_m), max(a_n, b_n)
    a_nnz = a.nnz * (m // a_m) * (n // a_n)
    b_nnz = b.nnz * (m // b_m) * (n // b_n)
    if a_nnz > b_nnz:
        # Iterate over the sparser operand's entries.
        return multiply_by_csr(b, a)
    c_nnz = a_nnz
    dtype = numpy.promote_types(a.dtype, b.dtype)
    c_data = cupy.empty(c_nnz, dtype=dtype)
    c_indices = cupy.empty(c_nnz, dtype=a.indices.dtype)
    if m > a_m:
        if n > a_n:
            c_indptr = cupy.arange(0, c_nnz + 1, n, dtype=a.indptr.dtype)
        else:
            c_indptr = cupy.arange(0, c_nnz + 1, a.nnz,
                                   dtype=a.indptr.dtype)
    else:
        c_indptr = a.indptr.copy()
        if n > a_n:
            c_indptr *= n
    flags = cupy.zeros(c_nnz + 1, dtype=a.indices.dtype)
    nnz_each_row = cupy.zeros(m + 1, dtype=a.indptr.dtype)

    # compute c = a * b where necessary and get sparsity pattern of matrix d
    cupy_multiply_by_csr_step1()(
        a.data, a.indptr, a.indices, a_m, a_n,
        b.data, b.indptr, b.indices, b_m, b_n,
        c_indptr, m, n, c_data, c_indices, flags, nnz_each_row)

    flags = cupy.cumsum(flags, dtype=a.indptr.dtype)
    d_indptr = cupy.cumsum(nnz_each_row, dtype=a.indptr.dtype)
    d_nnz = int(d_indptr[-1])
    d_data = cupy.empty(d_nnz, dtype=dtype)
    d_indices = cupy.empty(d_nnz, dtype=a.indices.dtype)

    # remove zero elements in matrix c
    cupy_multiply_by_csr_step2()(c_data, c_indices, flags, d_data, d_indices)

    return csr_matrix((d_data, d_indices, d_indptr), shape=(m, n))
|
| 781 |
+
|
| 782 |
+
|
| 783 |
+
@cupy._util.memoize(for_each_device=True)
def cupy_multiply_by_csr_step1():
    """Returns step-1 kernel of CSR*CSR: multiply matching entries.

    One thread per candidate output nnz; finds B's matching column via
    binary search and tallies per-row output counts for the compaction
    pass in step 2.
    """
    return cupy.ElementwiseKernel(
        '''
        raw A A_DATA, raw I A_INDPTR, raw I A_INDICES, int32 A_M, int32 A_N,
        raw B B_DATA, raw I B_INDPTR, raw I B_INDICES, int32 B_M, int32 B_N,
        raw I C_INDPTR, int32 C_M, int32 C_N
        ''',
        'C C_DATA, I C_INDICES, raw I FLAGS, raw I NNZ_EACH_ROW',
        '''
        int i_c = i;
        int m_c = get_row_id(i_c, 0, C_M - 1, &(C_INDPTR[0]));

        int i_a = i;
        if (C_M > A_M && A_M == 1) {
            i_a -= C_INDPTR[m_c];
        }
        if (C_N > A_N && A_N == 1) {
            i_a /= C_N;
        }
        int n_c = A_INDICES[i_a];
        if (C_N > A_N && A_N == 1) {
            n_c = i % C_N;
        }
        int m_b = m_c;
        if (C_M > B_M && B_M == 1) {
            m_b = 0;
        }
        int n_b = n_c;
        if (C_N > B_N && B_N == 1) {
            n_b = 0;
        }
        int i_b = find_index_holding_col_in_row(m_b, n_b,
            &(B_INDPTR[0]), &(B_INDICES[0]));
        if (i_b >= 0) {
            atomicAdd(&(NNZ_EACH_ROW[m_c+1]), 1);
            FLAGS[i+1] = 1;
            C_DATA = (C)(A_DATA[i_a] * B_DATA[i_b]);
            C_INDICES = n_c;
        }
        ''',
        'cupyx_scipy_sparse_csr_multiply_by_csr_step1',
        preamble=_GET_ROW_ID_ + _FIND_INDEX_HOLDING_COL_IN_ROW_
    )
|
| 827 |
+
|
| 828 |
+
|
| 829 |
+
@cupy._util.memoize(for_each_device=True)
def cupy_multiply_by_csr_step2():
    """Returns step-2 kernel of CSR*CSR: compact valid entries.

    ``FLAGS`` is the exclusive prefix sum of step 1's validity flags, so
    ``FLAGS[i] < FLAGS[i+1]`` marks entry ``i`` as kept and ``FLAGS[i]``
    is its destination index.
    """
    return cupy.ElementwiseKernel(
        'T C_DATA, I C_INDICES, raw I FLAGS',
        'raw D D_DATA, raw I D_INDICES',
        '''
        int j = FLAGS[i];
        if (j < FLAGS[i+1]) {
            D_DATA[j] = (D)(C_DATA);
            D_INDICES[j] = C_INDICES;
        }
        ''',
        'cupyx_scipy_sparse_csr_multiply_by_csr_step2'
    )
|
| 843 |
+
|
| 844 |
+
|
| 845 |
+
# CUDA snippets defining the per-element ``binopt`` device function used
# by the binopt_csr kernels. Exactly one of these is appended to the
# kernel preamble depending on the requested operation.
_BINOPT_MAX_ = '''
__device__ inline O binopt(T in1, T in2) {
    return max(in1, in2);
}
'''
_BINOPT_MIN_ = '''
__device__ inline O binopt(T in1, T in2) {
    return min(in1, in2);
}
'''
_BINOPT_EQ_ = '''
__device__ inline O binopt(T in1, T in2) {
    return (in1 == in2);
}
'''
_BINOPT_NE_ = '''
__device__ inline O binopt(T in1, T in2) {
    return (in1 != in2);
}
'''
_BINOPT_LT_ = '''
__device__ inline O binopt(T in1, T in2) {
    return (in1 < in2);
}
'''
_BINOPT_GT_ = '''
__device__ inline O binopt(T in1, T in2) {
    return (in1 > in2);
}
'''
_BINOPT_LE_ = '''
__device__ inline O binopt(T in1, T in2) {
    return (in1 <= in2);
}
'''
_BINOPT_GE_ = '''
__device__ inline O binopt(T in1, T in2) {
    return (in1 >= in2);
}
'''
|
| 885 |
+
|
| 886 |
+
|
| 887 |
+
def binopt_csr(a, b, op_name):
    """Applies a binary element-wise op to two CSR matrices.

    Args:
        a, b: CSR operands (broadcasting over size-1 dimensions allowed).
        op_name (str): One of ``'_maximum_'``, ``'_minimum_'``,
            ``'_eq_'``, ``'_ne_'``, ``'_lt_'``, ``'_gt_'``, ``'_le_'``,
            ``'_ge_'``.

    Returns:
        csr_matrix: Result holding only entries where the op is nonzero.
    """
    check_shape_for_pointwise_op(a.shape, b.shape)
    a_m, a_n = a.shape
    b_m, b_n = b.shape
    m, n = max(a_m, b_m), max(a_n, b_n)
    a_nnz = a.nnz * (m // a_m) * (n // a_n)
    b_nnz = b.nnz * (m // b_m) * (n // b_n)

    a_info = cupy.zeros(a_nnz + 1, dtype=a.indices.dtype)
    b_info = cupy.zeros(b_nnz + 1, dtype=b.indices.dtype)
    a_valid = cupy.zeros(a_nnz, dtype=numpy.int8)
    b_valid = cupy.zeros(b_nnz, dtype=numpy.int8)
    c_indptr = cupy.zeros(m + 1, dtype=a.indptr.dtype)
    in_dtype = numpy.promote_types(a.dtype, b.dtype)
    a_data = a.data.astype(in_dtype, copy=False)
    b_data = b.data.astype(in_dtype, copy=False)

    # Dispatch table: op name -> (device snippet, result is boolean).
    op_table = {
        '_maximum_': (_BINOPT_MAX_, False),
        '_minimum_': (_BINOPT_MIN_, False),
        '_eq_': (_BINOPT_EQ_, True),
        '_ne_': (_BINOPT_NE_, True),
        '_lt_': (_BINOPT_LT_, True),
        '_gt_': (_BINOPT_GT_, True),
        '_le_': (_BINOPT_LE_, True),
        '_ge_': (_BINOPT_GE_, True),
    }
    if op_name not in op_table:
        raise ValueError('invalid op_name: {}'.format(op_name))
    snippet, is_bool = op_table[op_name]
    funcs = _GET_ROW_ID_ + snippet
    out_dtype = numpy.bool_ if is_bool else in_dtype

    a_tmp_data = cupy.empty(a_nnz, dtype=out_dtype)
    b_tmp_data = cupy.empty(b_nnz, dtype=out_dtype)
    a_tmp_indices = cupy.empty(a_nnz, dtype=a.indices.dtype)
    b_tmp_indices = cupy.empty(b_nnz, dtype=b.indices.dtype)
    _size = a_nnz + b_nnz
    cupy_binopt_csr_step1(op_name, preamble=funcs)(
        m, n,
        a.indptr, a.indices, a_data, a_m, a_n, a.nnz, a_nnz,
        b.indptr, b.indices, b_data, b_m, b_n, b.nnz, b_nnz,
        a_info, a_valid, a_tmp_indices, a_tmp_data,
        b_info, b_valid, b_tmp_indices, b_tmp_data,
        c_indptr, size=_size)
    a_info = cupy.cumsum(a_info, dtype=a_info.dtype)
    b_info = cupy.cumsum(b_info, dtype=b_info.dtype)
    c_indptr = cupy.cumsum(c_indptr, dtype=c_indptr.dtype)
    c_nnz = int(c_indptr[-1])
    c_indices = cupy.empty(c_nnz, dtype=a.indices.dtype)
    c_data = cupy.empty(c_nnz, dtype=out_dtype)
    cupy_binopt_csr_step2(op_name)(
        a_info, a_valid, a_tmp_indices, a_tmp_data, a_nnz,
        b_info, b_valid, b_tmp_indices, b_tmp_data, b_nnz,
        c_indices, c_data, size=_size)
    return csr_matrix((c_data, c_indices, c_indptr), shape=(m, n))
|
| 953 |
+
|
| 954 |
+
|
| 955 |
+
@cupy._util.memoize(for_each_device=True)
def cupy_binopt_csr_step1(op_name, preamble=''):
    """Returns step-1 kernel of binopt_csr for the given op.

    One thread per nnz of A and of B. Each thread locates its own
    (row, col), binary-searches the other operand for a matching entry,
    evaluates ``binopt`` (supplied via ``preamble``), and tallies output
    counts per row (``C_INFO``), per own entry (``MY_INFO``), and per
    opposite-side position (``OP_INFO``) for step 2's compaction.
    """
    name = 'cupyx_scipy_sparse_csr_binopt_' + op_name + 'step1'
    return cupy.ElementwiseKernel(
        '''
        int32 M, int32 N,
        raw I A_INDPTR, raw I A_INDICES, raw T A_DATA,
        int32 A_M, int32 A_N, int32 A_NNZ_ACT, int32 A_NNZ,
        raw I B_INDPTR, raw I B_INDICES, raw T B_DATA,
        int32 B_M, int32 B_N, int32 B_NNZ_ACT, int32 B_NNZ
        ''',
        '''
        raw I A_INFO, raw B A_VALID, raw I A_TMP_INDICES, raw O A_TMP_DATA,
        raw I B_INFO, raw B B_VALID, raw I B_TMP_INDICES, raw O B_TMP_DATA,
        raw I C_INFO
        ''',
        '''
        if (i >= A_NNZ + B_NNZ) return;

        const int *MY_INDPTR, *MY_INDICES; int *MY_INFO; const T *MY_DATA;
        const int *OP_INDPTR, *OP_INDICES; int *OP_INFO; const T *OP_DATA;
        int MY_M, MY_N, MY_NNZ_ACT, MY_NNZ;
        int OP_M, OP_N, OP_NNZ_ACT, OP_NNZ;
        signed char *MY_VALID; I *MY_TMP_INDICES; O *MY_TMP_DATA;

        int my_j;
        if (i < A_NNZ) {
            // in charge of one of non-zero element of sparse matrix A
            my_j = i;
            MY_INDPTR  = &(A_INDPTR[0]);  OP_INDPTR  = &(B_INDPTR[0]);
            MY_INDICES = &(A_INDICES[0]); OP_INDICES = &(B_INDICES[0]);
            MY_INFO    = &(A_INFO[0]);    OP_INFO    = &(B_INFO[0]);
            MY_DATA    = &(A_DATA[0]);    OP_DATA    = &(B_DATA[0]);
            MY_M = A_M; OP_M = B_M;
            MY_N = A_N; OP_N = B_N;
            MY_NNZ_ACT = A_NNZ_ACT; OP_NNZ_ACT = B_NNZ_ACT;
            MY_NNZ = A_NNZ; OP_NNZ = B_NNZ;
            MY_VALID = &(A_VALID[0]);
            MY_TMP_DATA= &(A_TMP_DATA[0]);
            MY_TMP_INDICES = &(A_TMP_INDICES[0]);
        } else {
            // in charge of one of non-zero element of sparse matrix B
            my_j = i - A_NNZ;
            MY_INDPTR  = &(B_INDPTR[0]);  OP_INDPTR  = &(A_INDPTR[0]);
            MY_INDICES = &(B_INDICES[0]); OP_INDICES = &(A_INDICES[0]);
            MY_INFO    = &(B_INFO[0]);    OP_INFO    = &(A_INFO[0]);
            MY_DATA    = &(B_DATA[0]);    OP_DATA    = &(A_DATA[0]);
            MY_M = B_M; OP_M = A_M;
            MY_N = B_N; OP_N = A_N;
            MY_NNZ_ACT = B_NNZ_ACT; OP_NNZ_ACT = A_NNZ_ACT;
            MY_NNZ = B_NNZ; OP_NNZ = A_NNZ;
            MY_VALID = &(B_VALID[0]);
            MY_TMP_DATA= &(B_TMP_DATA[0]);
            MY_TMP_INDICES = &(B_TMP_INDICES[0]);
        }
        int _min, _max, _mid;

        // get column location
        int my_col;
        int my_j_act = my_j;
        if (MY_M == 1 && MY_M < M) {
            if (MY_N == 1 && MY_N < N) my_j_act = 0;
            else my_j_act = my_j % MY_NNZ_ACT;
        } else {
            if (MY_N == 1 && MY_N < N) my_j_act = my_j / N;
        }
        my_col = MY_INDICES[my_j_act];
        if (MY_N == 1 && MY_N < N) {
            my_col = my_j % N;
        }

        // get row location
        int my_row = get_row_id(my_j_act, 0, MY_M - 1, &(MY_INDPTR[0]));
        if (MY_M == 1 && MY_M < M) {
            if (MY_N == 1 && MY_N < N) my_row = my_j / N;
            else my_row = my_j / MY_NNZ_ACT;
        }

        int op_row = my_row;
        int op_row_act = op_row;
        if (OP_M == 1 && OP_M < M) {
            op_row_act = 0;
        }

        int op_col = 0;
        _min = OP_INDPTR[op_row_act];
        _max = OP_INDPTR[op_row_act + 1] - 1;
        int op_j_act = _min;
        bool op_nz = false;
        if (_min <= _max) {
            if (OP_N == 1 && OP_N < N) {
                op_col = my_col;
                op_nz = true;
            }
            else {
                _mid = (_min + _max) / 2;
                op_col = OP_INDICES[_mid];
                while (_min < _max) {
                    if (op_col < my_col) {
                        _min = _mid + 1;
                    } else if (op_col > my_col) {
                        _max = _mid;
                    } else {
                        break;
                    }
                    _mid = (_min + _max) / 2;
                    op_col = OP_INDICES[_mid];
                }
                op_j_act = _mid;
                if (op_col == my_col) {
                    op_nz = true;
                } else if (op_col < my_col) {
                    op_col = N;
                    op_j_act += 1;
                }
            }
        }

        int op_j = op_j_act;
        if (OP_M == 1 && OP_M < M) {
            if (OP_N == 1 && OP_N < N) {
                op_j = (op_col + N * op_row) * OP_NNZ_ACT;
            } else {
                op_j = op_j_act + OP_NNZ_ACT * op_row;
            }
        } else {
            if (OP_N == 1 && OP_N < N) {
                op_j = op_col + N * op_j_act;
            }
        }

        if (i < A_NNZ || !op_nz) {
            T my_data = MY_DATA[my_j_act];
            T op_data = 0;
            if (op_nz) op_data = OP_DATA[op_j_act];
            O out;
            if (i < A_NNZ) out = binopt(my_data, op_data);
            else out = binopt(op_data, my_data);
            if (out != static_cast<O>(0)) {
                MY_VALID[my_j] = 1;
                MY_TMP_DATA[my_j] = out;
                MY_TMP_INDICES[my_j] = my_col;
                atomicAdd( &(C_INFO[my_row + 1]), 1 );
                atomicAdd( &(MY_INFO[my_j + 1]), 1 );
                atomicAdd( &(OP_INFO[op_j]), 1 );
            }
        }
        ''',
        name, preamble=preamble,
    )
|
| 1105 |
+
|
| 1106 |
+
|
| 1107 |
+
@cupy._util.memoize(for_each_device=True)
def cupy_binopt_csr_step2(op_name):
    """Returns step-2 kernel of binopt_csr: scatter valid entries.

    ``A_INFO``/``B_INFO`` are prefix sums from step 1 giving each valid
    entry its destination index in the output arrays.
    """
    # NOTE(review): name lacks the underscore before op_name that step1's
    # name has ('binopt_' + op_name); harmless since it's only a kernel
    # label, kept for cache-key stability.
    name = 'cupyx_scipy_sparse_csr_binopt' + op_name + 'step2'
    return cupy.ElementwiseKernel(
        '''
        raw I A_INFO, raw B A_VALID, raw I A_TMP_INDICES, raw O A_TMP_DATA,
        int32 A_NNZ,
        raw I B_INFO, raw B B_VALID, raw I B_TMP_INDICES, raw O B_TMP_DATA,
        int32 B_NNZ
        ''',
        'raw I C_INDICES, raw O C_DATA',
        '''
        if (i < A_NNZ) {
            int j = i;
            if (A_VALID[j]) {
                C_INDICES[A_INFO[j]] = A_TMP_INDICES[j];
                C_DATA[A_INFO[j]] = A_TMP_DATA[j];
            }
        } else if (i < A_NNZ + B_NNZ) {
            int j = i - A_NNZ;
            if (B_VALID[j]) {
                C_INDICES[B_INFO[j]] = B_TMP_INDICES[j];
                C_DATA[B_INFO[j]] = B_TMP_DATA[j];
            }
        }
        ''',
        name,
    )
|
| 1135 |
+
|
| 1136 |
+
|
| 1137 |
+
def csr2dense(a, order):
    """Converts a CSR matrix to a dense array with a custom kernel.

    Args:
        a: CSR matrix.
        order (str): ``'C'`` or ``'F'`` memory layout for the output.

    Returns:
        cupy.ndarray: Dense array of ``a``'s shape and dtype.
    """
    rows, cols = a.shape
    out = cupy.zeros(a.shape, dtype=a.dtype, order=order)
    kernel = _cupy_csr2dense(a.dtype)
    kernel(rows, cols, a.indptr, a.indices, a.data, (order == 'C'), out)
    return out
|
| 1143 |
+
|
| 1144 |
+
|
| 1145 |
+
@cupy._util.memoize(for_each_device=True)
def _cupy_csr2dense(dtype):
    """Returns the CSR-to-dense scatter kernel for ``dtype``.

    For bool, any stored truthy value sets the cell; for other dtypes
    duplicates are accumulated with ``atomicAdd``.
    """
    if dtype == '?':
        scatter = "if (DATA) OUT[index] = true;"
    else:
        scatter = "atomicAdd(&OUT[index], DATA);"

    return cupy.ElementwiseKernel(
        'int32 M, int32 N, raw I INDPTR, I INDICES, T DATA, bool C_ORDER',
        'raw T OUT',
        '''
        int row = get_row_id(i, 0, M - 1, &(INDPTR[0]));
        int col = INDICES;
        int index = C_ORDER ? col + N * row : row + M * col;
        ''' + scatter,
        'cupyx_scipy_sparse_csr2dense',
        preamble=_GET_ROW_ID_
    )
|
| 1163 |
+
|
| 1164 |
+
|
| 1165 |
+
def dense2csr(a):
    """Convert a dense matrix to CSR format.

    Float/complex inputs are delegated to cuSPARSE when possible; other
    dtypes use a two-pass custom-kernel fallback (count nonzeros per row,
    then scatter indices/data via prefix sums).

    Args:
        a (cupy.ndarray): 2-D dense input.

    Returns:
        csr_matrix: CSR representation of ``a``.
    """
    from cupyx import cusparse

    if a.dtype.char in 'fdFD':
        if cusparse.check_availability('denseToSparse'):
            return cusparse.denseToSparse(a, format='csr')
        else:
            return cusparse.dense2csr(a)
    m, n = a.shape
    a = cupy.ascontiguousarray(a)
    # indptr[r + 1] counts nonzeros of row r; info[i + 1] is 1 iff flat
    # element i is nonzero.  Cumulative sums turn both into offsets.
    indptr = cupy.zeros(m + 1, dtype=numpy.int32)
    info = cupy.zeros(m * n + 1, dtype=numpy.int32)
    cupy_dense2csr_step1()(m, n, a, indptr, info)
    indptr = cupy.cumsum(indptr, dtype=numpy.int32)
    info = cupy.cumsum(info, dtype=numpy.int32)
    nnz = int(indptr[-1])
    indices = cupy.empty(nnz, dtype=numpy.int32)
    data = cupy.empty(nnz, dtype=a.dtype)
    cupy_dense2csr_step2()(m, n, a, info, indices, data)
    return csr_matrix((data, indices, indptr), shape=(m, n))
|
| 1185 |
+
|
| 1186 |
+
|
| 1187 |
+
@cupy._util.memoize(for_each_device=True)
def cupy_dense2csr_step1():
    """Counting pass of ``dense2csr``: per-row nonzero counts and flags.

    Writes a 1 into ``INFO[i + 1]`` for every nonzero flat element ``i`` and
    atomically bumps ``INDPTR[row + 1]``; the caller turns both arrays into
    offsets with ``cumsum``.
    """
    return cupy.ElementwiseKernel(
        'int32 M, int32 N, T A',
        'raw I INDPTR, raw I INFO',
        '''
        int row = i / N;
        int col = i % N;
        if (A != static_cast<T>(0)) {
            atomicAdd( &(INDPTR[row + 1]), 1 );
            INFO[i + 1] = 1;
        }
        ''',
        'cupyx_scipy_sparse_dense2csr_step1')
|
| 1201 |
+
|
| 1202 |
+
|
| 1203 |
+
@cupy._util.memoize(for_each_device=True)
def cupy_dense2csr_step2():
    """Scatter pass of ``dense2csr``.

    ``INFO`` is the exclusive prefix sum produced after step 1, so
    ``INFO[i]`` is the output slot of flat element ``i`` when it is nonzero.
    """
    return cupy.ElementwiseKernel(
        'int32 M, int32 N, T A, raw I INFO',
        'raw I INDICES, raw T DATA',
        '''
        int row = i / N;
        int col = i % N;
        if (A != static_cast<T>(0)) {
            int idx = INFO[i];
            INDICES[idx] = col;
            DATA[idx] = A;
        }
        ''',
        'cupyx_scipy_sparse_dense2csr_step2')
|
| 1218 |
+
|
| 1219 |
+
|
| 1220 |
+
@cupy._util.memoize(for_each_device=True)
def _cupy_csr_diagonal():
    """Build the kernel extracting the k-th diagonal of a CSR matrix.

    Launched with one thread per diagonal element; each thread searches its
    row's index range for the target column and emits the stored value, or
    zero when the element is implicit.
    """
    return cupy.ElementwiseKernel(
        'int32 k, int32 rows, int32 cols, '
        'raw T data, raw I indptr, raw I indices',
        'T y',
        '''
        int row = i;
        int col = i;
        // Shift onto the k-th diagonal: a[i - k, i] for k < 0,
        // a[i, i + k] for k > 0.
        if (k < 0) row -= k;
        if (k > 0) col += k;
        if (row >= rows || col >= cols) return;
        int j = find_index_holding_col_in_row(row, col,
            &(indptr[0]), &(indices[0]));
        if (j >= 0) {
            y = data[j];
        } else {
            y = static_cast<T>(0);
        }
        ''',
        'cupyx_scipy_sparse_csr_diagonal',
        preamble=_FIND_INDEX_HOLDING_COL_IN_ROW_
    )
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_data.py
ADDED
|
@@ -0,0 +1,398 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cupy
|
| 2 |
+
import numpy as np
|
| 3 |
+
from cupy._core import internal
|
| 4 |
+
from cupy import _util
|
| 5 |
+
from cupyx.scipy.sparse import _base
|
| 6 |
+
from cupyx.scipy.sparse import _coo
|
| 7 |
+
from cupyx.scipy.sparse import _sputils
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# Unary cupy ufuncs installed as elementwise methods on ``_data_matrix`` by
# ``_install_ufuncs`` below.  NOTE(review): presumably chosen because each
# maps 0 to 0, so applying them to ``.data`` alone preserves sparsity —
# confirm before extending the list.
_ufuncs = [
    'arcsin', 'arcsinh', 'arctan', 'arctanh', 'ceil', 'deg2rad', 'expm1',
    'floor', 'log1p', 'rad2deg', 'rint', 'sign', 'sin', 'sinh', 'sqrt', 'tan',
    'tanh', 'trunc',
]
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class _data_matrix(_base.spmatrix):
    """Base class for sparse formats backed by a flat ``data`` array.

    Subclasses (COO/CSR/CSC/DIA) implement :meth:`_with_data` to rebuild a
    matrix with the same sparsity structure but new values; every
    elementwise operation here is expressed through it.
    """

    def __init__(self, data):
        # ``data`` holds the stored (possibly explicit-zero) entries.
        self.data = data

    @property
    def dtype(self):
        """Data type of the matrix."""
        return self.data.dtype

    def _with_data(self, data, copy=True):
        # Subclass hook: same sparsity structure, different values.
        raise NotImplementedError

    def __abs__(self):
        """Elementwise absolute."""
        return self._with_data(abs(self.data))

    def __neg__(self):
        """Elementwise negative."""
        return self._with_data(-self.data)

    def astype(self, t):
        """Casts the array to given data type.

        Args:
            dtype: Type specifier.

        Returns:
            A copy of the array with a given type.

        """
        return self._with_data(self.data.astype(t))

    def conj(self, copy=True):
        # Conjugation only affects complex dtypes; for real data it is a
        # (possibly shared) identity.
        if cupy.issubdtype(self.dtype, cupy.complexfloating):
            return self._with_data(self.data.conj(), copy=copy)
        elif copy:
            return self.copy()
        else:
            return self

    conj.__doc__ = _base.spmatrix.conj.__doc__

    def copy(self):
        return self._with_data(self.data.copy(), copy=True)

    copy.__doc__ = _base.spmatrix.copy.__doc__

    def count_nonzero(self):
        """Returns number of non-zero entries.

        .. note::
           This method counts the actual number of non-zero entries, which
           does not include explicit zero entries.
           Instead ``nnz`` returns the number of entries including explicit
           zeros.

        Returns:
            Number of non-zero entries.

        """
        return cupy.count_nonzero(self.data)

    def mean(self, axis=None, dtype=None, out=None):
        """Compute the arithmetic mean along the specified axis.

        Args:
            axis (int or ``None``): Axis along which the sum is computed.
                If it is ``None``, it computes the average of all the elements.
                Select from ``{None, 0, 1, -2, -1}``.

        Returns:
            cupy.ndarray: Resulting array.

        .. seealso::
           :meth:`scipy.sparse.spmatrix.mean`

        """
        _sputils.validateaxis(axis)
        nRow, nCol = self.shape
        data = self.data.copy()

        # The mean is computed as sum(x / n): divide the stored values by
        # the reduction length first, then reuse ``sum``.
        if axis is None:
            n = nRow * nCol
        elif axis in (0, -2):
            n = nRow
        else:
            n = nCol

        return self._with_data(data / n).sum(axis, dtype, out)

    def power(self, n, dtype=None):
        """Elementwise power function.

        Args:
            n: Exponent.
            dtype: Type specifier.

        """
        if dtype is None:
            data = self.data.copy()
        else:
            data = self.data.astype(dtype, copy=True)
        data **= n
        return self._with_data(data)
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def _find_missing_index(ind, n):
    """Return the first flat index absent from the sorted index array ``ind``.

    ``ind`` is compared against ``arange(ind.size)``; the first mismatch is
    the smallest missing index.  When no mismatch exists, ``ind.size`` is
    returned if indices beyond it are missing (``ind.size < n``), else ``-1``
    meaning nothing is missing.
    """
    expected = cupy.arange(ind.size)
    mismatch = ind != expected
    fallback = cupy.asarray(ind.size if ind.size < n else -1)
    return cupy.where(mismatch.any(), mismatch.argmax(), fallback)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def _non_zero_cmp(mat, am, zero, m):
    """Adjust an argmin/argmax candidate for implicit zeros.

    ``am`` is the flat position of the extreme *stored* value ``m`` of COO
    matrix ``mat``.  When the matrix has implicit zeros, the first implicit
    zero may beat or tie the stored extreme; this returns the corrected
    flat index.
    """
    size = np.prod(mat.shape)
    if size == mat.nnz:
        # Fully dense storage: no implicit zeros to consider.
        return am
    else:
        # Flattened positions of the stored entries (row-major).
        ind = mat.row * mat.shape[1] + mat.col
        zero_ind = _find_missing_index(ind, size)
        return cupy.where(
            m == zero,
            # Tie with zero: pick the earliest of the two positions.
            cupy.minimum(zero_ind, am),
            zero_ind)
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
class _minmax_mixin(object):
    """Mixin for min and max methods.
    These are not implemented for dia_matrix, hence the separate class.

    """

    def _min_or_max_axis(self, axis, min_or_max, explicit):
        # Length of the reduced axis; an empty axis has no extreme.
        N = self.shape[axis]
        if N == 0:
            raise ValueError("zero-size array to reduction operation")
        M = self.shape[1 - axis]

        # Convert so the reduced axis is the minor (contiguous) one.
        mat = self.tocsc() if axis == 0 else self.tocsr()
        mat.sum_duplicates()

        # Do the reduction
        value = mat._minor_reduce(min_or_max, axis, explicit)
        major_index = cupy.arange(M)

        # Drop zero results so the output stays sparse.
        mask = value != 0
        major_index = cupy.compress(mask, major_index)
        value = cupy.compress(mask, value)

        if axis == 0:
            return _coo.coo_matrix(
                (value, (cupy.zeros(len(value)), major_index)),
                dtype=self.dtype, shape=(1, M))
        else:
            return _coo.coo_matrix(
                (value, (major_index, cupy.zeros(len(value)))),
                dtype=self.dtype, shape=(M, 1))

    def _min_or_max(self, axis, out, min_or_max, explicit):
        if out is not None:
            raise ValueError(("Sparse matrices do not support "
                              "an 'out' parameter."))

        _sputils.validateaxis(axis)

        if axis is None:
            # Full reduction to a 0-d scalar.
            if 0 in self.shape:
                raise ValueError("zero-size array to reduction operation")

            zero = cupy.zeros((), dtype=self.dtype)
            if self.nnz == 0:
                return zero
            self.sum_duplicates()
            m = min_or_max(self.data)
            if explicit:
                # Only explicitly stored values participate.
                return m
            if self.nnz != internal.prod(self.shape):
                # Implicit zeros exist, so zero competes with the stored
                # extreme.
                if min_or_max is cupy.min:
                    m = cupy.minimum(zero, m)
                elif min_or_max is cupy.max:
                    m = cupy.maximum(zero, m)
                else:
                    assert False
            return m

        if axis < 0:
            axis += 2

        return self._min_or_max_axis(axis, min_or_max, explicit)

    def _arg_min_or_max_axis(self, axis, op):
        if self.shape[axis] == 0:
            raise ValueError("Can't apply the operation along a zero-sized "
                             "dimension.")

        # Convert so the reduced axis is the minor one.
        mat = self.tocsc() if axis == 0 else self.tocsr()
        mat.sum_duplicates()

        # Do the reduction
        value = mat._arg_minor_reduce(op, axis)

        # Keep a 2-d result with size 1 along the reduced axis.
        if axis == 0:
            return value[None, :]
        else:
            return value[:, None]

    def _arg_min_or_max(self, axis, out, op, compare):
        if out is not None:
            raise ValueError("Sparse matrices do not support "
                             "an 'out' parameter.")

        _sputils.validateaxis(axis)

        if axis is None:
            if 0 in self.shape:
                raise ValueError("Can't apply the operation to "
                                 "an empty matrix.")

            if self.nnz == 0:
                # All-implicit-zero matrix: first element is the extreme.
                return 0
            else:
                zero = cupy.asarray(self.dtype.type(0))
                mat = self.tocoo()

                mat.sum_duplicates()

                am = op(mat.data)
                m = mat.data[am]

                # If the stored extreme strictly beats zero, report its flat
                # position; otherwise the first implicit zero may win.
                return cupy.where(
                    compare(m, zero), mat.row[am] * mat.shape[1] + mat.col[am],
                    _non_zero_cmp(mat, am, zero, m))

        if axis < 0:
            axis += 2

        return self._arg_min_or_max_axis(axis, op)

    def max(self, axis=None, out=None, *, explicit=False):
        """Returns the maximum of the matrix or maximum along an axis.

        Args:
            axis (int): {-2, -1, 0, 1, ``None``} (optional)
                Axis along which the sum is computed. The default is to
                compute the maximum over all the matrix elements, returning
                a scalar (i.e. ``axis`` = ``None``).
            out (None): (optional)
                This argument is in the signature *solely* for NumPy
                compatibility reasons. Do not pass in anything except
                for the default value, as this argument is not used.
            explicit (bool): Return the maximum value explicitly specified and
                ignore all implicit zero entries. If the dimension has no
                explicit values, a zero is then returned to indicate that it is
                the only implicit value. This parameter is experimental and may
                change in the future.

        Returns:
            (cupy.ndarray or float): Maximum of ``a``. If ``axis`` is
            ``None``, the result is a scalar value. If ``axis`` is given,
            the result is an array of dimension ``a.ndim - 1``. This
            differs from numpy for computational efficiency.

        .. seealso:: min : The minimum value of a sparse matrix along a given
            axis.
        .. seealso:: numpy.matrix.max : NumPy's implementation of ``max`` for
            matrices

        """
        if explicit:
            api_name = 'explicit of cupyx.scipy.sparse.{}.max'.format(
                self.__class__.__name__)
            _util.experimental(api_name)
        return self._min_or_max(axis, out, cupy.max, explicit)

    def min(self, axis=None, out=None, *, explicit=False):
        """Returns the minimum of the matrix or minimum along an axis.

        Args:
            axis (int): {-2, -1, 0, 1, ``None``} (optional)
                Axis along which the sum is computed. The default is to
                compute the minimum over all the matrix elements, returning
                a scalar (i.e. ``axis`` = ``None``).
            out (None): (optional)
                This argument is in the signature *solely* for NumPy
                compatibility reasons. Do not pass in anything except for
                the default value, as this argument is not used.
            explicit (bool): Return the minimum value explicitly specified and
                ignore all implicit zero entries. If the dimension has no
                explicit values, a zero is then returned to indicate that it is
                the only implicit value. This parameter is experimental and may
                change in the future.

        Returns:
            (cupy.ndarray or float): Minimum of ``a``. If ``axis`` is
            None, the result is a scalar value. If ``axis`` is given, the
            result is an array of dimension ``a.ndim - 1``. This differs
            from numpy for computational efficiency.

        .. seealso:: max : The maximum value of a sparse matrix along a given
            axis.
        .. seealso:: numpy.matrix.min : NumPy's implementation of 'min' for
            matrices

        """
        if explicit:
            api_name = 'explicit of cupyx.scipy.sparse.{}.min'.format(
                self.__class__.__name__)
            _util.experimental(api_name)
        return self._min_or_max(axis, out, cupy.min, explicit)

    def argmax(self, axis=None, out=None):
        """Returns indices of maximum elements along an axis.

        Implicit zero elements are taken into account. If there are several
        maximum values, the index of the first occurrence is returned. If
        ``NaN`` values occur in the matrix, the output defaults to a zero entry
        for the row/column in which the NaN occurs.

        Args:
            axis (int): {-2, -1, 0, 1, ``None``} (optional)
                Axis along which the argmax is computed. If ``None`` (default),
                index of the maximum element in the flatten data is returned.
            out (None): (optional)
                This argument is in the signature *solely* for NumPy
                compatibility reasons. Do not pass in anything except for
                the default value, as this argument is not used.

        Returns:
            (cupy.narray or int): Indices of maximum elements. If array,
            its size along ``axis`` is 1.

        """
        return self._arg_min_or_max(axis, out, cupy.argmax, cupy.greater)

    def argmin(self, axis=None, out=None):
        """
        Returns indices of minimum elements along an axis.

        Implicit zero elements are taken into account. If there are several
        minimum values, the index of the first occurrence is returned. If
        ``NaN`` values occur in the matrix, the output defaults to a zero entry
        for the row/column in which the NaN occurs.

        Args:
            axis (int): {-2, -1, 0, 1, ``None``} (optional)
                Axis along which the argmin is computed. If ``None`` (default),
                index of the minimum element in the flatten data is returned.
            out (None): (optional)
                This argument is in the signature *solely* for NumPy
                compatibility reasons. Do not pass in anything except for
                the default value, as this argument is not used.

        Returns:
            (cupy.narray or int): Indices of minimum elements. If matrix,
            its size along ``axis`` is 1.

        """
        return self._arg_min_or_max(axis, out, cupy.argmin, cupy.less)
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
def _install_ufunc(func_name):
    """Attach the cupy ufunc ``func_name`` to ``_data_matrix`` as a method.

    The installed method applies the ufunc to the stored ``data`` array only
    and rebuilds a matrix with the same sparsity structure.
    """

    def method(self):
        result = getattr(cupy, func_name)(self.data)
        return self._with_data(result)

    method.__doc__ = 'Elementwise %s.' % func_name
    method.__name__ = func_name
    setattr(_data_matrix, func_name, method)
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
def _install_ufuncs():
    """Register every ufunc wrapper listed in ``_ufuncs`` on the class."""
    for name in _ufuncs:
        _install_ufunc(name)


# Install the elementwise methods at import time.
_install_ufuncs()
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_dia.py
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
try:
|
| 2 |
+
import scipy.sparse
|
| 3 |
+
_scipy_available = True
|
| 4 |
+
except ImportError:
|
| 5 |
+
_scipy_available = False
|
| 6 |
+
|
| 7 |
+
import cupy
|
| 8 |
+
from cupy import _core
|
| 9 |
+
from cupyx.scipy.sparse import _csc
|
| 10 |
+
from cupyx.scipy.sparse import _data
|
| 11 |
+
from cupyx.scipy.sparse import _util
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# TODO(leofang): The current implementation is CSC-based, which is troublesome
|
| 15 |
+
# on ROCm/HIP. We should convert it to CSR-based for portability.
|
| 16 |
+
# TODO(leofang): The current implementation is CSC-based, which is troublesome
# on ROCm/HIP. We should convert it to CSR-based for portability.
class dia_matrix(_data._data_matrix):

    """Sparse matrix with DIAgonal storage.

    Now it has only one initializer format below:

    ``dia_matrix((data, offsets))``

    Args:
        arg1: Arguments for the initializer.
        shape (tuple): Shape of a matrix. Its length must be two.
        dtype: Data type. It must be an argument of :class:`numpy.dtype`.
        copy (bool): If ``True``, copies of given arrays are always used.

    .. seealso::
       :class:`scipy.sparse.dia_matrix`

    """

    format = 'dia'

    def __init__(self, arg1, shape=None, dtype=None, copy=False):
        if _scipy_available and scipy.sparse.issparse(arg1):
            # Host (scipy) sparse input: convert to DIA and take its parts.
            x = arg1.todia()
            data = x.data
            offsets = x.offsets
            shape = x.shape
            dtype = x.dtype
            copy = False
        elif isinstance(arg1, tuple):
            data, offsets = arg1
            if shape is None:
                raise ValueError('expected a shape argument')

        else:
            raise ValueError(
                'unrecognized form for dia_matrix constructor')

        data = cupy.array(data, dtype=dtype, copy=copy)
        data = cupy.atleast_2d(data)
        offsets = cupy.array(offsets, dtype='i', copy=copy)
        offsets = cupy.atleast_1d(offsets)

        if offsets.ndim != 1:
            raise ValueError('offsets array must have rank 1')

        if data.ndim != 2:
            raise ValueError('data array must have rank 2')

        if data.shape[0] != len(offsets):
            raise ValueError(
                'number of diagonals (%d) does not match the number of '
                'offsets (%d)'
                % (data.shape[0], len(offsets)))

        # Each offset may appear at most once.
        sorted_offsets = cupy.sort(offsets)
        if (sorted_offsets[:-1] == sorted_offsets[1:]).any():
            raise ValueError('offset array contains duplicate values')

        self.data = data
        self.offsets = offsets
        if not _util.isshape(shape):
            raise ValueError('invalid shape (must be a 2-tuple of int)')
        self._shape = int(shape[0]), int(shape[1])

    def _with_data(self, data, copy=True):
        """Returns a matrix with the same sparsity structure as self,
        but with different data. By default the structure arrays are copied.
        """
        if copy:
            return dia_matrix((data, self.offsets.copy()), shape=self.shape)
        else:
            return dia_matrix((data, self.offsets), shape=self.shape)

    def get(self, stream=None):
        """Returns a copy of the array on host memory.

        Args:
            stream (cupy.cuda.Stream): CUDA stream object. If it is given, the
                copy runs asynchronously. Otherwise, the copy is synchronous.

        Returns:
            scipy.sparse.dia_matrix: Copy of the array on host memory.

        """
        if not _scipy_available:
            raise RuntimeError('scipy is not available')
        data = self.data.get(stream)
        offsets = self.offsets.get(stream)
        return scipy.sparse.dia_matrix((data, offsets), shape=self._shape)

    def get_shape(self):
        """Returns the shape of the matrix.

        Returns:
            tuple: Shape of the matrix.
        """
        return self._shape

    def getnnz(self, axis=None):
        """Returns the number of stored values, including explicit zeros.

        Args:
            axis: Not supported yet.

        Returns:
            int: The number of stored values.

        """
        if axis is not None:
            raise NotImplementedError(
                'getnnz over an axis is not implemented for DIA format')

        m, n = self.shape
        # Sum the in-bounds length of each stored diagonal on the device.
        nnz = _core.ReductionKernel(
            'int32 offsets, int32 m, int32 n', 'int32 nnz',
            'offsets > 0 ? min(m, n - offsets) : min(m + offsets, n)',
            'a + b', 'nnz = a', '0', 'dia_nnz')(self.offsets, m, n)
        return int(nnz)

    def toarray(self, order=None, out=None):
        """Returns a dense matrix representing the same value."""
        return self.tocsc().toarray(order=order, out=out)

    def tocsc(self, copy=False):
        """Converts the matrix to Compressed Sparse Column format.

        Args:
            copy (bool): If ``False``, it shares data arrays as much as
                possible. Actually this option is ignored because all
                arrays in a matrix cannot be shared in dia to csc conversion.

        Returns:
            cupyx.scipy.sparse.csc_matrix: Converted matrix.

        """
        if self.data.size == 0:
            return _csc.csc_matrix(self.shape, dtype=self.dtype)

        num_rows, num_cols = self.shape
        num_offsets, offset_len = self.data.shape

        # For each stored slot, compute its row and whether it is a valid,
        # in-bounds, nonzero entry.  ``offsets`` is broadcast along columns.
        row, mask = _core.ElementwiseKernel(
            'int32 offset_len, int32 offsets, int32 num_rows, '
            'int32 num_cols, T data',
            'int32 row, bool mask',
            '''
            int offset_inds = i % offset_len;
            row = offset_inds - offsets;
            mask = (row >= 0 && row < num_rows && offset_inds < num_cols
                    && data != T(0));
            ''',
            'cupyx_scipy_sparse_dia_tocsc')(offset_len, self.offsets[:, None],
                                            num_rows, num_cols, self.data)
        # Column pointers: per-column valid counts, cumulated.
        # NOTE(review): assumes offset_len <= num_cols here; columns past
        # offset_len are padded with the last pointer — confirm upstream.
        indptr = cupy.zeros(num_cols + 1, dtype='i')
        indptr[1: offset_len + 1] = cupy.cumsum(mask.sum(axis=0))
        indptr[offset_len + 1:] = indptr[offset_len]
        # Transposing groups entries column-major before masking.
        indices = row.T[mask.T].astype('i', copy=False)
        data = self.data.T[mask.T]
        return _csc.csc_matrix(
            (data, indices, indptr), shape=self.shape, dtype=self.dtype)

    def tocsr(self, copy=False):
        """Converts the matrix to Compressed Sparse Row format.

        Args:
            copy (bool): If ``False``, it shares data arrays as much as
                possible. Actually this option is ignored because all
                arrays in a matrix cannot be shared in dia to csr conversion.

        Returns:
            cupyx.scipy.sparse.csr_matrix: Converted matrix.

        """
        return self.tocsc().tocsr()

    def diagonal(self, k=0):
        """Returns the k-th diagonal of the matrix.

        Args:
            k (int, optional): Which diagonal to get, corresponding to elements
                a[i, i+k]. Default: 0 (the main diagonal).

        Returns:
            cupy.ndarray : The k-th diagonal.
        """
        rows, cols = self.shape
        if k <= -rows or k >= cols:
            # Diagonal lies entirely outside the matrix.
            return cupy.empty(0, dtype=self.data.dtype)
        idx, = cupy.nonzero(self.offsets == k)
        first_col, last_col = max(0, k), min(rows + k, cols)
        if idx.size == 0:
            # Diagonal is not stored: it is implicitly all zeros.
            return cupy.zeros(last_col - first_col, dtype=self.data.dtype)
        return self.data[idx[0], first_col:last_col]
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def isspmatrix_dia(x):
    """Check whether a given object is a DIA-format sparse matrix.

    Returns:
        bool: ``True`` if and only if ``x`` is a
        :class:`cupyx.scipy.sparse.dia_matrix`.

    """
    return isinstance(x, dia_matrix)
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_extract.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cupy
|
| 2 |
+
import cupyx
|
| 3 |
+
|
| 4 |
+
from cupyx.scipy import sparse
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def find(A):
    """Returns the indices and values of the nonzero elements of a matrix

    Args:
        A (cupy.ndarray or cupyx.scipy.sparse.spmatrix): Matrix whose nonzero
            elements are desired.

    Returns:
        tuple of cupy.ndarray:
            It returns (``I``, ``J``, ``V``). ``I``, ``J``, and ``V`` contain
            respectively the row indices, column indices, and values of the
            nonzero matrix entries.

    .. seealso:: :func:`scipy.sparse.find`
    """
    _check_A_type(A)
    # Copy before summing duplicates so the caller's matrix is untouched.
    A = sparse.coo_matrix(A, copy=True)
    A.sum_duplicates()
    # Explicitly stored zeros are excluded from the result.
    nz_mask = A.data != 0
    return A.row[nz_mask], A.col[nz_mask], A.data[nz_mask]
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def tril(A, k=0, format=None):
    """Returns the lower triangular portion of a matrix in sparse format

    Args:
        A (cupy.ndarray or cupyx.scipy.sparse.spmatrix): Matrix whose lower
            triangular portion is desired.
        k (integer): The top-most diagonal of the lower triangle.
        format (string): Sparse format of the result, e.g. 'csr', 'csc', etc.

    Returns:
        cupyx.scipy.sparse.spmatrix:
            Lower triangular portion of A in sparse format.

    .. seealso:: :func:`scipy.sparse.tril`
    """
    _check_A_type(A)
    coo = sparse.coo_matrix(A, copy=False)
    # An entry (r, c) belongs to the lower triangle when c <= r + k.
    keep = coo.row + k >= coo.col
    return _masked_coo(coo, keep).asformat(format)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def triu(A, k=0, format=None):
    """Returns the upper triangular portion of a matrix in sparse format

    Args:
        A (cupy.ndarray or cupyx.scipy.sparse.spmatrix): Matrix whose upper
            triangular portion is desired.
        k (integer): The bottom-most diagonal of the upper triangle.
        format (string): Sparse format of the result, e.g. 'csr', 'csc', etc.

    Returns:
        cupyx.scipy.sparse.spmatrix:
            Upper triangular portion of A in sparse format.

    .. seealso:: :func:`scipy.sparse.triu`
    """
    _check_A_type(A)
    coo = sparse.coo_matrix(A, copy=False)
    # An entry (r, c) belongs to the upper triangle when c >= r + k.
    keep = coo.row + k <= coo.col
    return _masked_coo(coo, keep).asformat(format)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def _check_A_type(A):
    """Raise ``TypeError`` unless *A* is a dense cupy array or a cupyx
    sparse matrix."""
    # Guard clauses keep the same short-circuit order as the original
    # boolean expression: dense arrays are accepted without touching
    # the sparse-matrix check.
    if isinstance(A, cupy.ndarray):
        return
    if cupyx.scipy.sparse.isspmatrix(A):
        return
    raise TypeError('A must be cupy.ndarray or cupyx.scipy.sparse.spmatrix')
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def _masked_coo(A, mask):
    """Return a new COO matrix containing only the entries of *A* selected
    by the boolean *mask*, keeping the original shape and dtype."""
    triplet = (A.data[mask], (A.row[mask], A.col[mask]))
    return sparse.coo_matrix(triplet, shape=A.shape, dtype=A.dtype)
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_index.py
ADDED
|
@@ -0,0 +1,703 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Indexing mixin for sparse matrix classes.
|
| 2 |
+
"""
|
| 3 |
+
|
| 4 |
+
import cupy
|
| 5 |
+
from cupy import _core
|
| 6 |
+
|
| 7 |
+
from cupyx.scipy.sparse._base import isspmatrix
|
| 8 |
+
from cupyx.scipy.sparse._base import spmatrix
|
| 9 |
+
|
| 10 |
+
from cupy.cuda import device
|
| 11 |
+
from cupy.cuda import runtime
|
| 12 |
+
|
| 13 |
+
import numpy
|
| 14 |
+
|
| 15 |
+
try:
|
| 16 |
+
import scipy.sparse
|
| 17 |
+
scipy_available = True
|
| 18 |
+
except ImportError:
|
| 19 |
+
scipy_available = False
|
| 20 |
+
|
| 21 |
+
_int_scalar_types = (int, numpy.integer, numpy.int_)
|
| 22 |
+
_bool_scalar_types = (bool, numpy.bool_)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# Read a single element from a compressed (CSR/CSC) matrix row: for every
# stored entry whose minor index equals ``minor``, accumulate its value into
# ``answer[0]``.  atomicAdd makes duplicate stored entries sum correctly.
_compress_getitem_kern = _core.ElementwiseKernel(
    'T d, S ind, int32 minor', 'raw T answer',
    'if (ind == minor) atomicAdd(&answer[0], d);',
    'cupyx_scipy_sparse_compress_getitem')
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
# Complex-valued variant of ``_compress_getitem_kern``: real and imaginary
# parts are accumulated into separate scalars because atomicAdd does not
# operate on complex types directly.
_compress_getitem_complex_kern = _core.ElementwiseKernel(
    'T real, T imag, S ind, int32 minor',
    'raw T answer_real, raw T answer_imag',
    '''
    if (ind == minor) {
        atomicAdd(&answer_real[0], real);
        atomicAdd(&answer_imag[0], imag);
    }
    ''',
    'cupyx_scipy_sparse_compress_getitem_complex')
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def _get_csr_submatrix_major_axis(Ax, Aj, Ap, start, stop):
|
| 44 |
+
"""Return a submatrix of the input sparse matrix by slicing major axis.
|
| 45 |
+
|
| 46 |
+
Args:
|
| 47 |
+
Ax (cupy.ndarray): data array from input sparse matrix
|
| 48 |
+
Aj (cupy.ndarray): indices array from input sparse matrix
|
| 49 |
+
Ap (cupy.ndarray): indptr array from input sparse matrix
|
| 50 |
+
start (int): starting index of major axis
|
| 51 |
+
stop (int): ending index of major axis
|
| 52 |
+
|
| 53 |
+
Returns:
|
| 54 |
+
Bx (cupy.ndarray): data array of output sparse matrix
|
| 55 |
+
Bj (cupy.ndarray): indices array of output sparse matrix
|
| 56 |
+
Bp (cupy.ndarray): indptr array of output sparse matrix
|
| 57 |
+
|
| 58 |
+
"""
|
| 59 |
+
Ap = Ap[start:stop + 1]
|
| 60 |
+
start_offset, stop_offset = int(Ap[0]), int(Ap[-1])
|
| 61 |
+
Bp = Ap - start_offset
|
| 62 |
+
Bj = Aj[start_offset:stop_offset]
|
| 63 |
+
Bx = Ax[start_offset:stop_offset]
|
| 64 |
+
|
| 65 |
+
return Bx, Bj, Bp
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def _get_csr_submatrix_minor_axis(Ax, Aj, Ap, start, stop):
    """Return a submatrix of the input sparse matrix by slicing minor axis.

    Args:
        Ax (cupy.ndarray): data array from input sparse matrix
        Aj (cupy.ndarray): indices array from input sparse matrix
        Ap (cupy.ndarray): indptr array from input sparse matrix
        start (int): starting index of minor axis
        stop (int): ending index of minor axis

    Returns:
        Bx (cupy.ndarray): data array of output sparse matrix
        Bj (cupy.ndarray): indices array of output sparse matrix
        Bp (cupy.ndarray): indptr array of output sparse matrix

    """
    keep = (start <= Aj) & (Aj < stop)
    # The inclusive prefix sum of the keep-mask tells, for any position in
    # ``Aj``, how many entries up to it survive the column filter; indexing
    # that prefix sum with the old indptr yields the new indptr directly.
    prefix = cupy.empty(Aj.size + 1, dtype=Aj.dtype)
    prefix[0] = 0
    prefix[1:] = keep
    cupy.cumsum(prefix, out=prefix)
    Bp = prefix[Ap]
    Bj = Aj[keep] - start
    Bx = Ax[keep]
    return Bx, Bj, Bp
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
# For output nonzero ``i`` belonging to output row ``out_rows`` (a COO row
# id), copy the column index and value from the corresponding position of
# source row ``rows[out_rows]``.  ``Bp`` locates where each output row
# starts so the offset within the row can be recovered from ``i``.
_csr_row_index_ker = _core.ElementwiseKernel(
    'int32 out_rows, raw I rows, '
    'raw int32 Ap, raw int32 Aj, raw T Ax, raw int32 Bp',
    'int32 Bj, T Bx',
    '''
    const I row = rows[out_rows];

    // Look up starting offset
    const I starting_output_offset = Bp[out_rows];
    const I output_offset = i - starting_output_offset;
    const I starting_input_offset = Ap[row];

    Bj = Aj[starting_input_offset + output_offset];
    Bx = Ax[starting_input_offset + output_offset];
    ''', 'cupyx_scipy_sparse_csr_row_index_ker')
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def _csr_row_index(Ax, Aj, Ap, rows):
    """Populate indices and data arrays from the given row index

    Args:
        Ax (cupy.ndarray): data array from input sparse matrix
        Aj (cupy.ndarray): indices array from input sparse matrix
        Ap (cupy.ndarray): indptr array from input sparse matrix
        rows (cupy.ndarray): index array of rows to populate

    Returns:
        Bx (cupy.ndarray): data array of output sparse matrix
        Bj (cupy.ndarray): indices array of output sparse matrix
        Bp (cupy.ndarray): indptr array for output sparse matrix
    """
    # New indptr: cumulative sum of the selected rows' nonzero counts.
    nnz_per_row = cupy.diff(Ap)
    Bp = cupy.empty(rows.size + 1, dtype=Ap.dtype)
    Bp[0] = 0
    cupy.cumsum(nnz_per_row[rows], out=Bp[1:])
    nnz = int(Bp[-1])

    # Per-nonzero output row ids drive the gather kernel below.
    out_rows = _csr_indptr_to_coo_rows(nnz, Bp)
    Bj, Bx = _csr_row_index_ker(out_rows, rows, Ap, Aj, Ax, Bp)
    return Bx, Bj, Bp
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def _csr_indptr_to_coo_rows(nnz, Bp):
    """Expand a CSR indptr array into a per-nonzero COO row-id array.

    Args:
        nnz (int): number of nonzeros (``int(Bp[-1])``)
        Bp (cupy.ndarray): CSR indptr array of length ``n_rows + 1``

    Returns:
        cupy.ndarray: int32 array of length ``nnz`` holding, for each
        nonzero, the index of the row it belongs to.
    """
    from cupy_backends.cuda.libs import cusparse

    out_rows = cupy.empty(nnz, dtype=numpy.int32)

    # Build a COO row array from output CSR indptr.
    # Calling backend cusparse API directly to avoid
    # constructing a whole COO object.
    handle = device.get_cusparse_handle()
    if runtime.is_hip and nnz == 0:
        # hipSPARSE rejects empty matrices here; fail loudly instead of
        # passing a null-sized buffer to the backend.
        raise ValueError('hipSPARSE currently cannot handle '
                         'sparse matrices with null ptrs')
    cusparse.xcsr2coo(
        handle, Bp.data.ptr, nnz, Bp.size-1, out_rows.data.ptr,
        cusparse.CUSPARSE_INDEX_BASE_ZERO)

    return out_rows
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def _select_last_indices(i, j, x, idx_dtype):
    """Find the unique indices for each row and keep only the last"""
    rows = cupy.asarray(i, dtype=idx_dtype)
    cols = cupy.asarray(j, dtype=idx_dtype)

    # lexsort with the column key first and the row key last orders the
    # triplets by (row, col); ``order`` retains each entry's original
    # position, which the mask kernel uses as the tie-break.
    order = cupy.lexsort(cupy.stack([cols, rows])).astype(idx_dtype)

    rows_sorted = rows[order]
    cols_sorted = cols[order]
    vals_sorted = x[order]

    # Start with everything kept; the kernel clears the earlier entry of
    # every adjacent duplicate (row, col) pair.
    keep = cupy.ones(rows_sorted.size, dtype='bool')
    _unique_mask_kern(rows_sorted, cols_sorted, order, keep,
                      size=rows_sorted.size - 1)

    return rows_sorted[keep], cols_sorted[keep], vals_sorted[keep]
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
# Merge, per row ``i``, the existing CSR entries (Aj/Ax over [Ap[i], Ap[i+1]))
# with a sorted stream of insertions (insert_indices/insert_values over
# [insertion_indptr[i], insertion_indptr[i+1])) into the output row starting
# at Bp[i].  Classic two-pointer merge: at each step the smaller current
# column index is emitted and its cursor advanced.
_insert_many_populate_arrays = _core.ElementwiseKernel(
    '''raw I insert_indices, raw T insert_values, raw I insertion_indptr,
    raw I Ap, raw I Aj, raw T Ax, raw I Bp''',
    'raw I Bj, raw T Bx', '''

    const I input_row_start = Ap[i];
    const I input_row_end = Ap[i+1];
    const I input_count = input_row_end - input_row_start;

    const I insert_row_start = insertion_indptr[i];
    const I insert_row_end = insertion_indptr[i+1];
    const I insert_count = insert_row_end - insert_row_start;

    I input_offset = 0;
    I insert_offset = 0;

    I output_n = Bp[i];

    I cur_existing_index = -1;
    T cur_existing_value = -1;

    I cur_insert_index = -1;
    T cur_insert_value = -1;

    if(input_offset < input_count) {
        cur_existing_index = Aj[input_row_start+input_offset];
        cur_existing_value = Ax[input_row_start+input_offset];
    }

    if(insert_offset < insert_count) {
        cur_insert_index = insert_indices[insert_row_start+insert_offset];
        cur_insert_value = insert_values[insert_row_start+insert_offset];
    }


    for(I jj = 0; jj < input_count + insert_count; jj++) {

        // if we have both available, use the lowest one.
        if(input_offset < input_count &&
           insert_offset < insert_count) {

            if(cur_existing_index < cur_insert_index) {
                Bj[output_n] = cur_existing_index;
                Bx[output_n] = cur_existing_value;

                ++input_offset;

                if(input_offset < input_count) {
                    cur_existing_index = Aj[input_row_start+input_offset];
                    cur_existing_value = Ax[input_row_start+input_offset];
                }


            } else {
                Bj[output_n] = cur_insert_index;
                Bx[output_n] = cur_insert_value;

                ++insert_offset;
                if(insert_offset < insert_count) {
                    cur_insert_index =
                        insert_indices[insert_row_start+insert_offset];
                    cur_insert_value =
                        insert_values[insert_row_start+insert_offset];
                }
            }

        } else if(input_offset < input_count) {
            Bj[output_n] = cur_existing_index;
            Bx[output_n] = cur_existing_value;

            ++input_offset;
            if(input_offset < input_count) {
                cur_existing_index = Aj[input_row_start+input_offset];
                cur_existing_value = Ax[input_row_start+input_offset];
            }

        } else {
            Bj[output_n] = cur_insert_index;
            Bx[output_n] = cur_insert_value;

            ++insert_offset;
            if(insert_offset < insert_count) {
                cur_insert_index =
                    insert_indices[insert_row_start+insert_offset];
                cur_insert_value =
                    insert_values[insert_row_start+insert_offset];
            }
        }

        output_n++;
    }
''', 'cupyx_scipy_sparse_csr_copy_existing_indices_kern', no_return=True)
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
# Create a filter mask based on the lowest value of order.
# For each adjacent pair of (row, col)-sorted entries that share the same
# coordinates, clear the mask bit of the one with the smaller original
# position (``order``), so that only the last-written duplicate survives.
# Launched with size = n - 1 so every adjacent pair is examined once.
_unique_mask_kern = _core.ElementwiseKernel(
    '''raw I rows, raw I cols, raw I order''',
    '''raw bool mask''',
    """
    I cur_row = rows[i];
    I next_row = rows[i+1];

    I cur_col = cols[i];
    I next_col = cols[i+1];

    I cur_order = order[i];
    I next_order = order[i+1];

    if(cur_row == next_row && cur_col == next_col) {
        if(cur_order < next_order)
            mask[i] = false;
        else
            mask[i+1] = false;
    }
    """,
    'cupyx_scipy_sparse_unique_mask_kern',
    no_return=True
)
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
def _csr_sample_values(n_row, n_col, Ap, Aj, Ax, Bi, Bj, not_found_val=0):
    """Populate data array for a set of rows and columns

    Args:
        n_row: total number of rows in input array
        n_col: total number of columns in input array
        Ap: indptr array for input sparse matrix
        Aj: indices array for input sparse matrix
        Ax: data array for input sparse matrix
        Bi: array of rows to extract from input sparse matrix
        Bj: array of columns to extract from input sparse matrix
        not_found_val: value recorded for coordinates with no stored entry

    Returns:
        Bx: data array for output sparse matrix
    """
    # Wrap negative coordinates in place before handing them to the kernel.
    Bi[Bi < 0] += n_row
    Bj[Bj < 0] += n_col

    return _csr_sample_values_kern(
        n_row, n_col, Ap, Aj, Ax, Bi, Bj, not_found_val, size=Bi.size)
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
# For each sample coordinate (Bi[i], Bj[i]), linearly scan that CSR row and
# sum every stored entry with a matching column (duplicates accumulate).
# Coordinates with no stored entry receive ``not_found_val``.
_csr_sample_values_kern = _core.ElementwiseKernel(
    '''I n_row, I n_col, raw I Ap, raw I Aj, raw T Ax,
    raw I Bi, raw I Bj, I not_found_val''',
    'raw T Bx', '''
    const I j = Bi[i]; // sample row
    const I k = Bj[i]; // sample column
    const I row_start = Ap[j];
    const I row_end = Ap[j+1];
    T x = 0;
    bool val_found = false;
    for(I jj = row_start; jj < row_end; jj++) {
        if (Aj[jj] == k) {
            x += Ax[jj];
            val_found = true;
        }
    }
    Bx[i] = val_found ? x : not_found_val;
    ''', 'cupyx_scipy_sparse_csr_sample_values_kern')
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
class IndexMixin(object):
    """
    This class provides common dispatching and validation logic for indexing.

    Concrete sparse classes mix this in and implement the ``_get_*X*`` /
    ``_set_*`` hooks; this class only normalizes keys and routes calls.
    """

    def __getitem__(self, key):
        """Normalize ``key`` into (row, col) and dispatch to the matching
        type-specialized getter hook."""

        # For testing- Scipy >= 1.4.0 is needed to guarantee
        # results match.
        if scipy_available and numpy.lib.NumpyVersion(
                scipy.__version__) < '1.4.0':
            raise NotImplementedError(
                "Sparse __getitem__() requires Scipy >= 1.4.0")

        row, col = self._parse_indices(key)

        # Dispatch to specialized methods.
        if isinstance(row, _int_scalar_types):
            if isinstance(col, _int_scalar_types):
                return self._get_intXint(row, col)
            elif isinstance(col, slice):
                return self._get_intXslice(row, col)
            elif col.ndim == 1:
                return self._get_intXarray(row, col)
            raise IndexError('index results in >2 dimensions')
        elif isinstance(row, slice):
            if isinstance(col, _int_scalar_types):
                return self._get_sliceXint(row, col)
            elif isinstance(col, slice):
                # A[:, :] is a plain copy.
                if row == slice(None) and row == col:
                    return self.copy()
                return self._get_sliceXslice(row, col)
            elif col.ndim == 1:
                return self._get_sliceXarray(row, col)
            raise IndexError('index results in >2 dimensions')
        elif row.ndim == 1:
            if isinstance(col, _int_scalar_types):
                return self._get_arrayXint(row, col)
            elif isinstance(col, slice):
                return self._get_arrayXslice(row, col)
            # array row + array col falls through to fancy indexing below.
        else:  # row.ndim == 2
            if isinstance(col, _int_scalar_types):
                return self._get_arrayXint(row, col)
            elif isinstance(col, slice):
                raise IndexError('index results in >2 dimensions')
            elif row.shape[1] == 1 and (col.ndim == 1 or col.shape[0] == 1):
                # special case for outer indexing
                return self._get_columnXarray(row[:, 0], col.ravel())

        # The only remaining case is inner (fancy) indexing
        row, col = cupy.broadcast_arrays(row, col)
        if row.shape != col.shape:
            raise IndexError('number of row and column indices differ')
        if row.size == 0:
            # Empty selection: return an empty matrix of matching shape.
            return self.__class__(cupy.atleast_2d(row).shape, dtype=self.dtype)
        return self._get_arrayXarray(row, col)

    def __setitem__(self, key, x):
        """Assign ``x`` to the positions selected by ``key``.

        Scalar targets go through ``_set_intXint``; everything else is
        expanded to broadcast-compatible index arrays and routed to
        ``_set_arrayXarray`` (dense ``x``) or ``_set_arrayXarray_sparse``.
        """
        row, col = self._parse_indices(key)

        if isinstance(row, _int_scalar_types) and\
                isinstance(col, _int_scalar_types):
            x = cupy.asarray(x, dtype=self.dtype)
            if x.size != 1:
                raise ValueError('Trying to assign a sequence to an item')
            self._set_intXint(row, col, x.flat[0])
            return

        # Materialize slices as column/row index vectors so that the two
        # axes broadcast against each other below.
        if isinstance(row, slice):
            row = cupy.arange(*row.indices(self.shape[0]))[:, None]
        else:
            row = cupy.atleast_1d(row)

        if isinstance(col, slice):
            col = cupy.arange(*col.indices(self.shape[1]))[None, :]
            if row.ndim == 1:
                row = row[:, None]
        else:
            col = cupy.atleast_1d(col)

        i, j = cupy.broadcast_arrays(row, col)
        if i.shape != j.shape:
            raise IndexError('number of row and column indices differ')

        if isspmatrix(x):
            if i.ndim == 1:
                # Inner indexing, so treat them like row vectors.
                i = i[None]
                j = j[None]
            broadcast_row = x.shape[0] == 1 and i.shape[0] != 1
            broadcast_col = x.shape[1] == 1 and i.shape[1] != 1
            if not ((broadcast_row or x.shape[0] == i.shape[0]) and
                    (broadcast_col or x.shape[1] == i.shape[1])):
                raise ValueError('shape mismatch in assignment')
            if x.size == 0:
                return
            x = x.tocoo(copy=True)
            x.sum_duplicates()
            self._set_arrayXarray_sparse(i, j, x)
        else:
            # Make x and i into the same shape
            x = cupy.asarray(x, dtype=self.dtype)
            x, _ = cupy.broadcast_arrays(x, i)
            if x.size == 0:
                return
            x = x.reshape(i.shape)
            self._set_arrayXarray(i, j, x)

    def _is_scalar(self, index):
        # A 0-d single-element ndarray behaves as a scalar index.
        if isinstance(index, (cupy.ndarray, numpy.ndarray)) and \
                index.ndim == 0 and index.size == 1:
            return True
        return False

    def _parse_indices(self, key):
        """Unpack ``key`` into validated (row, col): ints are bounds-checked
        and wrapped, slices pass through, everything else becomes an index
        array via ``_asindices``."""
        M, N = self.shape
        row, col = _unpack_index(key)

        if self._is_scalar(row):
            row = row.item()
        if self._is_scalar(col):
            col = col.item()

        # Scipy calls sputils.isintlike() rather than
        # isinstance(x, _int_scalar_types). Comparing directly to int
        # here to minimize the impact of nested exception catching

        if isinstance(row, _int_scalar_types):
            row = _normalize_index(row, M, 'row')
        elif not isinstance(row, slice):
            row = self._asindices(row, M)

        if isinstance(col, _int_scalar_types):
            col = _normalize_index(col, N, 'column')
        elif not isinstance(col, slice):
            col = self._asindices(col, N)

        return row, col

    def _asindices(self, idx, length):
        """Convert `idx` to a valid index for an axis with a given length.
        Subclasses that need special validation can override this method.

        idx is assumed to be at least a 1-dimensional array-like, but can
        have no more than 2 dimensions.
        """
        try:
            x = cupy.asarray(idx, dtype=self.indices.dtype)
        except (ValueError, TypeError, MemoryError):
            raise IndexError('invalid index')

        if x.ndim not in (1, 2):
            raise IndexError('Index dimension must be <= 2')

        # NOTE: the modulo wraps negative indices; out-of-range positive
        # indices also wrap rather than raise here.
        return x % length

    def getrow(self, i):
        """Return a copy of row i of the matrix, as a (1 x n) row vector.

        Args:
            i (integer): Row

        Returns:
            cupyx.scipy.sparse.spmatrix: Sparse matrix with single row
        """
        M, N = self.shape
        i = _normalize_index(i, M, 'index')
        return self._get_intXslice(i, slice(None))

    def getcol(self, i):
        """Return a copy of column i of the matrix, as a (m x 1) column vector.

        Args:
            i (integer): Column

        Returns:
            cupyx.scipy.sparse.spmatrix: Sparse matrix with single column
        """
        M, N = self.shape
        i = _normalize_index(i, N, 'index')
        return self._get_sliceXint(slice(None), i)

    # The hooks below form the dispatch surface used by __getitem__ and
    # __setitem__; concrete formats override the combinations they support.

    def _get_intXint(self, row, col):
        raise NotImplementedError()

    def _get_intXarray(self, row, col):
        raise NotImplementedError()

    def _get_intXslice(self, row, col):
        raise NotImplementedError()

    def _get_sliceXint(self, row, col):
        raise NotImplementedError()

    def _get_sliceXslice(self, row, col):
        raise NotImplementedError()

    def _get_sliceXarray(self, row, col):
        raise NotImplementedError()

    def _get_arrayXint(self, row, col):
        raise NotImplementedError()

    def _get_arrayXslice(self, row, col):
        raise NotImplementedError()

    def _get_columnXarray(self, row, col):
        raise NotImplementedError()

    def _get_arrayXarray(self, row, col):
        raise NotImplementedError()

    def _set_intXint(self, row, col, x):
        raise NotImplementedError()

    def _set_arrayXarray(self, row, col, x):
        raise NotImplementedError()

    def _set_arrayXarray_sparse(self, row, col, x):
        # Fall back to densifying x
        x = cupy.asarray(x.toarray(), dtype=self.dtype)
        x, _ = cupy.broadcast_arrays(x, row)
        self._set_arrayXarray(row, col, x)
|
| 564 |
+
|
| 565 |
+
|
| 566 |
+
def _try_is_scipy_spmatrix(index):
    """Return True when *index* is a scipy sparse matrix; always False when
    scipy is not installed."""
    if not scipy_available:
        return False
    return isinstance(index, scipy.sparse.spmatrix)
|
| 570 |
+
|
| 571 |
+
|
| 572 |
+
def _unpack_index(index):
    """ Parse index. Always return a tuple of the form (row, col).
    Valid type for row/col is integer, slice, or array of integers.

    Returns:
        resulting row & col indices : single integer, slice, or
        array of integers. If row & column indices are supplied
        explicitly, they are used as the major/minor indices.
        If only one index is supplied, the minor index is
        assumed to be all (e.g., [maj, :]).
    """
    # First, check if indexing with single boolean matrix.
    if ((isinstance(index, (spmatrix, cupy.ndarray,
                            numpy.ndarray))
         or _try_is_scipy_spmatrix(index))
            and index.ndim == 2 and index.dtype.kind == 'b'):
        # A 2-D boolean mask selects individual elements: its nonzero
        # coordinates become the (row, col) pair.
        return index.nonzero()

    # Parse any ellipses.
    index = _eliminate_ellipsis(index)

    # Next, parse the tuple or object
    if isinstance(index, tuple):
        if len(index) == 2:
            row, col = index
        elif len(index) == 1:
            row, col = index[0], slice(None)
        else:
            raise IndexError('invalid number of indices')
    else:
        # A single non-tuple index: boolean 1-D masks become integer
        # arrays over the rows; anything else indexes rows directly.
        idx = _compatible_boolean_index(index)
        if idx is None:
            row, col = index, slice(None)
        elif idx.ndim < 2:
            return _boolean_index_to_array(idx), slice(None)
        elif idx.ndim == 2:
            return idx.nonzero()
    # Next, check for validity and transform the index as needed.
    if isspmatrix(row) or isspmatrix(col):
        # Supporting sparse boolean indexing with both row and col does
        # not work because spmatrix.ndim is always 2.
        raise IndexError(
            'Indexing with sparse matrices is not supported '
            'except boolean indexing where matrix and index '
            'are equal shapes.')
    bool_row = _compatible_boolean_index(row)
    bool_col = _compatible_boolean_index(col)
    if bool_row is not None:
        row = _boolean_index_to_array(bool_row)
    if bool_col is not None:
        col = _boolean_index_to_array(bool_col)
    return row, col
|
| 624 |
+
|
| 625 |
+
|
| 626 |
+
def _eliminate_ellipsis(index):
|
| 627 |
+
"""Process indices with Ellipsis. Returns modified index."""
|
| 628 |
+
if index is Ellipsis:
|
| 629 |
+
return (slice(None), slice(None))
|
| 630 |
+
|
| 631 |
+
if not isinstance(index, tuple):
|
| 632 |
+
return index
|
| 633 |
+
|
| 634 |
+
# Find first ellipsis.
|
| 635 |
+
for j, v in enumerate(index):
|
| 636 |
+
if v is Ellipsis:
|
| 637 |
+
first_ellipsis = j
|
| 638 |
+
break
|
| 639 |
+
else:
|
| 640 |
+
return index
|
| 641 |
+
|
| 642 |
+
# Try to expand it using shortcuts for common cases
|
| 643 |
+
if len(index) == 1:
|
| 644 |
+
return (slice(None), slice(None))
|
| 645 |
+
if len(index) == 2:
|
| 646 |
+
if first_ellipsis == 0:
|
| 647 |
+
if index[1] is Ellipsis:
|
| 648 |
+
return (slice(None), slice(None))
|
| 649 |
+
return (slice(None), index[1])
|
| 650 |
+
return (index[0], slice(None))
|
| 651 |
+
|
| 652 |
+
# Expand it using a general-purpose algorithm
|
| 653 |
+
tail = []
|
| 654 |
+
for v in index[first_ellipsis+1:]:
|
| 655 |
+
if v is not Ellipsis:
|
| 656 |
+
tail.append(v)
|
| 657 |
+
nd = first_ellipsis + len(tail)
|
| 658 |
+
nslice = max(0, 2 - nd)
|
| 659 |
+
return index[:first_ellipsis] + (slice(None),) * nslice + tuple(tail,)
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
def _normalize_index(x, dim, name):
|
| 663 |
+
if x < -dim or x >= dim:
|
| 664 |
+
raise IndexError('{} ({}) out of range'.format(name, x))
|
| 665 |
+
if x < 0:
|
| 666 |
+
x += dim
|
| 667 |
+
return x
|
| 668 |
+
|
| 669 |
+
|
| 670 |
+
def _first_element_bool(idx, max_dim=2):
    """Returns True if first element of the incompatible
    array type is boolean.
    """
    # Recursion depth is bounded by ``max_dim``; deeper nesting gives up.
    if max_dim < 1:
        return None
    try:
        head = idx[0] if len(idx) > 0 else None
    except TypeError:
        # ``idx`` supports neither len() nor indexing.
        return None
    if isinstance(head, _bool_scalar_types):
        return True
    return _first_element_bool(head, max_dim - 1)
|
| 683 |
+
|
| 684 |
+
|
| 685 |
+
def _compatible_boolean_index(idx):
    """Returns a boolean index array that can be converted to
    integer array. Returns None if no such array exists.
    """
    # Objects exposing ``ndim`` are treated as compatible array types.
    if hasattr(idx, 'ndim'):
        return idx if idx.dtype.kind == 'b' else None
    # Plain Python collections of booleans are promoted to an ndarray.
    if _first_element_bool(idx):
        return cupy.asarray(idx, dtype='bool')
    return None
|
| 697 |
+
|
| 698 |
+
|
| 699 |
+
def _boolean_index_to_array(idx):
    """Convert a 1-D boolean mask into an array of its True positions."""
    if idx.ndim > 1:
        raise IndexError('invalid index shape')
    # Copy into a real cupy array before locating the True entries.
    mask = cupy.array(idx, dtype=idx.dtype)
    return cupy.where(mask)[0]
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_sputils.py
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cupy
|
| 2 |
+
import operator
|
| 3 |
+
import numpy
|
| 4 |
+
|
| 5 |
+
from cupy._core._dtype import get_dtype
|
| 6 |
+
|
| 7 |
+
supported_dtypes = [get_dtype(x) for x in
|
| 8 |
+
('single', 'double', 'csingle', 'cdouble')]
|
| 9 |
+
|
| 10 |
+
_upcast_memo: dict = {}
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def isdense(x):
    """Return ``True`` when ``x`` is a dense :class:`cupy.ndarray`."""
    return isinstance(x, cupy.ndarray)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def isscalarlike(x):
    """Is x either a scalar, an array scalar, or a 0-dim array?"""
    if cupy.isscalar(x):
        return True
    return isdense(x) and x.ndim == 0
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def get_index_dtype(arrays=(), maxval=None, check_contents=False):
    """Based on input (integer) arrays ``a``, determines a suitable index data
    type that can hold the data in the arrays.

    Args:
        arrays (tuple of array_like):
            Input arrays whose types/contents to check
        maxval (float, optional):
            Maximum value needed
        check_contents (bool, optional):
            Whether to check the values in the arrays and not just their types.
            Default: False (check only the types)

    Returns:
        dtype: Suitable index data type (int32 or int64)
    """

    int32min = cupy.iinfo(cupy.int32).min
    int32max = cupy.iinfo(cupy.int32).max

    # Start from int32 and only widen to int64 when forced to.
    dtype = cupy.int32
    if maxval is not None:
        if maxval > int32max:
            dtype = cupy.int64

    # Accept a bare ndarray as a convenience for a 1-tuple of arrays.
    if isinstance(arrays, cupy.ndarray):
        arrays = (arrays,)

    for arr in arrays:
        arr = cupy.asarray(arr)
        if not cupy.can_cast(arr.dtype, cupy.int32):
            if check_contents:
                if arr.size == 0:
                    # a bigger type not needed
                    continue
                elif cupy.issubdtype(arr.dtype, cupy.integer):
                    # NOTE(review): arr.max()/arr.min() presumably force a
                    # device synchronization here — acceptable since this is
                    # an opt-in content check.
                    maxval = arr.max()
                    minval = arr.min()
                    if minval >= int32min and maxval <= int32max:
                        # a bigger type not needed
                        continue

            # Either the dtype is too wide for int32, or the contents
            # do not fit; widen and stop scanning.
            dtype = cupy.int64
            break

    return dtype
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def validateaxis(axis):
    """Reject ``axis`` values that sparse matrices cannot handle."""
    if axis is None:
        return

    # In NumPy, you can pass in tuples for 'axis', but they are
    # not very useful for sparse matrices given their limited
    # dimensions, so let's make it explicit that they are not
    # allowed to be passed in
    if isinstance(axis, tuple):
        raise TypeError(("Tuples are not accepted for the 'axis' "
                         "parameter. Please pass in one of the "
                         "following: {-2, -1, 0, 1, None}."))

    axis_type = type(axis)

    # If not a tuple, check that the provided axis is actually
    # an integer and raise a TypeError similar to NumPy's
    if not cupy.issubdtype(cupy.dtype(axis_type), cupy.integer):
        raise TypeError("axis must be an integer, not {name}"
                        .format(name=axis_type.__name__))

    if not (-2 <= axis <= 1):
        raise ValueError("axis out of range")
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def upcast(*args):
    """Returns the nearest supported sparse dtype for the
    combination of one or more types.

    upcast(t0, t1, ..., tn) -> T where T is a supported dtype

    Examples:
        >>> upcast('int32')
        <type 'numpy.int32'>
        >>> upcast('int32','float32')
        <type 'numpy.float64'>
        >>> upcast('bool',float)
        <type 'numpy.complex128'>
    """
    # Fast path: this combination was resolved before.
    t = _upcast_memo.get(args)
    if t is not None:
        return t

    # Fix: do not shadow the function's own name with the intermediate
    # result (the original bound a local ``upcast`` here).
    combined = numpy.result_type(*args)

    # supported_dtypes is ordered narrowest-first, so the first dtype the
    # combined type can be cast to is the tightest supported choice.
    for t in supported_dtypes:
        if cupy.can_cast(combined, t):
            _upcast_memo[args] = t
            return t

    raise TypeError('no supported conversion for types: %r' % (args,))
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def check_shape(args, current_shape=None):
    """Check validity of the shape

    Args:
        args (tuple): Shape specification: either a single iterable of
            dimensions (``((2, 3),)``) or the dimensions given separately
            (``(2, 3)``).
        current_shape (tuple, optional): When given, ``args`` is validated
            as a reshape target for an array of this shape; a single ``-1``
            entry is inferred from the total size.

    Returns:
        tuple: The validated 2-tuple shape.

    Raises:
        TypeError: If no shape argument was given.
        ValueError: If the shape is not a valid 2-D shape or does not
            match ``current_shape``'s total size.
    """

    if len(args) == 0:
        raise TypeError("function missing 1 required positional argument: "
                        "'shape'")

    elif len(args) == 1:
        try:
            shape_iter = iter(args[0])
        except TypeError:
            # A lone scalar becomes a 1-tuple (rejected as 2-D below).
            new_shape = (operator.index(args[0]), )
        else:
            new_shape = tuple(operator.index(arg) for arg in shape_iter)
    else:
        new_shape = tuple(operator.index(arg) for arg in args)

    if current_shape is None:
        if len(new_shape) != 2:
            raise ValueError('shape must be a 2-tuple of positive integers')
        elif new_shape[0] < 0 or new_shape[1] < 0:
            raise ValueError("'shape' elements cannot be negative")

    else:
        current_size = numpy.prod(current_shape)

        negative_indexes = [i for i, x in enumerate(new_shape) if x < 0]
        if len(negative_indexes) == 0:
            new_size = numpy.prod(new_shape)
            if new_size != current_size:
                # Fixed: the original concatenated 'into shape' and '{}'
                # without a separating space.
                raise ValueError('cannot reshape array of size {} into shape'
                                 ' {}'.format(current_size, new_shape))
        elif len(negative_indexes) == 1:
            # Infer the single -1 entry from the remaining dimensions.
            skip = negative_indexes[0]
            specified = numpy.prod(new_shape[0:skip] + new_shape[skip+1:])
            unspecified, remainder = divmod(current_size, specified)
            if remainder != 0:
                err_shape = tuple('newshape' if x < 0 else x
                                  for x in new_shape)
                raise ValueError('cannot reshape array of size {} into shape'
                                 ' {}'.format(current_size, err_shape))
            new_shape = new_shape[0:skip] + (unspecified,) + new_shape[skip+1:]
        else:
            raise ValueError('can only specify one unknown dimension')

    if len(new_shape) != 2:
        raise ValueError('matrix shape must be two-dimensional')

    return new_shape
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/_util.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cupy
|
| 2 |
+
from cupy._core import core
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def isdense(x):
    """Return ``True`` when ``x`` is a dense device ndarray."""
    return isinstance(x, core.ndarray)
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def isintlike(x):
    """Return ``True`` when ``x`` compares equal to its int() conversion."""
    try:
        return bool(x == int(x))
    except (TypeError, ValueError):
        return False
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def isscalarlike(x):
    """Return ``True`` for scalars, array scalars, and 0-dim dense arrays."""
    if cupy.isscalar(x):
        return True
    return isdense(x) and x.ndim == 0
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def isshape(x):
    """Return ``True`` when ``x`` looks like a 2-D shape tuple."""
    if not (isinstance(x, tuple) and len(x) == 2):
        return False
    rows, cols = x
    # A nested tuple in the second slot (e.g. ((2, 3),)) is not a shape.
    if isinstance(cols, tuple):
        return False
    return isintlike(rows) and isintlike(cols)
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Functions from the following SciPy document
|
| 2 |
+
# https://docs.scipy.org/doc/scipy/reference/sparse.csgraph.html
|
| 3 |
+
|
| 4 |
+
from cupyx.scipy.sparse.csgraph._traversal import connected_components # NOQA
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (258 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/__pycache__/_traversal.cpython-310.pyc
ADDED
|
Binary file (3.51 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/csgraph/_traversal.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cupy
|
| 2 |
+
import cupyx.scipy.sparse
|
| 3 |
+
try:
|
| 4 |
+
import pylibcugraph
|
| 5 |
+
pylibcugraph_available = True
|
| 6 |
+
except ModuleNotFoundError:
|
| 7 |
+
pylibcugraph_available = False
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def connected_components(csgraph, directed=True, connection='weak',
                         return_labels=True):
    """Analyzes the connected components of a sparse graph

    Args:
        csgraph (cupy.ndarray of cupyx.scipy.sparse.csr_matrix): The adjacency
            matrix representing connectivity among nodes.
        directed (bool): If ``True``, it operates on a directed graph. If
            ``False``, it operates on an undirected graph.
        connection (str): ``'weak'`` or ``'strong'``. For directed graphs, the
            type of connection to use. Nodes i and j are "strongly" connected
            only when a path exists both from i to j and from j to i.
            If ``directed`` is ``False``, this argument is ignored.
        return_labels (bool): If ``True``, it returns the labels for each of
            the connected components.

    Returns:
        tuple of int and cupy.ndarray, or int:
            If ``return_labels`` == ``True``, returns a tuple ``(n, labels)``,
            where ``n`` is the number of connected components and ``labels`` is
            labels of each connected components. Otherwise, returns ``n``.

    .. seealso:: :func:`scipy.sparse.csgraph.connected_components`
    """
    if not pylibcugraph_available:
        raise RuntimeError('pylibcugraph is not available')

    connection = connection.lower()
    if connection not in ('weak', 'strong'):
        raise ValueError("connection must be 'weak' or 'strong'")

    # The strong/weak distinction is meaningless on undirected graphs.
    if not directed:
        connection = 'weak'

    if csgraph.ndim != 2:
        raise ValueError('graph should have two dimensions')

    if not cupyx.scipy.sparse.isspmatrix_csr(csgraph):
        csgraph = cupyx.scipy.sparse.csr_matrix(csgraph)
    m, m1 = csgraph.shape
    if m != m1:
        raise ValueError('graph should be a square array')
    if csgraph.nnz == 0:
        # With no edges every node is its own component.
        # Fixed: honor ``return_labels`` here as documented; the original
        # always returned the (n, labels) tuple on this path.
        if return_labels:
            return m, cupy.arange(m, dtype=csgraph.indices.dtype)
        return m

    if connection == 'strong':
        labels = cupy.empty(m, dtype=csgraph.indices.dtype)
        pylibcugraph.strongly_connected_components(
            offsets=csgraph.indptr, indices=csgraph.indices, weights=None,
            num_verts=m, num_edges=csgraph.nnz, labels=labels)
    else:
        # Symmetrize so weak connectivity ignores edge direction.
        csgraph += csgraph.T
        if not cupyx.scipy.sparse.isspmatrix_csr(csgraph):
            csgraph = cupyx.scipy.sparse.csr_matrix(csgraph)
        _, labels = pylibcugraph.weakly_connected_components(
            resource_handle=None,
            graph=None,
            indices=csgraph.indices,
            offsets=csgraph.indptr,
            weights=None,
            labels=None,
            do_expensive_check=False,
        )

    # Compress the raw component labels into the range [0, n).
    count = cupy.zeros((1,), dtype=csgraph.indices.dtype)
    root_labels = cupy.empty((m,), dtype=csgraph.indices.dtype)
    _cupy_count_components(labels, count, root_labels, size=m)
    n = int(count[0])
    if not return_labels:
        return n
    _cupy_adjust_labels(n, cupy.sort(root_labels[:n]), labels)
    return n, labels
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
# Path-compression and component-counting kernel.
# For each node i, follow its label chain to the root.  Non-root nodes get
# their label compressed directly to the root; each root atomically claims a
# slot in ``root_labels`` and increments ``count``.
_cupy_count_components = cupy.ElementwiseKernel(
    '',
    'raw I labels, raw int32 count, raw int32 root_labels',
    '''
    int j = i;
    while (j != labels[j]) { j = labels[j]; }
    if (j != i) {
        labels[i] = j;
    } else {
        int k = atomicAdd(&count[0], 1);
        root_labels[k] = i;
    }
    ''',
    '_cupy_count_components')


# Renumbering kernel: binary-search each node's (compressed) root label in
# the sorted ``root_labels`` array and replace it with the position found,
# yielding dense labels in [0, n_root_labels).
# NOTE(review): correctness relies on every label being present in
# ``root_labels`` (guaranteed by the counting kernel above) — verify if
# reused elsewhere.
_cupy_adjust_labels = cupy.ElementwiseKernel(
    'int32 n_root_labels, raw I root_labels',
    'I labels',
    '''
    int cur_label = labels;
    int j_min = 0;
    int j_max = n_root_labels - 1;
    int j = (j_min + j_max) / 2;
    while (j_min < j_max) {
        if (cur_label == root_labels[j]) break;
        if (cur_label < root_labels[j]) {
            j_max = j - 1;
        } else {
            j_min = j + 1;
        }
        j = (j_min + j_max) / 2;
    }
    labels = j;
    ''',
    '_cupy_adjust_labels')
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__init__.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Functions from the following SciPy document
|
| 2 |
+
# https://docs.scipy.org/doc/scipy/reference/sparse.linalg.html
|
| 3 |
+
|
| 4 |
+
# "NOQA" to suppress flake8 warning
|
| 5 |
+
from cupyx.scipy.sparse.linalg._norm import norm # NOQA
|
| 6 |
+
from cupyx.scipy.sparse.linalg._solve import spsolve # NOQA
|
| 7 |
+
from cupyx.scipy.sparse.linalg._solve import spsolve_triangular # NOQA
|
| 8 |
+
from cupyx.scipy.sparse.linalg._solve import factorized # NOQA
|
| 9 |
+
from cupyx.scipy.sparse.linalg._solve import lsqr # NOQA
|
| 10 |
+
from cupyx.scipy.sparse.linalg._solve import lsmr # NOQA
|
| 11 |
+
from cupyx.scipy.sparse.linalg._solve import splu # NOQA
|
| 12 |
+
from cupyx.scipy.sparse.linalg._solve import spilu # NOQA
|
| 13 |
+
from cupyx.scipy.sparse.linalg._solve import SuperLU # NOQA
|
| 14 |
+
from cupyx.scipy.sparse.linalg._solve import minres # NOQA
|
| 15 |
+
from cupyx.scipy.sparse.linalg._eigen import eigsh # NOQA
|
| 16 |
+
from cupyx.scipy.sparse.linalg._eigen import svds # NOQA
|
| 17 |
+
from cupyx.scipy.sparse.linalg._iterative import cg # NOQA
|
| 18 |
+
from cupyx.scipy.sparse.linalg._iterative import gmres # NOQA
|
| 19 |
+
from cupyx.scipy.sparse.linalg._iterative import cgs # NOQA
|
| 20 |
+
from cupyx.scipy.sparse.linalg._interface import LinearOperator # NOQA
|
| 21 |
+
from cupyx.scipy.sparse.linalg._interface import aslinearoperator # NOQA
|
| 22 |
+
from cupyx.scipy.sparse.linalg._lobpcg import lobpcg # NOQA
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (925 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_eigen.cpython-310.pyc
ADDED
|
Binary file (11.1 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_interface.cpython-310.pyc
ADDED
|
Binary file (20 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_iterative.cpython-310.pyc
ADDED
|
Binary file (11.7 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_lobpcg.cpython-310.pyc
ADDED
|
Binary file (13.2 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_norm.cpython-310.pyc
ADDED
|
Binary file (3.03 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/__pycache__/_solve.cpython-310.pyc
ADDED
|
Binary file (23.8 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_eigen.py
ADDED
|
@@ -0,0 +1,430 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy
|
| 2 |
+
import cupy
|
| 3 |
+
|
| 4 |
+
from cupy import cublas
|
| 5 |
+
from cupy._core import _dtype
|
| 6 |
+
from cupy.cuda import device
|
| 7 |
+
from cupy_backends.cuda.libs import cublas as _cublas
|
| 8 |
+
from cupyx.scipy.sparse import _csr
|
| 9 |
+
from cupyx.scipy.sparse.linalg import _interface
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def eigsh(a, k=6, *, which='LM', v0=None, ncv=None, maxiter=None,
          tol=0, return_eigenvectors=True):
    """
    Find ``k`` eigenvalues and eigenvectors of the real symmetric square
    matrix or complex Hermitian matrix ``A``.

    Solves ``Ax = wx``, the standard eigenvalue problem for ``w`` eigenvalues
    with corresponding eigenvectors ``x``.

    Args:
        a (ndarray, spmatrix or LinearOperator): A symmetric square matrix with
            dimension ``(n, n)``. ``a`` must :class:`cupy.ndarray`,
            :class:`cupyx.scipy.sparse.spmatrix` or
            :class:`cupyx.scipy.sparse.linalg.LinearOperator`.
        k (int): The number of eigenvalues and eigenvectors to compute. Must be
            ``1 <= k < n``.
        which (str): 'LM' or 'LA' or 'SA'.
            'LM': finds ``k`` largest (in magnitude) eigenvalues.
            'LA': finds ``k`` largest (algebraic) eigenvalues.
            'SA': finds ``k`` smallest (algebraic) eigenvalues.

        v0 (ndarray): Starting vector for iteration. If ``None``, a random
            unit vector is used.
        ncv (int): The number of Lanczos vectors generated. Must be
            ``k + 1 < ncv < n``. If ``None``, default value is used.
        maxiter (int): Maximum number of Lanczos update iterations.
            If ``None``, default value is used.
        tol (float): Tolerance for residuals ``||Ax - wx||``. If ``0``, machine
            precision is used.
        return_eigenvectors (bool): If ``True``, returns eigenvectors in
            addition to eigenvalues.

    Returns:
        tuple:
            If ``return_eigenvectors is True``, it returns ``w`` and ``x``
            where ``w`` is eigenvalues and ``x`` is eigenvectors. Otherwise,
            it returns only ``w``.

    .. seealso:: :func:`scipy.sparse.linalg.eigsh`

    .. note::
        This function uses the thick-restart Lanczos methods
        (https://sdm.lbl.gov/~kewu/ps/trlan.html).

    """
    n = a.shape[0]
    if a.ndim != 2 or a.shape[0] != a.shape[1]:
        raise ValueError('expected square matrix (shape: {})'.format(a.shape))
    if a.dtype.char not in 'fdFD':
        # Fixed message typo: 'unsupprted' -> 'unsupported'.
        raise TypeError('unsupported dtype (actual: {})'.format(a.dtype))
    if k <= 0:
        raise ValueError('k must be greater than 0 (actual: {})'.format(k))
    if k >= n:
        raise ValueError('k must be smaller than n (actual: {})'.format(k))
    if which not in ('LM', 'LA', 'SA'):
        # Fixed message spacing: "'LM','LA'or'SA'" -> "'LM', 'LA' or 'SA'".
        raise ValueError('which must be \'LM\', \'LA\' or \'SA\' (actual: {})'
                         ''.format(which))
    if ncv is None:
        ncv = min(max(2 * k, k + 32), n - 1)
    else:
        ncv = min(max(ncv, k + 2), n - 1)
    if maxiter is None:
        maxiter = 10 * n
    if tol == 0:
        tol = numpy.finfo(a.dtype).eps

    # beta holds real norms even for complex dtypes (char.lower()).
    alpha = cupy.zeros((ncv,), dtype=a.dtype)
    beta = cupy.zeros((ncv,), dtype=a.dtype.char.lower())
    V = cupy.empty((ncv, n), dtype=a.dtype)

    # Set initial vector
    if v0 is None:
        u = cupy.random.random((n,)).astype(a.dtype)
        V[0] = u / cublas.nrm2(u)
    else:
        u = v0
        V[0] = v0 / cublas.nrm2(v0)

    # Choose Lanczos implementation, unconditionally use 'fast' for now
    # Fixed local-variable typo: 'upadte_impl' -> 'update_impl'.
    update_impl = 'fast'
    if update_impl == 'fast':
        lanczos = _lanczos_fast(a, n, ncv)
    else:
        lanczos = _lanczos_asis

    # Lanczos iteration
    lanczos(a, V, u, alpha, beta, 0, ncv)

    iter = ncv
    w, s = _eigsh_solve_ritz(alpha, beta, None, k, which)
    x = V.T @ s

    # Compute residual
    beta_k = beta[-1] * s[-1, :]
    res = cublas.nrm2(beta_k)

    uu = cupy.empty((k,), dtype=a.dtype)

    while res > tol and iter < maxiter:
        # Setup for thick-restart
        beta[:k] = 0
        alpha[:k] = w
        V[:k] = x.T

        # u -= u.T @ V[:k].conj().T @ V[:k]
        cublas.gemv(_cublas.CUBLAS_OP_C, 1, V[:k].T, u, 0, uu)
        cublas.gemv(_cublas.CUBLAS_OP_N, -1, V[:k].T, uu, 1, u)
        V[k] = u / cublas.nrm2(u)

        u[...] = a @ V[k]
        cublas.dotc(V[k], u, out=alpha[k])
        u -= alpha[k] * V[k]
        u -= V[:k].T @ beta_k
        cublas.nrm2(u, out=beta[k])
        V[k+1] = u / beta[k]

        # Lanczos iteration
        lanczos(a, V, u, alpha, beta, k + 1, ncv)

        iter += ncv - k
        w, s = _eigsh_solve_ritz(alpha, beta, beta_k, k, which)
        x = V.T @ s

        # Compute residual
        beta_k = beta[-1] * s[-1, :]
        res = cublas.nrm2(beta_k)

    if return_eigenvectors:
        idx = cupy.argsort(w)
        return w[idx], x[:, idx]
    else:
        return cupy.sort(w)
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def _lanczos_asis(a, V, u, alpha, beta, i_start, i_end):
    """Reference (slow) Lanczos update using whole-array cupy operations.

    Fills ``alpha[i]``, ``beta[i]`` and the basis rows ``V[i+1]`` for
    ``i`` in ``[i_start, i_end)``.  ``u`` is updated in place.
    """
    for i in range(i_start, i_end):
        u[...] = a @ V[i]
        cublas.dotc(V[i], u, out=alpha[i])
        # Full reorthogonalization of u against all basis vectors so far.
        u -= u.T @ V[:i+1].conj().T @ V[:i+1]
        cublas.nrm2(u, out=beta[i])
        # Stop before writing V[i_end]; the caller owns that row.
        if i >= i_end - 1:
            break
        V[i+1] = u / beta[i]
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def _lanczos_fast(A, n, ncv):
    """Build a fast Lanczos-update closure for matrix ``A``.

    Selects the dtype-matched cuBLAS routines once, pre-allocates work
    buffers, and returns ``aux(A, V, u, alpha, beta, i_start, i_end)``
    which performs the same update as ``_lanczos_asis`` using raw
    cuBLAS/cuSPARSE calls.
    """
    from cupy_backends.cuda.libs import cusparse as _cusparse
    from cupyx import cusparse

    cublas_handle = device.get_cublas_handle()
    # Remember the caller's pointer mode so it can be restored after each
    # device-pointer call below.
    cublas_pointer_mode = _cublas.getPointerMode(cublas_handle)
    # Pick the cuBLAS routines matching A's dtype (s/d/c/z variants).
    if A.dtype.char == 'f':
        dotc = _cublas.sdot
        nrm2 = _cublas.snrm2
        gemv = _cublas.sgemv
        axpy = _cublas.saxpy
    elif A.dtype.char == 'd':
        dotc = _cublas.ddot
        nrm2 = _cublas.dnrm2
        gemv = _cublas.dgemv
        axpy = _cublas.daxpy
    elif A.dtype.char == 'F':
        dotc = _cublas.cdotc
        nrm2 = _cublas.scnrm2
        gemv = _cublas.cgemv
        axpy = _cublas.caxpy
    elif A.dtype.char == 'D':
        dotc = _cublas.zdotc
        nrm2 = _cublas.dznrm2
        gemv = _cublas.zgemv
        axpy = _cublas.zaxpy
    else:
        raise TypeError('invalid dtype ({})'.format(A.dtype))

    # Use cuSPARSE SpMV when A is CSR and the generic API is available.
    cusparse_handle = None
    if _csr.isspmatrix_csr(A) and cusparse.check_availability('spmv'):
        cusparse_handle = device.get_cusparse_handle()
        spmv_op_a = _cusparse.CUSPARSE_OPERATION_NON_TRANSPOSE
        spmv_alpha = numpy.array(1.0, A.dtype)
        spmv_beta = numpy.array(0.0, A.dtype)
        spmv_cuda_dtype = _dtype.to_cuda_dtype(A.dtype)
        spmv_alg = _cusparse.CUSPARSE_MV_ALG_DEFAULT

    # Pre-allocated work buffers reused across calls to ``aux``.
    v = cupy.empty((n,), dtype=A.dtype)
    uu = cupy.empty((ncv,), dtype=A.dtype)
    vv = cupy.empty((n,), dtype=A.dtype)
    b = cupy.empty((), dtype=A.dtype)
    one = numpy.array(1.0, dtype=A.dtype)
    zero = numpy.array(0.0, dtype=A.dtype)
    mone = numpy.array(-1.0, dtype=A.dtype)

    outer_A = A

    def aux(A, V, u, alpha, beta, i_start, i_end):
        # The closure's buffers were sized for this particular matrix.
        assert A is outer_A

        # Get ready for spmv if enabled
        if cusparse_handle is not None:
            # Note: I would like to reuse descriptors and working buffer
            # on the next update, but I gave it up because it sometimes
            # caused illegal memory access error.
            spmv_desc_A = cusparse.SpMatDescriptor.create(A)
            spmv_desc_v = cusparse.DnVecDescriptor.create(v)
            spmv_desc_u = cusparse.DnVecDescriptor.create(u)
            buff_size = _cusparse.spMV_bufferSize(
                cusparse_handle, spmv_op_a, spmv_alpha.ctypes.data,
                spmv_desc_A.desc, spmv_desc_v.desc, spmv_beta.ctypes.data,
                spmv_desc_u.desc, spmv_cuda_dtype, spmv_alg)
            spmv_buff = cupy.empty(buff_size, cupy.int8)

        v[...] = V[i_start]
        for i in range(i_start, i_end):
            # Matrix-vector multiplication
            if cusparse_handle is None:
                u[...] = A @ v
            else:
                _cusparse.spMV(
                    cusparse_handle, spmv_op_a, spmv_alpha.ctypes.data,
                    spmv_desc_A.desc, spmv_desc_v.desc,
                    spmv_beta.ctypes.data, spmv_desc_u.desc,
                    spmv_cuda_dtype, spmv_alg, spmv_buff.data.ptr)

            # Call dotc: alpha[i] = v.conj().T @ u
            # (device pointer mode so the result lands directly in alpha[i])
            _cublas.setPointerMode(
                cublas_handle, _cublas.CUBLAS_POINTER_MODE_DEVICE)
            try:
                dotc(cublas_handle, n, v.data.ptr, 1, u.data.ptr, 1,
                     alpha.data.ptr + i * alpha.itemsize)
            finally:
                _cublas.setPointerMode(cublas_handle, cublas_pointer_mode)

            # Orthogonalize: u = u - alpha[i] * v - beta[i - 1] * V[i - 1]
            # NOTE(review): at i == 0 this reads beta[-1] / V[-1]
            # (wrap-around); beta is zero-initialized on the first sweep so
            # the term vanishes then — confirm for restarted sweeps.
            vv.fill(0)
            b[...] = beta[i - 1]  # cast from real to complex
            _cublas.setPointerMode(
                cublas_handle, _cublas.CUBLAS_POINTER_MODE_DEVICE)
            try:
                axpy(cublas_handle, n,
                     alpha.data.ptr + i * alpha.itemsize,
                     v.data.ptr, 1, vv.data.ptr, 1)
                axpy(cublas_handle, n,
                     b.data.ptr,
                     V[i - 1].data.ptr, 1, vv.data.ptr, 1)
            finally:
                _cublas.setPointerMode(cublas_handle, cublas_pointer_mode)
            axpy(cublas_handle, n,
                 mone.ctypes.data,
                 vv.data.ptr, 1, u.data.ptr, 1)

            # Reorthogonalize: u -= V @ (V.conj().T @ u)
            gemv(cublas_handle, _cublas.CUBLAS_OP_C,
                 n, i + 1,
                 one.ctypes.data, V.data.ptr, n,
                 u.data.ptr, 1,
                 zero.ctypes.data, uu.data.ptr, 1)
            gemv(cublas_handle, _cublas.CUBLAS_OP_N,
                 n, i + 1,
                 mone.ctypes.data, V.data.ptr, n,
                 uu.data.ptr, 1,
                 one.ctypes.data, u.data.ptr, 1)
            # Fold the correction from reorthogonalization back into alpha.
            alpha[i] += uu[i]

            # Call nrm2 (device pointer mode, result written to beta[i])
            _cublas.setPointerMode(
                cublas_handle, _cublas.CUBLAS_POINTER_MODE_DEVICE)
            try:
                nrm2(cublas_handle, n, u.data.ptr, 1,
                     beta.data.ptr + i * beta.itemsize)
            finally:
                _cublas.setPointerMode(cublas_handle, cublas_pointer_mode)

            # Break here as the normalization below touches V[i+1]
            if i >= i_end - 1:
                break

            # Normalize
            _kernel_normalize(u, beta, i, n, v, V)

    return aux
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
# Fused kernel: v = u / beta[j] and V[j+1] = v in a single pass, i.e.
# normalize the new Lanczos vector and store it as the next basis row.
_kernel_normalize = cupy.ElementwiseKernel(
    'T u, raw S beta, int32 j, int32 n', 'T v, raw T V',
    'v = u / beta[j]; V[i + (j+1) * n] = v;', 'cupy_eigsh_normalize'
)
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
def _eigsh_solve_ritz(alpha, beta, beta_k, k, which):
|
| 300 |
+
# Note: This is done on the CPU, because there is an issue in
|
| 301 |
+
# cupy.linalg.eigh with CUDA 9.2, which can return NaNs. It will has little
|
| 302 |
+
# impact on performance, since the matrix size processed here is not large.
|
| 303 |
+
alpha = cupy.asnumpy(alpha)
|
| 304 |
+
beta = cupy.asnumpy(beta)
|
| 305 |
+
t = numpy.diag(alpha)
|
| 306 |
+
t = t + numpy.diag(beta[:-1], k=1)
|
| 307 |
+
t = t + numpy.diag(beta[:-1], k=-1)
|
| 308 |
+
if beta_k is not None:
|
| 309 |
+
beta_k = cupy.asnumpy(beta_k)
|
| 310 |
+
t[k, :k] = beta_k
|
| 311 |
+
t[:k, k] = beta_k
|
| 312 |
+
w, s = numpy.linalg.eigh(t)
|
| 313 |
+
|
| 314 |
+
# Pick-up k ritz-values and ritz-vectors
|
| 315 |
+
if which == 'LA':
|
| 316 |
+
idx = numpy.argsort(w)
|
| 317 |
+
wk = w[idx[-k:]]
|
| 318 |
+
sk = s[:, idx[-k:]]
|
| 319 |
+
elif which == 'LM':
|
| 320 |
+
idx = numpy.argsort(numpy.absolute(w))
|
| 321 |
+
wk = w[idx[-k:]]
|
| 322 |
+
sk = s[:, idx[-k:]]
|
| 323 |
+
|
| 324 |
+
elif which == 'SA':
|
| 325 |
+
idx = numpy.argsort(w)
|
| 326 |
+
wk = w[idx[:k]]
|
| 327 |
+
sk = s[:, idx[:k]]
|
| 328 |
+
# elif which == 'SM': #dysfunctional
|
| 329 |
+
# idx = cupy.argsort(abs(w))
|
| 330 |
+
# wk = w[idx[:k]]
|
| 331 |
+
# sk = s[:,idx[:k]]
|
| 332 |
+
return cupy.array(wk), cupy.array(sk)
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
def svds(a, k=6, *, ncv=None, tol=0, which='LM', maxiter=None,
|
| 336 |
+
return_singular_vectors=True):
|
| 337 |
+
"""Finds the largest ``k`` singular values/vectors for a sparse matrix.
|
| 338 |
+
|
| 339 |
+
Args:
|
| 340 |
+
a (ndarray, spmatrix or LinearOperator): A real or complex array with
|
| 341 |
+
dimension ``(m, n)``. ``a`` must :class:`cupy.ndarray`,
|
| 342 |
+
:class:`cupyx.scipy.sparse.spmatrix` or
|
| 343 |
+
:class:`cupyx.scipy.sparse.linalg.LinearOperator`.
|
| 344 |
+
k (int): The number of singular values/vectors to compute. Must be
|
| 345 |
+
``1 <= k < min(m, n)``.
|
| 346 |
+
ncv (int): The number of Lanczos vectors generated. Must be
|
| 347 |
+
``k + 1 < ncv < min(m, n)``. If ``None``, default value is used.
|
| 348 |
+
tol (float): Tolerance for singular values. If ``0``, machine precision
|
| 349 |
+
is used.
|
| 350 |
+
which (str): Only 'LM' is supported. 'LM': finds ``k`` largest singular
|
| 351 |
+
values.
|
| 352 |
+
maxiter (int): Maximum number of Lanczos update iterations.
|
| 353 |
+
If ``None``, default value is used.
|
| 354 |
+
return_singular_vectors (bool): If ``True``, returns singular vectors
|
| 355 |
+
in addition to singular values.
|
| 356 |
+
|
| 357 |
+
Returns:
|
| 358 |
+
tuple:
|
| 359 |
+
If ``return_singular_vectors`` is ``True``, it returns ``u``, ``s``
|
| 360 |
+
and ``vt`` where ``u`` is left singular vectors, ``s`` is singular
|
| 361 |
+
values and ``vt`` is right singular vectors. Otherwise, it returns
|
| 362 |
+
only ``s``.
|
| 363 |
+
|
| 364 |
+
.. seealso:: :func:`scipy.sparse.linalg.svds`
|
| 365 |
+
|
| 366 |
+
.. note::
|
| 367 |
+
This is a naive implementation using cupyx.scipy.sparse.linalg.eigsh as
|
| 368 |
+
an eigensolver on ``a.H @ a`` or ``a @ a.H``.
|
| 369 |
+
|
| 370 |
+
"""
|
| 371 |
+
if a.ndim != 2:
|
| 372 |
+
raise ValueError('expected 2D (shape: {})'.format(a.shape))
|
| 373 |
+
if a.dtype.char not in 'fdFD':
|
| 374 |
+
raise TypeError('unsupprted dtype (actual: {})'.format(a.dtype))
|
| 375 |
+
m, n = a.shape
|
| 376 |
+
if k <= 0:
|
| 377 |
+
raise ValueError('k must be greater than 0 (actual: {})'.format(k))
|
| 378 |
+
if k >= min(m, n):
|
| 379 |
+
raise ValueError('k must be smaller than min(m, n) (actual: {})'
|
| 380 |
+
''.format(k))
|
| 381 |
+
|
| 382 |
+
a = _interface.aslinearoperator(a)
|
| 383 |
+
if m >= n:
|
| 384 |
+
aH, a = a.H, a
|
| 385 |
+
else:
|
| 386 |
+
aH, a = a, a.H
|
| 387 |
+
|
| 388 |
+
if return_singular_vectors:
|
| 389 |
+
w, x = eigsh(aH @ a, k=k, which=which, ncv=ncv, maxiter=maxiter,
|
| 390 |
+
tol=tol, return_eigenvectors=True)
|
| 391 |
+
else:
|
| 392 |
+
w = eigsh(aH @ a, k=k, which=which, ncv=ncv, maxiter=maxiter, tol=tol,
|
| 393 |
+
return_eigenvectors=False)
|
| 394 |
+
|
| 395 |
+
w = cupy.maximum(w, 0)
|
| 396 |
+
t = w.dtype.char.lower()
|
| 397 |
+
factor = {'f': 1e3, 'd': 1e6}
|
| 398 |
+
cond = factor[t] * numpy.finfo(t).eps
|
| 399 |
+
cutoff = cond * cupy.max(w)
|
| 400 |
+
above_cutoff = (w > cutoff)
|
| 401 |
+
n_large = above_cutoff.sum().item()
|
| 402 |
+
s = cupy.zeros_like(w)
|
| 403 |
+
s[:n_large] = cupy.sqrt(w[above_cutoff])
|
| 404 |
+
if not return_singular_vectors:
|
| 405 |
+
return s
|
| 406 |
+
|
| 407 |
+
x = x[:, above_cutoff]
|
| 408 |
+
if m >= n:
|
| 409 |
+
v = x
|
| 410 |
+
u = a @ v / s[:n_large]
|
| 411 |
+
else:
|
| 412 |
+
u = x
|
| 413 |
+
v = a @ u / s[:n_large]
|
| 414 |
+
u = _augmented_orthnormal_cols(u, k - n_large)
|
| 415 |
+
v = _augmented_orthnormal_cols(v, k - n_large)
|
| 416 |
+
|
| 417 |
+
return u, s, v.conj().T
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
def _augmented_orthnormal_cols(x, n_aug):
|
| 421 |
+
if n_aug <= 0:
|
| 422 |
+
return x
|
| 423 |
+
m, n = x.shape
|
| 424 |
+
y = cupy.empty((m, n + n_aug), dtype=x.dtype)
|
| 425 |
+
y[:, :n] = x
|
| 426 |
+
for i in range(n, n + n_aug):
|
| 427 |
+
v = cupy.random.random((m, )).astype(x.dtype)
|
| 428 |
+
v -= v @ y[:, :i].conj() @ y[:, :i].T
|
| 429 |
+
y[:, i] = v / cupy.linalg.norm(v)
|
| 430 |
+
return y
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_interface.py
ADDED
|
@@ -0,0 +1,578 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
|
| 3 |
+
import cupy
|
| 4 |
+
|
| 5 |
+
from cupyx.scipy import sparse
|
| 6 |
+
from cupyx.scipy.sparse import _util
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class LinearOperator(object):
|
| 10 |
+
"""LinearOperator(shape, matvec, rmatvec=None, matmat=None, dtype=None, \
|
| 11 |
+
rmatmat=None)
|
| 12 |
+
|
| 13 |
+
Common interface for performing matrix vector products
|
| 14 |
+
|
| 15 |
+
To construct a concrete LinearOperator, either pass appropriate callables
|
| 16 |
+
to the constructor of this class, or subclass it.
|
| 17 |
+
|
| 18 |
+
Args:
|
| 19 |
+
shape (tuple): Matrix dimensions ``(M, N)``.
|
| 20 |
+
matvec (callable f(v)): Returns returns ``A * v``.
|
| 21 |
+
rmatvec (callable f(v)): Returns ``A^H * v``, where ``A^H`` is the
|
| 22 |
+
conjugate transpose of ``A``.
|
| 23 |
+
matmat (callable f(V)): Returns ``A * V``, where ``V`` is a dense
|
| 24 |
+
matrix with dimensions ``(N, K)``.
|
| 25 |
+
dtype (dtype): Data type of the matrix.
|
| 26 |
+
rmatmat (callable f(V)): Returns ``A^H * V``, where ``V`` is a dense
|
| 27 |
+
matrix with dimensions ``(M, K)``.
|
| 28 |
+
|
| 29 |
+
.. seealso:: :class:`scipy.sparse.linalg.LinearOperator`
|
| 30 |
+
"""
|
| 31 |
+
|
| 32 |
+
ndim = 2
|
| 33 |
+
|
| 34 |
+
def __new__(cls, *args, **kwargs):
|
| 35 |
+
if cls is LinearOperator:
|
| 36 |
+
# Operate as _CustomLinearOperator factory.
|
| 37 |
+
return super(LinearOperator, cls).__new__(_CustomLinearOperator)
|
| 38 |
+
else:
|
| 39 |
+
obj = super(LinearOperator, cls).__new__(cls)
|
| 40 |
+
|
| 41 |
+
if (type(obj)._matvec == LinearOperator._matvec
|
| 42 |
+
and type(obj)._matmat == LinearOperator._matmat):
|
| 43 |
+
warnings.warn('LinearOperator subclass should implement'
|
| 44 |
+
' at least one of _matvec and _matmat.',
|
| 45 |
+
category=RuntimeWarning, stacklevel=2)
|
| 46 |
+
|
| 47 |
+
return obj
|
| 48 |
+
|
| 49 |
+
def __init__(self, dtype, shape):
|
| 50 |
+
"""Initialize this :class:`LinearOperator`
|
| 51 |
+
"""
|
| 52 |
+
if dtype is not None:
|
| 53 |
+
dtype = cupy.dtype(dtype)
|
| 54 |
+
|
| 55 |
+
shape = tuple(shape)
|
| 56 |
+
if not _util.isshape(shape):
|
| 57 |
+
raise ValueError('invalid shape %r (must be 2-d)' % (shape,))
|
| 58 |
+
|
| 59 |
+
self.dtype = dtype
|
| 60 |
+
self.shape = shape
|
| 61 |
+
|
| 62 |
+
def _init_dtype(self):
|
| 63 |
+
"""Called from subclasses at the end of the `__init__` routine.
|
| 64 |
+
"""
|
| 65 |
+
if self.dtype is None:
|
| 66 |
+
v = cupy.zeros(self.shape[-1])
|
| 67 |
+
self.dtype = self.matvec(v).dtype
|
| 68 |
+
|
| 69 |
+
def _matmat(self, X):
|
| 70 |
+
"""Default matrix-matrix multiplication handler.
|
| 71 |
+
"""
|
| 72 |
+
|
| 73 |
+
return cupy.hstack([self.matvec(col.reshape(-1, 1)) for col in X.T])
|
| 74 |
+
|
| 75 |
+
def _matvec(self, x):
|
| 76 |
+
"""Default matrix-vector multiplication handler.
|
| 77 |
+
"""
|
| 78 |
+
return self.matmat(x.reshape(-1, 1))
|
| 79 |
+
|
| 80 |
+
def matvec(self, x):
|
| 81 |
+
"""Matrix-vector multiplication.
|
| 82 |
+
"""
|
| 83 |
+
|
| 84 |
+
M, N = self.shape
|
| 85 |
+
|
| 86 |
+
if x.shape != (N,) and x.shape != (N, 1):
|
| 87 |
+
raise ValueError('dimension mismatch')
|
| 88 |
+
|
| 89 |
+
y = self._matvec(x)
|
| 90 |
+
|
| 91 |
+
if x.ndim == 1:
|
| 92 |
+
y = y.reshape(M)
|
| 93 |
+
elif x.ndim == 2:
|
| 94 |
+
y = y.reshape(M, 1)
|
| 95 |
+
else:
|
| 96 |
+
raise ValueError('invalid shape returned by user-defined matvec()')
|
| 97 |
+
|
| 98 |
+
return y
|
| 99 |
+
|
| 100 |
+
def rmatvec(self, x):
|
| 101 |
+
"""Adjoint matrix-vector multiplication.
|
| 102 |
+
"""
|
| 103 |
+
|
| 104 |
+
M, N = self.shape
|
| 105 |
+
|
| 106 |
+
if x.shape != (M,) and x.shape != (M, 1):
|
| 107 |
+
raise ValueError('dimension mismatch')
|
| 108 |
+
|
| 109 |
+
y = self._rmatvec(x)
|
| 110 |
+
|
| 111 |
+
if x.ndim == 1:
|
| 112 |
+
y = y.reshape(N)
|
| 113 |
+
elif x.ndim == 2:
|
| 114 |
+
y = y.reshape(N, 1)
|
| 115 |
+
else:
|
| 116 |
+
raise ValueError(
|
| 117 |
+
'invalid shape returned by user-defined rmatvec()')
|
| 118 |
+
|
| 119 |
+
return y
|
| 120 |
+
|
| 121 |
+
def _rmatvec(self, x):
|
| 122 |
+
"""Default implementation of _rmatvec; defers to adjoint.
|
| 123 |
+
"""
|
| 124 |
+
if type(self)._adjoint == LinearOperator._adjoint:
|
| 125 |
+
# _adjoint not overridden, prevent infinite recursion
|
| 126 |
+
raise NotImplementedError
|
| 127 |
+
else:
|
| 128 |
+
return self.H.matvec(x)
|
| 129 |
+
|
| 130 |
+
def matmat(self, X):
|
| 131 |
+
"""Matrix-matrix multiplication.
|
| 132 |
+
"""
|
| 133 |
+
|
| 134 |
+
if X.ndim != 2:
|
| 135 |
+
raise ValueError('expected 2-d ndarray or matrix, not %d-d'
|
| 136 |
+
% X.ndim)
|
| 137 |
+
|
| 138 |
+
if X.shape[0] != self.shape[1]:
|
| 139 |
+
raise ValueError('dimension mismatch: %r, %r'
|
| 140 |
+
% (self.shape, X.shape))
|
| 141 |
+
|
| 142 |
+
Y = self._matmat(X)
|
| 143 |
+
|
| 144 |
+
return Y
|
| 145 |
+
|
| 146 |
+
def rmatmat(self, X):
|
| 147 |
+
"""Adjoint matrix-matrix multiplication.
|
| 148 |
+
"""
|
| 149 |
+
|
| 150 |
+
if X.ndim != 2:
|
| 151 |
+
raise ValueError('expected 2-d ndarray or matrix, not %d-d'
|
| 152 |
+
% X.ndim)
|
| 153 |
+
|
| 154 |
+
if X.shape[0] != self.shape[0]:
|
| 155 |
+
raise ValueError('dimension mismatch: %r, %r'
|
| 156 |
+
% (self.shape, X.shape))
|
| 157 |
+
|
| 158 |
+
Y = self._rmatmat(X)
|
| 159 |
+
return Y
|
| 160 |
+
|
| 161 |
+
def _rmatmat(self, X):
|
| 162 |
+
"""Default implementation of _rmatmat defers to rmatvec or adjoint."""
|
| 163 |
+
if type(self)._adjoint == LinearOperator._adjoint:
|
| 164 |
+
return cupy.hstack([self.rmatvec(col.reshape(-1, 1))
|
| 165 |
+
for col in X.T])
|
| 166 |
+
else:
|
| 167 |
+
return self.H.matmat(X)
|
| 168 |
+
|
| 169 |
+
def __call__(self, x):
|
| 170 |
+
return self*x
|
| 171 |
+
|
| 172 |
+
def __mul__(self, x):
|
| 173 |
+
return self.dot(x)
|
| 174 |
+
|
| 175 |
+
def dot(self, x):
|
| 176 |
+
"""Matrix-matrix or matrix-vector multiplication.
|
| 177 |
+
"""
|
| 178 |
+
if isinstance(x, LinearOperator):
|
| 179 |
+
return _ProductLinearOperator(self, x)
|
| 180 |
+
elif cupy.isscalar(x):
|
| 181 |
+
return _ScaledLinearOperator(self, x)
|
| 182 |
+
else:
|
| 183 |
+
if x.ndim == 1 or x.ndim == 2 and x.shape[1] == 1:
|
| 184 |
+
return self.matvec(x)
|
| 185 |
+
elif x.ndim == 2:
|
| 186 |
+
return self.matmat(x)
|
| 187 |
+
else:
|
| 188 |
+
raise ValueError('expected 1-d or 2-d array, got %r'
|
| 189 |
+
% x)
|
| 190 |
+
|
| 191 |
+
def __matmul__(self, other):
|
| 192 |
+
if cupy.isscalar(other):
|
| 193 |
+
raise ValueError('Scalar operands are not allowed, '
|
| 194 |
+
'use \'*\' instead')
|
| 195 |
+
return self.__mul__(other)
|
| 196 |
+
|
| 197 |
+
def __rmatmul__(self, other):
|
| 198 |
+
if cupy.isscalar(other):
|
| 199 |
+
raise ValueError('Scalar operands are not allowed, '
|
| 200 |
+
'use \'*\' instead')
|
| 201 |
+
return self.__rmul__(other)
|
| 202 |
+
|
| 203 |
+
def __rmul__(self, x):
|
| 204 |
+
if cupy.isscalar(x):
|
| 205 |
+
return _ScaledLinearOperator(self, x)
|
| 206 |
+
else:
|
| 207 |
+
return NotImplemented
|
| 208 |
+
|
| 209 |
+
def __pow__(self, p):
|
| 210 |
+
if cupy.isscalar(p):
|
| 211 |
+
return _PowerLinearOperator(self, p)
|
| 212 |
+
else:
|
| 213 |
+
return NotImplemented
|
| 214 |
+
|
| 215 |
+
def __add__(self, x):
|
| 216 |
+
if isinstance(x, LinearOperator):
|
| 217 |
+
return _SumLinearOperator(self, x)
|
| 218 |
+
else:
|
| 219 |
+
return NotImplemented
|
| 220 |
+
|
| 221 |
+
def __neg__(self):
|
| 222 |
+
return _ScaledLinearOperator(self, -1)
|
| 223 |
+
|
| 224 |
+
def __sub__(self, x):
|
| 225 |
+
return self.__add__(-x)
|
| 226 |
+
|
| 227 |
+
def __repr__(self):
|
| 228 |
+
M, N = self.shape
|
| 229 |
+
if self.dtype is None:
|
| 230 |
+
dt = 'unspecified dtype'
|
| 231 |
+
else:
|
| 232 |
+
dt = 'dtype=' + str(self.dtype)
|
| 233 |
+
|
| 234 |
+
return '<%dx%d %s with %s>' % (M, N, self.__class__.__name__, dt)
|
| 235 |
+
|
| 236 |
+
def adjoint(self):
|
| 237 |
+
"""Hermitian adjoint.
|
| 238 |
+
"""
|
| 239 |
+
return self._adjoint()
|
| 240 |
+
|
| 241 |
+
H = property(adjoint)
|
| 242 |
+
|
| 243 |
+
def transpose(self):
|
| 244 |
+
"""Transpose this linear operator.
|
| 245 |
+
"""
|
| 246 |
+
return self._transpose()
|
| 247 |
+
|
| 248 |
+
T = property(transpose)
|
| 249 |
+
|
| 250 |
+
def _adjoint(self):
|
| 251 |
+
"""Default implementation of _adjoint; defers to rmatvec."""
|
| 252 |
+
return _AdjointLinearOperator(self)
|
| 253 |
+
|
| 254 |
+
def _transpose(self):
|
| 255 |
+
""" Default implementation of _transpose; defers to rmatvec + conj"""
|
| 256 |
+
return _TransposedLinearOperator(self)
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
class _CustomLinearOperator(LinearOperator):
|
| 260 |
+
"""Linear operator defined in terms of user-specified operations."""
|
| 261 |
+
|
| 262 |
+
def __init__(self, shape, matvec, rmatvec=None, matmat=None,
|
| 263 |
+
dtype=None, rmatmat=None):
|
| 264 |
+
super(_CustomLinearOperator, self).__init__(dtype, shape)
|
| 265 |
+
|
| 266 |
+
self.args = ()
|
| 267 |
+
|
| 268 |
+
self.__matvec_impl = matvec
|
| 269 |
+
self.__rmatvec_impl = rmatvec
|
| 270 |
+
self.__rmatmat_impl = rmatmat
|
| 271 |
+
self.__matmat_impl = matmat
|
| 272 |
+
|
| 273 |
+
self._init_dtype()
|
| 274 |
+
|
| 275 |
+
def _matmat(self, X):
|
| 276 |
+
if self.__matmat_impl is not None:
|
| 277 |
+
return self.__matmat_impl(X)
|
| 278 |
+
else:
|
| 279 |
+
return super(_CustomLinearOperator, self)._matmat(X)
|
| 280 |
+
|
| 281 |
+
def _matvec(self, x):
|
| 282 |
+
return self.__matvec_impl(x)
|
| 283 |
+
|
| 284 |
+
def _rmatvec(self, x):
|
| 285 |
+
func = self.__rmatvec_impl
|
| 286 |
+
if func is None:
|
| 287 |
+
raise NotImplementedError('rmatvec is not defined')
|
| 288 |
+
return self.__rmatvec_impl(x)
|
| 289 |
+
|
| 290 |
+
def _rmatmat(self, X):
|
| 291 |
+
if self.__rmatmat_impl is not None:
|
| 292 |
+
return self.__rmatmat_impl(X)
|
| 293 |
+
else:
|
| 294 |
+
return super(_CustomLinearOperator, self)._rmatmat(X)
|
| 295 |
+
|
| 296 |
+
def _adjoint(self):
|
| 297 |
+
return _CustomLinearOperator(shape=(self.shape[1], self.shape[0]),
|
| 298 |
+
matvec=self.__rmatvec_impl,
|
| 299 |
+
rmatvec=self.__matvec_impl,
|
| 300 |
+
matmat=self.__rmatmat_impl,
|
| 301 |
+
rmatmat=self.__matmat_impl,
|
| 302 |
+
dtype=self.dtype)
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
class _AdjointLinearOperator(LinearOperator):
|
| 306 |
+
"""Adjoint of arbitrary Linear Operator"""
|
| 307 |
+
|
| 308 |
+
def __init__(self, A):
|
| 309 |
+
shape = (A.shape[1], A.shape[0])
|
| 310 |
+
super(_AdjointLinearOperator, self).__init__(
|
| 311 |
+
dtype=A.dtype, shape=shape)
|
| 312 |
+
self.A = A
|
| 313 |
+
self.args = (A,)
|
| 314 |
+
|
| 315 |
+
def _matvec(self, x):
|
| 316 |
+
return self.A._rmatvec(x)
|
| 317 |
+
|
| 318 |
+
def _rmatvec(self, x):
|
| 319 |
+
return self.A._matvec(x)
|
| 320 |
+
|
| 321 |
+
def _matmat(self, x):
|
| 322 |
+
return self.A._rmatmat(x)
|
| 323 |
+
|
| 324 |
+
def _rmatmat(self, x):
|
| 325 |
+
return self.A._matmat(x)
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
class _TransposedLinearOperator(LinearOperator):
|
| 329 |
+
"""Transposition of arbitrary Linear Operator"""
|
| 330 |
+
|
| 331 |
+
def __init__(self, A):
|
| 332 |
+
shape = (A.shape[1], A.shape[0])
|
| 333 |
+
super(_TransposedLinearOperator, self).__init__(
|
| 334 |
+
dtype=A.dtype, shape=shape)
|
| 335 |
+
self.A = A
|
| 336 |
+
self.args = (A,)
|
| 337 |
+
|
| 338 |
+
def _matvec(self, x):
|
| 339 |
+
# NB. cupy.conj works also on sparse matrices
|
| 340 |
+
return cupy.conj(self.A._rmatvec(cupy.conj(x)))
|
| 341 |
+
|
| 342 |
+
def _rmatvec(self, x):
|
| 343 |
+
return cupy.conj(self.A._matvec(cupy.conj(x)))
|
| 344 |
+
|
| 345 |
+
def _matmat(self, x):
|
| 346 |
+
# NB. cupy.conj works also on sparse matrices
|
| 347 |
+
return cupy.conj(self.A._rmatmat(cupy.conj(x)))
|
| 348 |
+
|
| 349 |
+
def _rmatmat(self, x):
|
| 350 |
+
return cupy.conj(self.A._matmat(cupy.conj(x)))
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
def _get_dtype(operators, dtypes=None):
|
| 354 |
+
if dtypes is None:
|
| 355 |
+
dtypes = []
|
| 356 |
+
for obj in operators:
|
| 357 |
+
if obj is not None and hasattr(obj, 'dtype'):
|
| 358 |
+
dtypes.append(obj.dtype)
|
| 359 |
+
return cupy.result_type(*dtypes)
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
class _SumLinearOperator(LinearOperator):
|
| 363 |
+
def __init__(self, A, B):
|
| 364 |
+
if not isinstance(A, LinearOperator) or \
|
| 365 |
+
not isinstance(B, LinearOperator):
|
| 366 |
+
raise ValueError('both operands have to be a LinearOperator')
|
| 367 |
+
if A.shape != B.shape:
|
| 368 |
+
raise ValueError('cannot add %r and %r: shape mismatch'
|
| 369 |
+
% (A, B))
|
| 370 |
+
self.args = (A, B)
|
| 371 |
+
super(_SumLinearOperator, self).__init__(_get_dtype([A, B]), A.shape)
|
| 372 |
+
|
| 373 |
+
def _matvec(self, x):
|
| 374 |
+
return self.args[0].matvec(x) + self.args[1].matvec(x)
|
| 375 |
+
|
| 376 |
+
def _rmatvec(self, x):
|
| 377 |
+
return self.args[0].rmatvec(x) + self.args[1].rmatvec(x)
|
| 378 |
+
|
| 379 |
+
def _rmatmat(self, x):
|
| 380 |
+
return self.args[0].rmatmat(x) + self.args[1].rmatmat(x)
|
| 381 |
+
|
| 382 |
+
def _matmat(self, x):
|
| 383 |
+
return self.args[0].matmat(x) + self.args[1].matmat(x)
|
| 384 |
+
|
| 385 |
+
def _adjoint(self):
|
| 386 |
+
A, B = self.args
|
| 387 |
+
return A.H + B.H
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
class _ProductLinearOperator(LinearOperator):
|
| 391 |
+
def __init__(self, A, B):
|
| 392 |
+
if not isinstance(A, LinearOperator) or \
|
| 393 |
+
not isinstance(B, LinearOperator):
|
| 394 |
+
raise ValueError('both operands have to be a LinearOperator')
|
| 395 |
+
if A.shape[1] != B.shape[0]:
|
| 396 |
+
raise ValueError('cannot multiply %r and %r: shape mismatch'
|
| 397 |
+
% (A, B))
|
| 398 |
+
super(_ProductLinearOperator, self).__init__(_get_dtype([A, B]),
|
| 399 |
+
(A.shape[0], B.shape[1]))
|
| 400 |
+
self.args = (A, B)
|
| 401 |
+
|
| 402 |
+
def _matvec(self, x):
|
| 403 |
+
return self.args[0].matvec(self.args[1].matvec(x))
|
| 404 |
+
|
| 405 |
+
def _rmatvec(self, x):
|
| 406 |
+
return self.args[1].rmatvec(self.args[0].rmatvec(x))
|
| 407 |
+
|
| 408 |
+
def _rmatmat(self, x):
|
| 409 |
+
return self.args[1].rmatmat(self.args[0].rmatmat(x))
|
| 410 |
+
|
| 411 |
+
def _matmat(self, x):
|
| 412 |
+
return self.args[0].matmat(self.args[1].matmat(x))
|
| 413 |
+
|
| 414 |
+
def _adjoint(self):
|
| 415 |
+
A, B = self.args
|
| 416 |
+
return B.H * A.H
|
| 417 |
+
|
| 418 |
+
|
| 419 |
+
class _ScaledLinearOperator(LinearOperator):
|
| 420 |
+
def __init__(self, A, alpha):
|
| 421 |
+
if not isinstance(A, LinearOperator):
|
| 422 |
+
raise ValueError('LinearOperator expected as A')
|
| 423 |
+
if not cupy.isscalar(alpha):
|
| 424 |
+
raise ValueError('scalar expected as alpha')
|
| 425 |
+
dtype = _get_dtype([A], [type(alpha)])
|
| 426 |
+
super(_ScaledLinearOperator, self).__init__(dtype, A.shape)
|
| 427 |
+
self.args = (A, alpha)
|
| 428 |
+
|
| 429 |
+
def _matvec(self, x):
|
| 430 |
+
return self.args[1] * self.args[0].matvec(x)
|
| 431 |
+
|
| 432 |
+
def _rmatvec(self, x):
|
| 433 |
+
return cupy.conj(self.args[1]) * self.args[0].rmatvec(x)
|
| 434 |
+
|
| 435 |
+
def _rmatmat(self, x):
|
| 436 |
+
return cupy.conj(self.args[1]) * self.args[0].rmatmat(x)
|
| 437 |
+
|
| 438 |
+
def _matmat(self, x):
|
| 439 |
+
return self.args[1] * self.args[0].matmat(x)
|
| 440 |
+
|
| 441 |
+
def _adjoint(self):
|
| 442 |
+
A, alpha = self.args
|
| 443 |
+
return A.H * cupy.conj(alpha)
|
| 444 |
+
|
| 445 |
+
|
| 446 |
+
class _PowerLinearOperator(LinearOperator):
|
| 447 |
+
def __init__(self, A, p):
|
| 448 |
+
if not isinstance(A, LinearOperator):
|
| 449 |
+
raise ValueError('LinearOperator expected as A')
|
| 450 |
+
if A.shape[0] != A.shape[1]:
|
| 451 |
+
raise ValueError('square LinearOperator expected, got %r' % A)
|
| 452 |
+
if not _util.isintlike(p) or p < 0:
|
| 453 |
+
raise ValueError('non-negative integer expected as p')
|
| 454 |
+
|
| 455 |
+
super(_PowerLinearOperator, self).__init__(_get_dtype([A]), A.shape)
|
| 456 |
+
self.args = (A, p)
|
| 457 |
+
|
| 458 |
+
def _power(self, fun, x):
|
| 459 |
+
res = cupy.array(x, copy=True)
|
| 460 |
+
for i in range(self.args[1]):
|
| 461 |
+
res = fun(res)
|
| 462 |
+
return res
|
| 463 |
+
|
| 464 |
+
def _matvec(self, x):
|
| 465 |
+
return self._power(self.args[0].matvec, x)
|
| 466 |
+
|
| 467 |
+
def _rmatvec(self, x):
|
| 468 |
+
return self._power(self.args[0].rmatvec, x)
|
| 469 |
+
|
| 470 |
+
def _rmatmat(self, x):
|
| 471 |
+
return self._power(self.args[0].rmatmat, x)
|
| 472 |
+
|
| 473 |
+
def _matmat(self, x):
|
| 474 |
+
return self._power(self.args[0].matmat, x)
|
| 475 |
+
|
| 476 |
+
def _adjoint(self):
|
| 477 |
+
A, p = self.args
|
| 478 |
+
return A.H ** p
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
class MatrixLinearOperator(LinearOperator):
|
| 482 |
+
def __init__(self, A):
|
| 483 |
+
super(MatrixLinearOperator, self).__init__(A.dtype, A.shape)
|
| 484 |
+
self.A = A
|
| 485 |
+
self.__adj = None
|
| 486 |
+
self.args = (A,)
|
| 487 |
+
|
| 488 |
+
def _matmat(self, X):
|
| 489 |
+
return self.A.dot(X)
|
| 490 |
+
|
| 491 |
+
def _adjoint(self):
|
| 492 |
+
if self.__adj is None:
|
| 493 |
+
self.__adj = _AdjointMatrixOperator(self)
|
| 494 |
+
return self.__adj
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
class _AdjointMatrixOperator(MatrixLinearOperator):
|
| 498 |
+
def __init__(self, adjoint):
|
| 499 |
+
self.A = adjoint.A.T.conj()
|
| 500 |
+
self.__adjoint = adjoint
|
| 501 |
+
self.args = (adjoint,)
|
| 502 |
+
self.shape = adjoint.shape[1], adjoint.shape[0]
|
| 503 |
+
|
| 504 |
+
@property
|
| 505 |
+
def dtype(self):
|
| 506 |
+
return self.__adjoint.dtype
|
| 507 |
+
|
| 508 |
+
def _adjoint(self):
|
| 509 |
+
return self.__adjoint
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
class IdentityOperator(LinearOperator):
|
| 513 |
+
def __init__(self, shape, dtype=None):
|
| 514 |
+
super(IdentityOperator, self).__init__(dtype, shape)
|
| 515 |
+
|
| 516 |
+
def _matvec(self, x):
|
| 517 |
+
return x
|
| 518 |
+
|
| 519 |
+
def _rmatvec(self, x):
|
| 520 |
+
return x
|
| 521 |
+
|
| 522 |
+
def _rmatmat(self, x):
|
| 523 |
+
return x
|
| 524 |
+
|
| 525 |
+
def _matmat(self, x):
|
| 526 |
+
return x
|
| 527 |
+
|
| 528 |
+
def _adjoint(self):
|
| 529 |
+
return self
|
| 530 |
+
|
| 531 |
+
|
| 532 |
+
def aslinearoperator(A):
|
| 533 |
+
"""Return `A` as a LinearOperator.
|
| 534 |
+
|
| 535 |
+
Args:
|
| 536 |
+
A (array-like):
|
| 537 |
+
The input array to be converted to a `LinearOperator` object.
|
| 538 |
+
It may be any of the following types:
|
| 539 |
+
|
| 540 |
+
* :class:`cupy.ndarray`
|
| 541 |
+
* sparse matrix (e.g. ``csr_matrix``, ``coo_matrix``, etc.)
|
| 542 |
+
* :class:`cupyx.scipy.sparse.linalg.LinearOperator`
|
| 543 |
+
* object with ``.shape`` and ``.matvec`` attributes
|
| 544 |
+
|
| 545 |
+
Returns:
|
| 546 |
+
cupyx.scipy.sparse.linalg.LinearOperator: `LinearOperator` object
|
| 547 |
+
|
| 548 |
+
.. seealso:: :func:`scipy.sparse.aslinearoperator``
|
| 549 |
+
"""
|
| 550 |
+
if isinstance(A, LinearOperator):
|
| 551 |
+
return A
|
| 552 |
+
|
| 553 |
+
elif isinstance(A, cupy.ndarray):
|
| 554 |
+
if A.ndim > 2:
|
| 555 |
+
raise ValueError('array must have ndim <= 2')
|
| 556 |
+
A = cupy.atleast_2d(A)
|
| 557 |
+
return MatrixLinearOperator(A)
|
| 558 |
+
|
| 559 |
+
elif sparse.isspmatrix(A):
|
| 560 |
+
return MatrixLinearOperator(A)
|
| 561 |
+
|
| 562 |
+
else:
|
| 563 |
+
if hasattr(A, 'shape') and hasattr(A, 'matvec'):
|
| 564 |
+
rmatvec = None
|
| 565 |
+
rmatmat = None
|
| 566 |
+
dtype = None
|
| 567 |
+
|
| 568 |
+
if hasattr(A, 'rmatvec'):
|
| 569 |
+
rmatvec = A.rmatvec
|
| 570 |
+
if hasattr(A, 'rmatmat'):
|
| 571 |
+
rmatmat = A.rmatmat
|
| 572 |
+
if hasattr(A, 'dtype'):
|
| 573 |
+
dtype = A.dtype
|
| 574 |
+
return LinearOperator(A.shape, A.matvec, rmatvec=rmatvec,
|
| 575 |
+
rmatmat=rmatmat, dtype=dtype)
|
| 576 |
+
|
| 577 |
+
else:
|
| 578 |
+
raise TypeError('type not understood')
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_iterative.py
ADDED
|
@@ -0,0 +1,409 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy
|
| 2 |
+
|
| 3 |
+
import cupy
|
| 4 |
+
from cupy import cublas
|
| 5 |
+
from cupy._core import _dtype
|
| 6 |
+
from cupy.cuda import device
|
| 7 |
+
from cupy_backends.cuda.libs import cublas as _cublas
|
| 8 |
+
from cupyx.scipy.sparse import _csr
|
| 9 |
+
from cupyx.scipy.sparse.linalg import _interface
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def cg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None,
       atol=None):
    """Uses Conjugate Gradient iteration to solve ``Ax = b``.

    Args:
        A (ndarray, spmatrix or LinearOperator): The real or complex matrix of
            the linear system with shape ``(n, n)``. ``A`` must be a hermitian,
            positive definitive matrix with type of :class:`cupy.ndarray`,
            :class:`cupyx.scipy.sparse.spmatrix` or
            :class:`cupyx.scipy.sparse.linalg.LinearOperator`.
        b (cupy.ndarray): Right hand side of the linear system with shape
            ``(n,)`` or ``(n, 1)``.
        x0 (cupy.ndarray): Starting guess for the solution.
        tol (float): Tolerance for convergence.
        maxiter (int): Maximum number of iterations.
        M (ndarray, spmatrix or LinearOperator): Preconditioner for ``A``.
            The preconditioner should approximate the inverse of ``A``.
            ``M`` must be :class:`cupy.ndarray`,
            :class:`cupyx.scipy.sparse.spmatrix` or
            :class:`cupyx.scipy.sparse.linalg.LinearOperator`.
        callback (function): User-specified function to call after each
            iteration. It is called as ``callback(xk)``, where ``xk`` is the
            current solution vector.
        atol (float): Tolerance for convergence.

    Returns:
        tuple:
            It returns ``x`` (cupy.ndarray) and ``info`` (int) where ``x`` is
            the converged solution and ``info`` provides convergence
            information.

    .. seealso:: :func:`scipy.sparse.linalg.cg`
    """
    A, M, x, b = _make_system(A, M, x0, b)
    matvec = A.matvec
    psolve = M.matvec

    n = A.shape[0]
    if maxiter is None:
        maxiter = n * 10
    if n == 0:
        return cupy.empty_like(b), 0
    b_norm = cupy.linalg.norm(b)
    if b_norm == 0:
        return b, 0
    # The effective absolute tolerance is at least ``tol`` relative to ||b||.
    if atol is None:
        atol = tol * float(b_norm)
    else:
        atol = max(float(atol), tol * float(b_norm))

    r = b - matvec(x)
    # Compute the initial residual norm up front.  This also fixes a latent
    # NameError: previously ``resid`` was first assigned inside the loop, so
    # passing ``maxiter <= 0`` with n > 0 crashed at the ``info`` check below.
    resid = cublas.nrm2(r)
    iters = 0
    rho = 0
    while iters < maxiter:
        z = psolve(r)
        rho1 = rho
        rho = cublas.dotc(r, z)
        if iters == 0:
            p = z
        else:
            beta = rho / rho1
            p = z + beta * p
        q = matvec(p)
        alpha = rho / cublas.dotc(p, q)
        x = x + alpha * p
        r = r - alpha * q
        iters += 1
        if callback is not None:
            callback(x)
        resid = cublas.nrm2(r)
        if resid <= atol:
            break

    # info == 0 means success; a positive value is the iteration count at
    # which the method stopped without reaching the tolerance.
    info = 0
    if iters == maxiter and not (resid <= atol):
        info = iters

    return x, info
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None,
          callback=None, atol=None, callback_type=None):
    """Uses Generalized Minimal RESidual iteration to solve ``Ax = b``.

    Args:
        A (ndarray, spmatrix or LinearOperator): The real or complex
            matrix of the linear system with shape ``(n, n)``. ``A`` must be
            :class:`cupy.ndarray`, :class:`cupyx.scipy.sparse.spmatrix` or
            :class:`cupyx.scipy.sparse.linalg.LinearOperator`.
        b (cupy.ndarray): Right hand side of the linear system with shape
            ``(n,)`` or ``(n, 1)``.
        x0 (cupy.ndarray): Starting guess for the solution.
        tol (float): Tolerance for convergence.
        restart (int): Number of iterations between restarts. Larger values
            increase iteration cost, but may be necessary for convergence.
        maxiter (int): Maximum number of iterations.
        M (ndarray, spmatrix or LinearOperator): Preconditioner for ``A``.
            The preconditioner should approximate the inverse of ``A``.
            ``M`` must be :class:`cupy.ndarray`,
            :class:`cupyx.scipy.sparse.spmatrix` or
            :class:`cupyx.scipy.sparse.linalg.LinearOperator`.
        callback (function): User-specified function to call on every restart.
            It is called as ``callback(arg)``, where ``arg`` is selected by
            ``callback_type``.
        callback_type (str): 'x' or 'pr_norm'. If 'x', the current solution
            vector is used as an argument of callback function. if 'pr_norm',
            relative (preconditioned) residual norm is used as an argument.
        atol (float): Tolerance for convergence.

    Returns:
        tuple:
            It returns ``x`` (cupy.ndarray) and ``info`` (int) where ``x`` is
            the converged solution and ``info`` provides convergence
            information.

    Reference:
        M. Wang, H. Klie, M. Parashar and H. Sudan, "Solving Sparse Linear
        Systems on NVIDIA Tesla GPUs", ICCS 2009 (2009).

    .. seealso:: :func:`scipy.sparse.linalg.gmres`
    """
    A, M, x, b = _make_system(A, M, x0, b)
    matvec = A.matvec
    psolve = M.matvec

    n = A.shape[0]
    if n == 0:
        return cupy.empty_like(b), 0
    b_norm = cupy.linalg.norm(b)
    if b_norm == 0:
        return b, 0
    # Effective absolute tolerance is at least ``tol`` relative to ||b||.
    if atol is None:
        atol = tol * float(b_norm)
    else:
        atol = max(float(atol), tol * float(b_norm))
    if maxiter is None:
        maxiter = n * 10
    if restart is None:
        restart = 20
    # The Krylov subspace dimension can never exceed the problem size.
    restart = min(restart, n)
    if callback_type is None:
        callback_type = 'pr_norm'
    if callback_type not in ('x', 'pr_norm'):
        raise ValueError('Unknown callback_type: {}'.format(callback_type))
    # Disable callback dispatch entirely when no callback was supplied.
    if callback is None:
        callback_type = None

    # V holds the Krylov basis vectors, H the Hessenberg matrix built by the
    # Arnoldi process.  Fortran order matches the cuBLAS gemv calls used by
    # ``compute_hu``.  ``e`` lives on the CPU for the small lstsq solve below.
    V = cupy.empty((n, restart), dtype=A.dtype, order='F')
    H = cupy.zeros((restart+1, restart), dtype=A.dtype, order='F')
    e = numpy.zeros((restart+1,), dtype=A.dtype)

    compute_hu = _make_compute_hu(V)

    iters = 0
    while True:
        # NOTE(review): ``x`` appears to be iterated in the preconditioned
        # variable while ``mx = psolve(x)`` is the candidate solution that is
        # returned — i.e. a right-preconditioned formulation; verify against
        # the reference cited above.
        mx = psolve(x)
        r = b - matvec(mx)
        r_norm = cublas.nrm2(r)
        if callback_type == 'x':
            callback(mx)
        elif callback_type == 'pr_norm' and iters > 0:
            callback(r_norm / b_norm)
        if r_norm <= atol or iters >= maxiter:
            break
        v = r / r_norm
        V[:, 0] = v
        e[0] = r_norm

        # Arnoldi iteration
        for j in range(restart):
            z = psolve(v)
            u = matvec(z)
            # Orthogonalize ``u`` against V[:, :j+1]; ``h`` becomes column j
            # of the Hessenberg matrix.
            H[:j+1, j], u = compute_hu(u, j)
            cublas.nrm2(u, out=H[j+1, j])
            if j+1 < restart:
                v = u / H[j+1, j]
                V[:, j+1] = v

        # Note: The least-square solution to equation Hy = e is computed on CPU
        # because it is faster if the matrix size is small.
        ret = numpy.linalg.lstsq(cupy.asnumpy(H), e)
        y = cupy.array(ret[0])
        x += V @ y
        iters += restart

    # info == 0 means success; a positive value is the iteration count at
    # which the method stopped without reaching the tolerance.
    info = 0
    if iters == maxiter and not (r_norm <= atol):
        info = iters
    return mx, info
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
def cgs(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None,
        atol=None):
    """Use Conjugate Gradient Squared iteration to solve ``Ax = b``.

    Args:
        A (ndarray, spmatrix or LinearOperator): The real or complex matrix of
            the linear system with shape ``(n, n)``.
        b (cupy.ndarray): Right hand side of the linear system with shape
            ``(n,)`` or ``(n, 1)``.
        x0 (cupy.ndarray): Starting guess for the solution.
        tol (float): Tolerance for convergence.
        maxiter (int): Maximum number of iterations.
        M (ndarray, spmatrix or LinearOperator): Preconditioner for ``A``.
            The preconditioner should approximate the inverse of ``A``.
            ``M`` must be :class:`cupy.ndarray`,
            :class:`cupyx.scipy.sparse.spmatrix` or
            :class:`cupyx.scipy.sparse.linalg.LinearOperator`.
        callback (function): User-specified function to call after each
            iteration. It is called as ``callback(xk)``, where ``xk`` is the
            current solution vector.
        atol (float): Tolerance for convergence.

    Returns:
        tuple:
            It returns ``x`` (cupy.ndarray) and ``info`` (int) where ``x`` is
            the converged solution and ``info`` provides convergence
            information.

    .. seealso:: :func:`scipy.sparse.linalg.cgs`
    """
    A, M, x, b = _make_system(A, M, x0, b)

    matvec = A.matvec
    psolve = M.matvec

    n = A.shape[0]
    if n == 0:
        return cupy.empty_like(b), 0
    b_norm = cupy.linalg.norm(b)
    if b_norm == 0:
        return b, 0
    # Effective absolute tolerance is at least ``tol`` relative to ||b||.
    if atol is None:
        atol = tol * float(b_norm)
    else:
        atol = max(float(atol), tol * float(b_norm))
    if maxiter is None:
        maxiter = n * 5

    # ``r0`` is the initial (shadow) residual; it is kept fixed and used in
    # the inner products that drive the recurrences below.
    r0 = b - matvec(x)

    rho = cupy.dot(r0, r0)

    # initialise vectors
    r = r0.copy()
    u = r0
    p = r0.copy()

    iters = 0
    while True:
        y = psolve(p)
        v = matvec(y)
        sigma = cupy.dot(r0, v)
        alpha = rho / sigma
        q = u - alpha * v

        z = psolve(u + q)
        x += alpha * z
        Az = matvec(z)
        r -= alpha * Az

        # Update residual norm and check convergence
        r_norm = cupy.linalg.norm(r)

        iters += 1
        if callback is not None:
            callback(x)

        if r_norm <= atol or iters >= maxiter:
            break

        rho_new = cupy.dot(r0, r)
        beta = rho_new / rho
        rho = rho_new
        u = r + beta * q
        # In-place evaluation of p = u + beta * (q + beta * p):
        p *= beta
        p += q
        p *= beta
        p += u

    # info == 0 means success; a positive value is the iteration count at
    # which the method stopped without reaching the tolerance.
    info = 0
    if iters == maxiter and not (r_norm < atol):
        info = iters

    return x, info
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
def _make_system(A, M, x0, b):
    """Make a linear system Ax = b

    Args:
        A (cupy.ndarray or cupyx.scipy.sparse.spmatrix or
            cupyx.scipy.sparse.LinearOperator): sparse or dense matrix.
        M (cupy.ndarray or cupyx.scipy.sparse.spmatrix or
            cupyx.scipy.sparse.LinearOperator): preconditioner.
        x0 (cupy.ndarray): initial guess to iterative method.
        b (cupy.ndarray): right hand side.

    Returns:
        tuple:
            It returns (A, M, x, b).
            A (LinearOperator): matrix of linear system
            M (LinearOperator): preconditioner
            x (cupy.ndarray): initial guess
            b (cupy.ndarray): right hand side.

    Raises:
        ValueError: if ``A`` is not square, or ``b``/``x0`` shapes are not
            ``(n,)`` or ``(n, 1)``, or ``A`` and ``M`` shapes differ.
        TypeError: if the dtype is not one of float32/float64/
            complex64/complex128.
    """
    # Replace the generic matvec with a pre-configured cuSPARSE SpMV when the
    # input is a CSR matrix (see _make_fast_matvec).
    fast_matvec = _make_fast_matvec(A)
    A = _interface.aslinearoperator(A)
    if fast_matvec is not None:
        A = _interface.LinearOperator(A.shape, matvec=fast_matvec,
                                      rmatvec=A.rmatvec, dtype=A.dtype)
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix (shape: {})'.format(A.shape))
    if A.dtype.char not in 'fdFD':
        # Fixed typo in error message ('unsupprted' -> 'unsupported').
        raise TypeError('unsupported dtype (actual: {})'.format(A.dtype))
    n = A.shape[0]
    if not (b.shape == (n,) or b.shape == (n, 1)):
        raise ValueError('b has incompatible dimensions')
    b = b.astype(A.dtype).ravel()
    if x0 is None:
        x = cupy.zeros((n,), dtype=A.dtype)
    else:
        if not (x0.shape == (n,) or x0.shape == (n, 1)):
            raise ValueError('x0 has incompatible dimensions')
        x = x0.astype(A.dtype).ravel()
    if M is None:
        # No preconditioner: fall back to the identity operator.
        M = _interface.IdentityOperator(shape=A.shape, dtype=A.dtype)
    else:
        fast_matvec = _make_fast_matvec(M)
        M = _interface.aslinearoperator(M)
        if fast_matvec is not None:
            M = _interface.LinearOperator(M.shape, matvec=fast_matvec,
                                          rmatvec=M.rmatvec, dtype=M.dtype)
        if A.shape != M.shape:
            raise ValueError('matrix and preconditioner have different shapes')
    return A, M, x, b
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
def _make_fast_matvec(A):
    """Return an optimized ``matvec`` closure for a CSR matrix, or ``None``.

    When ``A`` is a CSR sparse matrix and the cuSPARSE generic SpMV API is
    available, this pre-creates the cuSPARSE handle, matrix descriptor and
    work buffer once, so each subsequent ``matvec(x)`` only needs to create
    the cheap dense-vector descriptors.  Returns ``None`` when the fast path
    does not apply and the caller should use the generic operator matvec.
    """
    # Imported lazily to avoid paying the cusparse import cost when the fast
    # path is never taken.
    from cupy_backends.cuda.libs import cusparse as _cusparse
    from cupyx import cusparse

    if _csr.isspmatrix_csr(A) and cusparse.check_availability('spmv'):
        handle = device.get_cusparse_handle()
        op_a = _cusparse.CUSPARSE_OPERATION_NON_TRANSPOSE
        # y = alpha * A @ x + beta * y  with alpha=1, beta=0, i.e. y = A @ x.
        alpha = numpy.array(1.0, A.dtype)
        beta = numpy.array(0.0, A.dtype)
        cuda_dtype = _dtype.to_cuda_dtype(A.dtype)
        alg = _cusparse.CUSPARSE_MV_ALG_DEFAULT
        # Temporary vectors used only to size the SpMV work buffer; A is
        # square here (callers validate), so shape[0] works for both.
        x = cupy.empty((A.shape[0],), dtype=A.dtype)
        y = cupy.empty((A.shape[0],), dtype=A.dtype)
        desc_A = cusparse.SpMatDescriptor.create(A)
        desc_x = cusparse.DnVecDescriptor.create(x)
        desc_y = cusparse.DnVecDescriptor.create(y)
        buff_size = _cusparse.spMV_bufferSize(
            handle, op_a, alpha.ctypes.data, desc_A.desc, desc_x.desc,
            beta.ctypes.data, desc_y.desc, cuda_dtype, alg)
        buff = cupy.empty(buff_size, cupy.int8)
        # The sizing vectors/descriptors are no longer needed; the closure
        # below keeps handle, desc_A, alpha/beta and buff alive.
        del x, desc_x, y, desc_y

        def matvec(x):
            # Compute y = A @ x using the pre-allocated work buffer.
            y = cupy.empty_like(x)
            desc_x = cusparse.DnVecDescriptor.create(x)
            desc_y = cusparse.DnVecDescriptor.create(y)
            _cusparse.spMV(
                handle, op_a, alpha.ctypes.data, desc_A.desc, desc_x.desc,
                beta.ctypes.data, desc_y.desc, cuda_dtype, alg, buff.data.ptr)
            return y

        return matvec
    return None
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
def _make_compute_hu(V):
    """Return a closure computing one Arnoldi orthogonalization step.

    ``V`` is the (n, restart) Fortran-ordered Krylov basis used by ``gmres``.
    The returned ``compute_hu(u, j)`` orthogonalizes ``u`` against the first
    ``j+1`` columns of ``V`` with two raw cuBLAS gemv calls, returning the
    projection coefficients ``h`` and the updated ``u`` (modified in place).
    """
    handle = device.get_cublas_handle()
    # Pick the gemv variant matching V's dtype (s/d/c/z).  Callers guarantee
    # the dtype is one of 'fdFD' (validated in _make_system).
    if V.dtype.char == 'f':
        gemv = _cublas.sgemv
    elif V.dtype.char == 'd':
        gemv = _cublas.dgemv
    elif V.dtype.char == 'F':
        gemv = _cublas.cgemv
    elif V.dtype.char == 'D':
        gemv = _cublas.zgemv
    n = V.shape[0]
    # Host-side scalars passed to cuBLAS by pointer (host pointer mode).
    one = numpy.array(1.0, V.dtype)
    zero = numpy.array(0.0, V.dtype)
    mone = numpy.array(-1.0, V.dtype)

    def compute_hu(u, j):
        # h = V[:, :j+1].conj().T @ u
        # u -= V[:, :j+1] @ h
        h = cupy.empty((j+1,), dtype=V.dtype)
        # CUBLAS_OP_C: conjugate-transpose product; leading dimension is n
        # because V is Fortran-ordered.
        gemv(handle, _cublas.CUBLAS_OP_C, n, j+1, one.ctypes.data, V.data.ptr,
             n, u.data.ptr, 1, zero.ctypes.data, h.data.ptr, 1)
        # u = u - V[:, :j+1] @ h  (alpha=-1, beta=1 accumulates into u).
        gemv(handle, _cublas.CUBLAS_OP_N, n, j+1, mone.ctypes.data, V.data.ptr,
             n, h.data.ptr, 1, one.ctypes.data, u.data.ptr, 1)
        return h, u
    return compute_hu
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_lobpcg.py
ADDED
|
@@ -0,0 +1,674 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
|
| 3 |
+
import numpy
|
| 4 |
+
import cupy
|
| 5 |
+
import cupy.linalg as linalg
|
| 6 |
+
# waiting implementation of the following modules in PR #4172
|
| 7 |
+
# from cupyx.scipy.linalg import (cho_factor, cho_solve)
|
| 8 |
+
from cupyx.scipy.sparse import linalg as splinalg
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def _cholesky(B):
    """Cholesky-factorize ``B``, raising ``LinAlgError`` when NaNs appear.

    Wrapper around :func:`cupy.linalg.cholesky`: on GPU the factorization of
    a non-positive-definite input can yield NaNs in the output rather than
    raising, so the result is checked explicitly.
    """
    factor = cupy.linalg.cholesky(B)
    if cupy.isnan(factor).any():
        raise numpy.linalg.LinAlgError
    return factor
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# TODO: This helper function can be replaced after cupy.block is supported
|
| 23 |
+
# TODO: This helper function can be replaced after cupy.block is supported
def _bmat(list_obj):
    """Assemble a block matrix from a 2-D list of dense cupy arrays.

    Row heights are taken from the first column and column widths from the
    first row; the output dtype is the common ``result_type`` of all blocks.
    Fortran order is used only when every block is F- but not C-contiguous.
    """
    num_block_rows = len(list_obj)
    num_block_cols = len(list_obj[0])
    total_rows = sum(list_obj[i][0].shape[0] for i in range(num_block_rows))
    total_cols = sum(list_obj[0][j].shape[1] for j in range(num_block_cols))
    flat_blocks = [arr for block_row in list_obj for arr in block_row]
    out_dtype = cupy.result_type(*[arr.dtype for arr in flat_blocks])
    all_f = all(arr.flags['F_CONTIGUOUS'] for arr in flat_blocks)
    all_c = all(arr.flags['C_CONTIGUOUS'] for arr in flat_blocks)
    out_order = 'F' if all_f and not all_c else 'C'
    out = cupy.empty((total_rows, total_cols), dtype=out_dtype,
                     order=out_order)

    row_start = 0
    for i in range(num_block_rows):
        row_end = row_start + list_obj[i][0].shape[0]
        col_start = 0
        for j in range(num_block_cols):
            block = list_obj[i][j]
            col_end = col_start + block.shape[1]
            out[row_start:row_end, col_start:col_end] = block
            col_start = col_end
        row_start = row_end
    return out
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def _report_nonhermitian(M, name):
    """Warn if ``M`` deviates from Hermitian symmetry beyond a scaled eps."""
    deviation = linalg.norm(M - M.T.conj(), 1)
    # Tolerance scales with the matrix 1-norm (floored at 1).
    threshold = 10 * cupy.finfo(M.dtype).eps
    threshold *= max(1, float(linalg.norm(M, 1)))
    if deviation > threshold:
        warnings.warn(
            f'Matrix {name} of the type {M.dtype} is not Hermitian: '
            f'condition: {deviation} < {threshold} fails.',
            UserWarning, stacklevel=4)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def _as2d(ar):
    """Return ``ar`` as 2-D, viewing a 1-D input as an (n, 1) column vector."""
    if ar.ndim != 2:
        # Non-2D input is assumed to be 1-D; reshape a no-copy view of it.
        column = cupy.array(ar, copy=False)
        column.shape = (ar.shape[0], 1)
        return column
    return ar
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def _makeOperator(operatorInput, expectedShape):
    """Wrap a dense array, sparse matrix or callable as a LinearOperator.

    Returns ``None`` for ``None`` input; raises ``ValueError`` if the
    resulting operator's shape differs from ``expectedShape``.
    """
    if operatorInput is None:
        return None
    operator = splinalg.aslinearoperator(operatorInput)
    if operator.shape != expectedShape:
        raise ValueError('operator has invalid shape')
    return operator
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def _applyConstraints(blockVectorV, YBY, blockVectorBY, blockVectorY):
    """Remove the Y components from ``blockVectorV`` (modified in place)."""
    projections = cupy.dot(blockVectorBY.T.conj(), blockVectorV)
    # NOTE: `cho_solve(factYBY, ...)` would be preferable once
    # cupyx.scipy.linalg.cho_solve is available (PR #4172).
    correction = linalg.solve(YBY, projections)
    blockVectorV -= cupy.dot(blockVectorY, correction)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
    """B-orthonormalize the given block vector using Cholesky.

    On Cholesky failure (which can happen for ill-conditioned blocks) all
    outputs are returned as ``None`` so the caller can detect and recover.
    When ``B is None`` the B-product equals the block itself, and the
    returned ``blockVectorBV`` is ``None`` in the success path.
    """
    # Scale each column by its max (plus eps to avoid division by zero)
    # before forming the Gram matrix, for numerical stability.
    normalization = blockVectorV.max(
        axis=0) + cupy.finfo(blockVectorV.dtype).eps
    blockVectorV = blockVectorV / normalization
    if blockVectorBV is None:
        if B is not None:
            blockVectorBV = B(blockVectorV)
        else:
            blockVectorBV = blockVectorV
    else:
        # Keep the supplied B-product consistent with the rescaled block.
        blockVectorBV = blockVectorBV / normalization
    VBV = cupy.matmul(blockVectorV.T.conj(), blockVectorBV)
    try:
        # VBV is a Cholesky factor
        VBV = _cholesky(VBV)
        # VBV now holds inv(R).T; multiplying by it B-orthonormalizes V.
        VBV = linalg.inv(VBV.T)
        blockVectorV = cupy.matmul(blockVectorV, VBV)
        if B is not None:
            blockVectorBV = cupy.matmul(blockVectorBV, VBV)
        else:
            blockVectorBV = None
    except numpy.linalg.LinAlgError:
        # LinAlg Error: cholesky transformation might fail in rare cases
        # raise ValueError("cholesky has failed")
        blockVectorV = None
        blockVectorBV = None
        VBV = None

    if retInvR:
        return blockVectorV, blockVectorBV, VBV, normalization
    else:
        return blockVectorV, blockVectorBV
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def _get_indx(_lambda, num, largest):
    """Get `num` indices into `_lambda` depending on `largest` option."""
    order = cupy.argsort(_lambda)
    # argsort is ascending: take the tail reversed for the largest values,
    # the head for the smallest.
    return order[:-num - 1:-1] if largest else order[:num]
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
# TODO: This helper function can be replaced after cupy.eigh
|
| 164 |
+
# supports generalized eigen value problems.
|
| 165 |
+
# TODO: This helper function can be replaced after cupy.eigh
# supports generalized eigen value problems.
def _eigh(A, B=None):
    """Solve the (generalized) symmetric eigenproblem ``A x = lambda B x``.

    With ``B`` given, the problem is reduced to a standard one via the
    Cholesky factorization ``B = R R^T``; eigenvectors are mapped back
    through ``inv(R.T)``.
    """
    if B is None:  # use cupy's eigh in standard case
        return linalg.eigh(A)
    chol = _cholesky(B)
    chol_inv = linalg.inv(chol)
    cholT_inv = linalg.inv(chol.T)
    # Reduced standard problem: F = inv(R) @ A @ inv(R.T).
    reduced = cupy.matmul(chol_inv, cupy.matmul(A, cholT_inv))
    vals, vecs = linalg.eigh(reduced)
    # Map eigenvectors of the reduced problem back to the original basis.
    return vals, cupy.matmul(cholT_inv, vecs)
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def lobpcg(A, X,
           B=None, M=None, Y=None,
           tol=None, maxiter=None,
           largest=True, verbosityLevel=0,
           retLambdaHistory=False, retResidualNormsHistory=False):
    """Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)

    LOBPCG is a preconditioned eigensolver for large symmetric positive
    definite (SPD) generalized eigenproblems.

    Args:
        A (array-like): The symmetric linear operator of the problem,
            usually a sparse matrix. Can be of the following types
            - cupy.ndarray
            - cupyx.scipy.sparse.csr_matrix
            - cupy.scipy.sparse.linalg.LinearOperator
        X (cupy.ndarray): Initial approximation to the ``k``
            eigenvectors (non-sparse). If `A` has ``shape=(n,n)``
            then `X` should have shape ``shape=(n,k)``.
        B (array-like): The right hand side operator in a generalized
            eigenproblem. By default, ``B = Identity``.
            Can be of following types:
            - cupy.ndarray
            - cupyx.scipy.sparse.csr_matrix
            - cupy.scipy.sparse.linalg.LinearOperator
        M (array-like): Preconditioner to `A`; by default ``M = Identity``.
            `M` should approximate the inverse of `A`.
            Can be of the following types:
            - cupy.ndarray
            - cupyx.scipy.sparse.csr_matrix
            - cupy.scipy.sparse.linalg.LinearOperator
        Y (cupy.ndarray):
            `n-by-sizeY` matrix of constraints (non-sparse), `sizeY < n`
            The iterations will be performed in the B-orthogonal complement
            of the column-space of Y. Y must be full rank.
        tol (float):
            Solver tolerance (stopping criterion).
            The default is ``tol=n*sqrt(eps)``.
        maxiter (int):
            Maximum number of iterations. The default is ``maxiter = 20``.
        largest (bool):
            When True, solve for the largest eigenvalues,
            otherwise the smallest.
        verbosityLevel (int):
            Controls solver output. The default is ``verbosityLevel=0``.
        retLambdaHistory (bool):
            Whether to return eigenvalue history. Default is False.
        retResidualNormsHistory (bool):
            Whether to return history of residual norms. Default is False.

    Returns:
        tuple:
            - `w` (cupy.ndarray): Array of ``k`` eigenvalues
            - `v` (cupy.ndarray) An array of ``k`` eigenvectors.
              `v` has the same shape as `X`.
            - `lambdas` (list of cupy.ndarray): The eigenvalue history,
              if `retLambdaHistory` is True.
            - `rnorms` (list of cupy.ndarray): The history of residual norms,
              if `retResidualNormsHistory` is True.

    .. seealso:: :func:`scipy.sparse.linalg.lobpcg`

    .. note::
        If both ``retLambdaHistory`` and ``retResidualNormsHistory`` are `True`
        the return tuple has the following format
        ``(lambda, V, lambda history, residual norms history)``.
    """
    blockVectorX = X
    blockVectorY = Y
    residualTolerance = tol

    if maxiter is None:
        maxiter = 20

    # sizeY: number of constraint vectors (columns of Y).
    if blockVectorY is not None:
        sizeY = blockVectorY.shape[1]
    else:
        sizeY = 0

    if len(blockVectorX.shape) != 2:
        raise ValueError('expected rank-2 array for argument X')

    # n: problem size; sizeX: block size (number of requested eigenpairs).
    n, sizeX = blockVectorX.shape

    if verbosityLevel:
        # Build and print a one-shot summary of the problem configuration.
        aux = "Solving "
        if B is None:
            aux += "standard"
        else:
            aux += "generalized"
        aux += " eigenvalue problem with"
        if M is None:
            aux += "out"
        aux += " preconditioning\n\n"
        aux += "matrix size %d\n" % n
        aux += "block size %d\n\n" % sizeX
        if blockVectorY is None:
            aux += "No constraints\n\n"
        else:
            if sizeY > 1:
                aux += "%d constraints\n\n" % sizeY
            else:
                aux += "%d constraint\n\n" % sizeY
        print(aux)

    # Wrap A, B, M into uniform (n, n) operator callables.
    A = _makeOperator(A, (n, n))
    B = _makeOperator(B, (n, n))
    M = _makeOperator(M, (n, n))

    if (n - sizeY) < (5 * sizeX):
        # The problem size is small compared to the block size.
        # Using dense general eigensolver instead of LOBPCG.
        sizeX = min(sizeX, n)

        if blockVectorY is not None:
            raise NotImplementedError('The dense eigensolver '
                                      'does not support constraints.')

        # Materialize the operators as dense matrices by applying them
        # to the identity.
        A_dense = A(cupy.eye(n, dtype=A.dtype))
        B_dense = None if B is None else B(cupy.eye(n, dtype=B.dtype))

        # call numerically unstable general eigen solver
        vals, vecs = _eigh(A_dense, B_dense)
        if largest:
            # Reverse order to be compatible with eigs() in 'LM' mode.
            vals = vals[::-1]
            vecs = vecs[:, ::-1]

        vals = vals[:sizeX]
        vecs = vecs[:, :sizeX]

        return vals, vecs

    if (residualTolerance is None) or (residualTolerance <= 0.0):
        # Default tolerance: n * sqrt(eps) with eps ~ 1e-15 (float64).
        residualTolerance = cupy.sqrt(1e-15) * n

    # Apply constraints to X.
    if blockVectorY is not None:

        if B is not None:
            blockVectorBY = B(blockVectorY)
        else:
            blockVectorBY = blockVectorY

        # gramYBY is a dense array.
        gramYBY = cupy.dot(blockVectorY.T.conj(), blockVectorBY)

        # awaiting implementation of cho_factor in PR #4172
        # try:
        #     # gramYBY is a Cholesky factor from now on...
        #     gramYBY = cho_factor(gramYBY)
        # except numpy.linalg.LinAlgError:
        #     raise ValueError("cannot handle linearly dependent constraints")

        _applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)

    # B-orthonormalize X.
    blockVectorX, blockVectorBX = _b_orthonormalize(B, blockVectorX)

    # Compute the initial Ritz vectors: solve the eigenproblem.
    blockVectorAX = A(blockVectorX)
    gramXAX = cupy.dot(blockVectorX.T.conj(), blockVectorAX)

    _lambda, eigBlockVector = _eigh(gramXAX)
    ii = _get_indx(_lambda, sizeX, largest)
    _lambda = _lambda[ii]

    eigBlockVector = cupy.asarray(eigBlockVector[:, ii])
    blockVectorX = cupy.dot(blockVectorX, eigBlockVector)
    blockVectorAX = cupy.dot(blockVectorAX, eigBlockVector)
    if B is not None:
        blockVectorBX = cupy.dot(blockVectorBX, eigBlockVector)

    # Active index set: True for eigenpairs that have not yet converged.
    activeMask = cupy.ones((sizeX,), dtype=bool)

    lambdaHistory = [_lambda]
    residualNormsHistory = []

    previousBlockSize = sizeX
    ident = cupy.eye(sizeX, dtype=A.dtype)
    ident0 = cupy.eye(sizeX, dtype=A.dtype)

    ##
    # Main iteration loop.

    blockVectorP = None  # set during iteration
    blockVectorAP = None
    blockVectorBP = None

    iterationNumber = -1
    restart = True
    explicitGramFlag = False
    while iterationNumber < maxiter:
        iterationNumber += 1

        # Residuals R = A X - B X diag(lambda).
        if B is not None:
            aux = blockVectorBX * _lambda[cupy.newaxis, :]
        else:
            aux = blockVectorX * _lambda[cupy.newaxis, :]

        blockVectorR = blockVectorAX - aux

        # Column-wise 2-norms of the residual block.
        aux = cupy.sum(blockVectorR.conj() * blockVectorR, 0)
        residualNorms = cupy.sqrt(aux)

        residualNormsHistory.append(residualNorms)

        # Deactivate eigenpairs whose residual dropped below tolerance.
        ii = cupy.where(residualNorms > residualTolerance, True, False)
        activeMask = activeMask & ii

        currentBlockSize = int(activeMask.sum())
        if currentBlockSize != previousBlockSize:
            previousBlockSize = currentBlockSize
            ident = cupy.eye(currentBlockSize, dtype=A.dtype)

        if currentBlockSize == 0:
            # All eigenpairs converged.
            break

        if verbosityLevel > 0:
            print('iteration %d' % iterationNumber)
            print(f'current block size: {currentBlockSize}')
            print(f'eigenvalue(s):\n{_lambda}')
            print(f'residual norm(s):\n{residualNorms}')
        if verbosityLevel > 10:
            print(eigBlockVector)

        activeBlockVectorR = _as2d(blockVectorR[:, activeMask])

        if iterationNumber > 0:
            activeBlockVectorP = _as2d(blockVectorP[:, activeMask])
            activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask])
            if B is not None:
                activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask])

        if M is not None:
            # Apply preconditioner T to the active residuals.
            activeBlockVectorR = M(activeBlockVectorR)

        # Apply constraints to the preconditioned residuals.
        if blockVectorY is not None:
            _applyConstraints(activeBlockVectorR,
                              gramYBY, blockVectorBY, blockVectorY)

        # B-orthogonalize the preconditioned residuals to X.
        if B is not None:
            activeBlockVectorR = activeBlockVectorR - \
                cupy.matmul(blockVectorX,
                            cupy.matmul(blockVectorBX.T.conj(),
                                        activeBlockVectorR))
        else:
            activeBlockVectorR = activeBlockVectorR - \
                cupy.matmul(blockVectorX,
                            cupy.matmul(blockVectorX.T.conj(),
                                        activeBlockVectorR))

        ##
        # B-orthonormalize the preconditioned residuals.
        aux = _b_orthonormalize(B, activeBlockVectorR)
        activeBlockVectorR, activeBlockVectorBR = aux

        activeBlockVectorAR = A(activeBlockVectorR)

        if iterationNumber > 0:
            # Re-orthonormalize the search directions P from the previous
            # iteration and rescale A P consistently.
            if B is not None:
                aux = _b_orthonormalize(B, activeBlockVectorP,
                                        activeBlockVectorBP, retInvR=True)
                activeBlockVectorP, activeBlockVectorBP, invR, normal = aux
            else:
                aux = _b_orthonormalize(B, activeBlockVectorP, retInvR=True)
                activeBlockVectorP, _, invR, normal = aux
            # Function _b_orthonormalize returns None if Cholesky fails
            if activeBlockVectorP is not None:
                activeBlockVectorAP = activeBlockVectorAP / normal
                activeBlockVectorAP = cupy.dot(activeBlockVectorAP, invR)
                restart = False
            else:
                restart = True

        ##
        # Perform the Rayleigh Ritz Procedure:
        # Compute symmetric Gram matrices:

        # Threshold below which the implicit Gram representation is no
        # longer numerically reliable and explicit products are used.
        if activeBlockVectorAR.dtype == 'float32':
            myeps = 1
        elif activeBlockVectorR.dtype == 'float32':
            myeps = 1e-4
        else:
            myeps = 1e-8

        if residualNorms.max() > myeps and not explicitGramFlag:
            explicitGramFlag = False
        else:
            # Once explicitGramFlag, forever explicitGramFlag.
            explicitGramFlag = True

        # Shared memory assignments to simplify the code
        if B is None:
            blockVectorBX = blockVectorX
            activeBlockVectorBR = activeBlockVectorR
            if not restart:
                activeBlockVectorBP = activeBlockVectorP

        # Common submatrices:
        gramXAR = cupy.dot(blockVectorX.T.conj(), activeBlockVectorAR)
        gramRAR = cupy.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)

        if explicitGramFlag:
            # Symmetrize and compute every Gram block explicitly.
            gramRAR = (gramRAR + gramRAR.T.conj()) / 2
            gramXAX = cupy.dot(blockVectorX.T.conj(), blockVectorAX)
            gramXAX = (gramXAX + gramXAX.T.conj()) / 2
            gramXBX = cupy.dot(blockVectorX.T.conj(), blockVectorBX)
            gramRBR = cupy.dot(activeBlockVectorR.T.conj(),
                               activeBlockVectorBR)
            gramXBR = cupy.dot(blockVectorX.T.conj(), activeBlockVectorBR)
        else:
            # Implicit forms valid while X, R stay B-orthonormal.
            gramXAX = cupy.diag(_lambda)
            gramXBX = ident0
            gramRBR = ident
            gramXBR = cupy.zeros((int(sizeX), int(currentBlockSize)),
                                 dtype=A.dtype)

        def _handle_gramA_gramB_verbosity(gramA, gramB):
            # Diagnostic helper: report deviation of gramA/gramB from
            # Hermitian symmetry and optionally dump them to text files.
            if verbosityLevel > 0:
                _report_nonhermitian(gramA, 'gramA')
                _report_nonhermitian(gramB, 'gramB')
            if verbosityLevel > 10:
                # Note: not documented, but leave it in here for now
                numpy.savetxt('gramA.txt', cupy.asnumpy(gramA))
                numpy.savetxt('gramB.txt', cupy.asnumpy(gramB))

        if not restart:
            # Full 3x3 block Rayleigh-Ritz over span(X, R, P).
            gramXAP = cupy.dot(blockVectorX.T.conj(), activeBlockVectorAP)
            gramRAP = cupy.dot(activeBlockVectorR.T.conj(),
                               activeBlockVectorAP)
            gramPAP = cupy.dot(activeBlockVectorP.T.conj(),
                               activeBlockVectorAP)
            gramXBP = cupy.dot(blockVectorX.T.conj(), activeBlockVectorBP)
            gramRBP = cupy.dot(activeBlockVectorR.T.conj(),
                               activeBlockVectorBP)
            if explicitGramFlag:
                gramPAP = (gramPAP + gramPAP.T.conj()) / 2
                gramPBP = cupy.dot(activeBlockVectorP.T.conj(),
                                   activeBlockVectorBP)
            else:
                gramPBP = ident

            gramA = _bmat([[gramXAX, gramXAR, gramXAP],
                           [gramXAR.T.conj(), gramRAR, gramRAP],
                           [gramXAP.T.conj(), gramRAP.T.conj(), gramPAP]])
            gramB = _bmat([[gramXBX, gramXBR, gramXBP],
                           [gramXBR.T.conj(), gramRBR, gramRBP],
                           [gramXBP.T.conj(), gramRBP.T.conj(), gramPBP]])

            _handle_gramA_gramB_verbosity(gramA, gramB)

            try:
                _lambda, eigBlockVector = _eigh(gramA, gramB)
            except numpy.linalg.LinAlgError:
                # try again after dropping the direction vectors P from RR
                restart = True

        if restart:
            # 2x2 block Rayleigh-Ritz over span(X, R) only.
            gramA = _bmat([[gramXAX, gramXAR],
                           [gramXAR.T.conj(), gramRAR]])
            gramB = _bmat([[gramXBX, gramXBR],
                           [gramXBR.T.conj(), gramRBR]])

            _handle_gramA_gramB_verbosity(gramA, gramB)

            try:
                _lambda, eigBlockVector = _eigh(gramA, gramB)
            except numpy.linalg.LinAlgError:
                raise ValueError('eigh has failed in lobpcg iterations')

        ii = _get_indx(_lambda, sizeX, largest)
        if verbosityLevel > 10:
            print(ii)
            print(_lambda)

        _lambda = _lambda[ii]
        eigBlockVector = eigBlockVector[:, ii]

        lambdaHistory.append(_lambda)

        if verbosityLevel > 10:
            print('lambda:', _lambda)

        if verbosityLevel > 10:
            print(eigBlockVector)

        # Compute Ritz vectors.
        if B is not None:
            if not restart:
                # Split the small eigenvector into its X / R / P parts.
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:sizeX +
                                                 currentBlockSize]
                eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]

                pp = cupy.dot(activeBlockVectorR, eigBlockVectorR)
                pp += cupy.dot(activeBlockVectorP, eigBlockVectorP)

                app = cupy.dot(activeBlockVectorAR, eigBlockVectorR)
                app += cupy.dot(activeBlockVectorAP, eigBlockVectorP)

                bpp = cupy.dot(activeBlockVectorBR, eigBlockVectorR)
                bpp += cupy.dot(activeBlockVectorBP, eigBlockVectorP)
            else:
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:]

                pp = cupy.dot(activeBlockVectorR, eigBlockVectorR)
                app = cupy.dot(activeBlockVectorAR, eigBlockVectorR)
                bpp = cupy.dot(activeBlockVectorBR, eigBlockVectorR)

            if verbosityLevel > 10:
                print(pp)
                print(app)
                print(bpp)

            blockVectorX = cupy.dot(blockVectorX, eigBlockVectorX) + pp
            blockVectorAX = cupy.dot(blockVectorAX, eigBlockVectorX) + app
            blockVectorBX = cupy.dot(blockVectorBX, eigBlockVectorX) + bpp

            blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp

        else:
            if not restart:
                # Split the small eigenvector into its X / R / P parts.
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:sizeX +
                                                 currentBlockSize]
                eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]

                pp = cupy.dot(activeBlockVectorR, eigBlockVectorR)
                pp += cupy.dot(activeBlockVectorP, eigBlockVectorP)

                app = cupy.dot(activeBlockVectorAR, eigBlockVectorR)
                app += cupy.dot(activeBlockVectorAP, eigBlockVectorP)
            else:
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:]

                pp = cupy.dot(activeBlockVectorR, eigBlockVectorR)
                app = cupy.dot(activeBlockVectorAR, eigBlockVectorR)

            if verbosityLevel > 10:
                print(pp)
                print(app)

            blockVectorX = cupy.dot(blockVectorX, eigBlockVectorX) + pp
            blockVectorAX = cupy.dot(blockVectorAX, eigBlockVectorX) + app

            blockVectorP, blockVectorAP = pp, app

    # Final residual computation for reporting (after the iteration loop).
    if B is not None:
        aux = blockVectorBX * _lambda[cupy.newaxis, :]

    else:
        aux = blockVectorX * _lambda[cupy.newaxis, :]

    blockVectorR = blockVectorAX - aux

    aux = cupy.sum(blockVectorR.conj() * blockVectorR, 0)
    residualNorms = cupy.sqrt(aux)

    if verbosityLevel > 0:
        print(f'Final iterative eigenvalue(s):\n{_lambda}')
        print(f'Final iterative residual norm(s):\n{residualNorms}')

    # Future work:
    # Generalized eigen value solver like `scipy.linalg.eigh`
    # that takes in `B` matrix as input
    # `cupy.linalg.cholesky` is more unstable than `scipy.linalg.cholesky`
    # Making sure eigenvectors "exactly" satisfy the blockVectorY constraints?
    # Making sure eigenvectors are "exactly" orthonormalized by final
    # "exact" RR
    # Computing the actual true residuals

    if verbosityLevel > 0:
        print(f'Final postprocessing eigenvalue(s):\n{_lambda}')
        print(f'Final residual norm(s):\n{residualNorms}')

    if retLambdaHistory:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
        else:
            return _lambda, blockVectorX, lambdaHistory
    else:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, residualNormsHistory
        else:
            return _lambda, blockVectorX
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_norm.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy
|
| 2 |
+
|
| 3 |
+
import cupy
|
| 4 |
+
import cupyx.scipy.sparse
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def _sparse_frobenius_norm(x):
    """Frobenius norm of a sparse matrix: sqrt(sum(|x_ij|^2))."""
    # For complex data take |x| first so that power(2) gives squared
    # magnitudes; real data can be squared directly.
    if cupy.issubdtype(x.dtype, cupy.complexfloating):
        squared_sum = abs(x).power(2).sum()
    else:
        squared_sum = x.power(2).sum()
    return cupy.sqrt(squared_sum)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def norm(x, ord=None, axis=None):
    """Norm of a cupy.scipy.spmatrix

    This function is able to return one of seven different sparse matrix
    norms, depending on the value of the ``ord`` parameter.

    Args:
        x (sparse matrix) : Input sparse matrix.
        ord (non-zero int, inf, -inf, 'fro', optional) : Order of the norm
            (see table under ``Notes``). inf means numpy's `inf` object.
        axis : (int, 2-tuple of ints, None, optional): If `axis` is an
            integer, it specifies the axis of `x` along which to
            compute the vector norms. If `axis` is a 2-tuple, it specifies
            the axes that hold 2-D matrices, and the matrix norms of these
            matrices are computed. If `axis` is None then either a vector
            norm (when `x` is 1-D) or a matrix norm (when `x` is 2-D) is
            returned.
    Returns:
        ndarray : 0-D or 1-D array or norm(s).

    .. seealso:: :func:`scipy.sparse.linalg.norm`
    """

    if not cupyx.scipy.sparse.issparse(x):
        raise TypeError(("input is not sparse. use cupy.linalg.norm"))

    # Fast path: whole-matrix Frobenius norm needs no format conversion.
    if axis is None and ord in (None, 'fro', 'f'):
        return _sparse_frobenius_norm(x)

    # The remaining norms rely on operations available on CSR.
    x = x.tocsr()

    # Canonicalize `axis` into a tuple of ints.
    if axis is None:
        axis = (0, 1)
    elif not isinstance(axis, tuple):
        msg = "'axis' must be None, an integer or a tuple of integers"
        try:
            int_axis = int(axis)
        except TypeError:
            raise TypeError(msg)
        if axis != int_axis:
            raise TypeError(msg)
        axis = (int_axis,)

    nd = 2
    if len(axis) == 2:
        # Matrix norm over the two given axes.
        row_axis, col_axis = axis
        if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
            raise ValueError('Invalid axis %r for an array with shape %r' %
                             (axis, x.shape))
        if row_axis % nd == col_axis % nd:
            raise ValueError('Duplicate axes given.')
        if ord in (None, 'f', 'fro'):
            # The axis order does not matter for this norm.
            return _sparse_frobenius_norm(x)
        elif ord == 1:
            # Maximum absolute column sum.
            return abs(x).sum(axis=row_axis).max()
        elif ord == -1:
            # Minimum absolute column sum.
            return abs(x).sum(axis=row_axis).min()
        elif ord == numpy.inf:
            # Maximum absolute row sum.
            return abs(x).sum(axis=col_axis).max()
        elif ord == -numpy.inf:
            # Minimum absolute row sum.
            return abs(x).sum(axis=col_axis).min()
        elif ord == 2 or ord == -2:
            # Spectral norms would require an SVD of the sparse matrix.
            raise NotImplementedError
            # return _multi_svd_norm(x, row_axis, col_axis, ...)
        else:
            raise ValueError("Invalid norm order for matrices.")
    elif len(axis) == 1:
        # Vector norm along a single axis.
        a, = axis
        if not (-nd <= a < nd):
            raise ValueError('Invalid axis %r for an array with shape %r' %
                             (axis, x.shape))
        if ord == numpy.inf:
            return abs(x).max(axis=a).toarray().ravel()
        elif ord == -numpy.inf:
            return abs(x).min(axis=a).toarray().ravel()
        elif ord == 0:
            # Zero "norm": count of non-zero entries along the axis.
            return (x != 0).astype(numpy.float32).sum(axis=a).ravel().astype(
                numpy.int_)
        elif ord == 1:
            # special case for speedup
            return abs(x).sum(axis=a).ravel()
        elif ord in (2, None):
            # Euclidean norm.
            return cupy.sqrt(abs(x).power(2).sum(axis=a)).ravel()
        else:
            # General p-norm; `ord` must support arithmetic.
            try:
                ord + 1
            except TypeError:
                raise ValueError('Invalid norm order for vectors.')
            return cupy.power(abs(x).power(ord).sum(axis=a), 1 / ord).ravel()
    else:
        raise ValueError("Improper number of dimensions to norm.")
|
vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/linalg/_solve.py
ADDED
|
@@ -0,0 +1,1036 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy
|
| 2 |
+
|
| 3 |
+
import cupy
|
| 4 |
+
from cupy import cublas
|
| 5 |
+
from cupy.cuda import device
|
| 6 |
+
from cupy.cuda import runtime
|
| 7 |
+
from cupy.linalg import _util
|
| 8 |
+
from cupyx.scipy import sparse
|
| 9 |
+
from cupyx.scipy.sparse.linalg import _interface
|
| 10 |
+
from cupyx.scipy.sparse.linalg._iterative import _make_system
|
| 11 |
+
|
| 12 |
+
import warnings
|
| 13 |
+
try:
|
| 14 |
+
import scipy.sparse
|
| 15 |
+
import scipy.sparse.linalg
|
| 16 |
+
scipy_available = True
|
| 17 |
+
except ImportError:
|
| 18 |
+
scipy_available = False
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def lsqr(A, b):
    """Solves linear system with QR decomposition.

    Find the solution to a large, sparse, linear system of equations.
    The function solves ``Ax = b``. Given two-dimensional matrix ``A`` is
    decomposed into ``Q * R``.

    Args:
        A (cupy.ndarray or cupyx.scipy.sparse.csr_matrix): The input matrix
            with dimension ``(N, N)``
        b (cupy.ndarray): Right-hand side vector.

    Returns:
        tuple:
            Its length must be ten. It has same type elements
            as SciPy. Only the first element, the solution vector ``x``, is
            available and other elements are expressed as ``None`` because
            the implementation of cuSOLVER is different from the one of SciPy.
            You can easily calculate the fourth element by ``norm(b - Ax)``
            and the ninth element by ``norm(x)``.

    .. seealso:: :func:`scipy.sparse.linalg.lsqr`
    """
    # Imported lazily so that merely importing this module does not require
    # the cuSOLVER bindings.
    from cupy_backends.cuda.libs import cusolver

    if runtime.is_hip:
        raise RuntimeError('HIP does not support lsqr')
    # csrlsvqr operates on CSR storage only; convert any other input.
    if not sparse.isspmatrix_csr(A):
        A = sparse.csr_matrix(A)
    # csr_matrix is 2d
    _util._assert_stacked_square(A)
    _util._assert_cupy_array(b)
    m = A.shape[0]
    if b.ndim != 1 or len(b) != m:
        raise ValueError('b must be 1-d array whose size is same as A')

    # Cast to float32 or float64
    if A.dtype == 'f' or A.dtype == 'd':
        dtype = A.dtype
    else:
        # Integer/half inputs are promoted to at least float32.
        dtype = numpy.promote_types(A.dtype, 'f')

    handle = device.get_cusolver_sp_handle()
    nnz = A.nnz
    # tol/reorder are csrlsvqr parameters: tolerance used for singularity
    # detection and the fill-reducing reordering scheme selector
    # (see cuSOLVER csrlsvqr documentation).
    tol = 1.0
    reorder = 1
    x = cupy.empty(m, dtype=dtype)
    # Host-side output slot: csrlsvqr reports the index of the first
    # (near-)singular pivot here, or -1 if none (not checked by this wrapper).
    singularity = numpy.empty(1, numpy.int32)

    # Pick the single- or double-precision cuSOLVER entry point.
    if dtype == 'f':
        csrlsvqr = cusolver.scsrlsvqr
    else:
        csrlsvqr = cusolver.dcsrlsvqr
    csrlsvqr(
        handle, m, nnz, A._descr.descriptor, A.data.data.ptr,
        A.indptr.data.ptr, A.indices.data.ptr, b.data.ptr, tol, reorder,
        x.data.ptr, singularity.ctypes.data)

    # The return type of SciPy is always float64. Therefore, x must be casted.
    x = x.astype(numpy.float64)
    # 10-tuple mirrors scipy.sparse.linalg.lsqr; only x is computed here.
    ret = (x, None, None, None, None, None, None, None, None, None)
    return ret
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def lsmr(A, b, x0=None, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
         maxiter=None):
    """Iterative solver for least-squares problems.

    lsmr solves the system of linear equations ``Ax = b``. If the system
    is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.
    A is a rectangular matrix of dimension m-by-n, where all cases are
    allowed: m = n, m > n, or m < n. B is a vector of length m.
    The matrix A may be dense or sparse (usually sparse).

    Args:
        A (ndarray, spmatrix or LinearOperator): The real or complex
            matrix of the linear system. ``A`` must be
            :class:`cupy.ndarray`, :class:`cupyx.scipy.sparse.spmatrix` or
            :class:`cupyx.scipy.sparse.linalg.LinearOperator`.
        b (cupy.ndarray): Right hand side of the linear system with shape
            ``(m,)`` or ``(m, 1)``.
        x0 (cupy.ndarray): Starting guess for the solution. If None zeros are
            used.
        damp (float): Damping factor for regularized least-squares.
            `lsmr` solves the regularized least-squares problem
            ::

                min ||(b) - (  A   )x||
                    ||(0)   (damp*I) ||_2

            where damp is a scalar. If damp is None or 0, the system
            is solved without regularization.
        atol, btol (float):
            Stopping tolerances. `lsmr` continues iterations until a
            certain backward error estimate is smaller than some quantity
            depending on atol and btol.
        conlim (float): `lsmr` terminates if an estimate of ``cond(A)`` i.e.
            condition number of matrix exceeds `conlim`. If `conlim` is None,
            the default value is 1e+8.
        maxiter (int): Maximum number of iterations.

    Returns:
        tuple:
            - `x` (ndarray): Least-square solution returned.
            - `istop` (int): istop gives the reason for stopping::

                0 means x=0 is a solution.

                1 means x is an approximate solution to A*x = B,
                according to atol and btol.

                2 means x approximately solves the least-squares problem
                according to atol.

                3 means COND(A) seems to be greater than CONLIM.

                4 is the same as 1 with atol = btol = eps (machine
                precision)

                5 is the same as 2 with atol = eps.

                6 is the same as 3 with CONLIM = 1/eps.

                7 means ITN reached maxiter before the other stopping
                conditions were satisfied.

            - `itn` (int): Number of iterations used.
            - `normr` (float): ``norm(b-Ax)``
            - `normar` (float): ``norm(A^T (b - Ax))``
            - `norma` (float): ``norm(A)``
            - `conda` (float): Condition number of A.
            - `normx` (float): ``norm(x)``

    .. seealso:: :func:`scipy.sparse.linalg.lsmr`

    References:
        D. C.-L. Fong and M. A. Saunders, "LSMR: An iterative algorithm for
        sparse least-squares problems", SIAM J. Sci. Comput.,
        vol. 33, pp. 2950-2971, 2011.
    """
    A = _interface.aslinearoperator(A)
    b = b.squeeze()
    matvec = A.matvec
    rmatvec = A.rmatvec
    m, n = A.shape
    minDim = min([m, n])

    if maxiter is None:
        maxiter = minDim * 5

    u = b.copy()
    normb = cublas.nrm2(b)
    beta = normb.copy()
    # .get().item() synchronizes and pulls the scalar to the host; the scalar
    # recurrences below run on the CPU while vectors stay on the GPU.
    normb = normb.get().item()
    if x0 is None:
        x = cupy.zeros((n,), dtype=A.dtype)
    else:
        if not (x0.shape == (n,) or x0.shape == (n, 1)):
            raise ValueError('x0 has incompatible dimensions')
        x = x0.astype(A.dtype).ravel()
        # With a nonzero initial guess, iterate on the residual b - A*x0.
        u -= matvec(x)
        beta = cublas.nrm2(u)

    beta_cpu = beta.get().item()

    v = cupy.zeros(n)
    alpha = cupy.zeros((), dtype=beta.dtype)
    alpha_cpu = 0

    # First Golub-Kahan bidiagonalization step: normalize u, form v = A^T u.
    if beta_cpu > 0:
        u /= beta
        v = rmatvec(u)
        alpha = cublas.nrm2(v)
        alpha_cpu = alpha.get().item()

    if alpha_cpu > 0:
        v /= alpha

    # Initialize variables for 1st iteration.

    itn = 0
    zetabar = alpha_cpu * beta_cpu
    alphabar = alpha_cpu
    rho = 1
    rhobar = 1
    cbar = 1
    sbar = 0

    h = v.copy()
    hbar = cupy.zeros(n)

    # Initialize variables for estimation of ||r||.

    betadd = beta_cpu
    betad = 0
    rhodold = 1
    tautildeold = 0
    thetatilde = 0
    zeta = 0
    d = 0

    # Initialize variables for estimation of ||A|| and cond(A)

    normA2 = alpha_cpu * alpha_cpu
    maxrbar = 0
    minrbar = 1e+100
    normA = alpha_cpu
    condA = 1
    normx = 0

    # Items for use in stopping rules.
    istop = 0
    ctol = 0
    if conlim > 0:
        ctol = 1 / conlim
    normr = beta_cpu

    # Golub-Kahan process terminates when either alpha or beta is zero.
    # Reverse the order here from the original matlab code because
    # there was an error on return when arnorm==0
    normar = alpha_cpu * beta_cpu
    if normar == 0:
        return x, istop, itn, normr, normar, normA, condA, normx

    # Main iteration loop.
    while itn < maxiter:
        itn = itn + 1

        # Perform the next step of the bidiagonalization to obtain the
        # next beta, u, alpha, v. These satisfy the relations
        # beta*u = a*v - alpha*u,
        # alpha*v = A'*u - beta*v.

        u *= -alpha
        u += matvec(v)
        beta = cublas.nrm2(u)  # norm(u)
        beta_cpu = beta.get().item()

        if beta_cpu > 0:
            u /= beta
            v *= -beta
            v += rmatvec(u)
            alpha = cublas.nrm2(v)  # norm(v)
            alpha_cpu = alpha.get().item()
            if alpha_cpu > 0:
                v /= alpha

        # At this point, beta = beta_{k+1}, alpha = alpha_{k+1}.

        # Construct rotation Qhat_{k,2k+1}.

        chat, shat, alphahat = _symOrtho(alphabar, damp)

        # Use a plane rotation (Q_i) to turn B_i to R_i

        rhoold = rho
        c, s, rho = _symOrtho(alphahat, beta_cpu)
        thetanew = s * alpha_cpu
        alphabar = c * alpha_cpu

        # Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar

        rhobarold = rhobar
        zetaold = zeta
        thetabar = sbar * rho
        rhotemp = cbar * rho
        cbar, sbar, rhobar = _symOrtho(cbar * rho, thetanew)
        zeta = cbar * zetabar
        zetabar = - sbar * zetabar

        # Update h, h_hat, x.

        # hbar = h - (thetabar * rho / (rhoold * rhobarold)) * hbar
        # (written as in-place scale-and-add to avoid temporaries)
        hbar *= -(thetabar * rho / (rhoold * rhobarold))
        hbar += h
        x += (zeta / (rho * rhobar)) * hbar
        # h = v - (thetanew / rho) * h
        h *= -(thetanew / rho)
        h += v

        # Estimate of ||r||.

        # Apply rotation Qhat_{k,2k+1}.
        betaacute = chat * betadd
        betacheck = -shat * betadd

        # Apply rotation Q_{k,k+1}.
        betahat = c * betaacute
        betadd = -s * betaacute

        # Apply rotation Qtilde_{k-1}.
        # betad = betad_{k-1} here.

        thetatildeold = thetatilde
        ctildeold, stildeold, rhotildeold = _symOrtho(rhodold, thetabar)
        thetatilde = stildeold * rhobar
        rhodold = ctildeold * rhobar
        betad = - stildeold * betad + ctildeold * betahat

        # betad = betad_k here.
        # rhodold = rhod_k here.

        tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold
        taud = (zeta - thetatilde * tautildeold) / rhodold
        d = d + betacheck * betacheck
        normr = numpy.sqrt(d + (betad - taud)**2 + betadd * betadd)

        # Estimate ||A||.
        normA2 = normA2 + beta_cpu * beta_cpu
        normA = numpy.sqrt(normA2)
        normA2 = normA2 + alpha_cpu * alpha_cpu

        # Estimate cond(A).
        maxrbar = max(maxrbar, rhobarold)
        if itn > 1:
            minrbar = min(minrbar, rhobarold)
        condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp)

        # Test for convergence.

        # Compute norms for convergence testing.
        normar = abs(zetabar)
        normx = cublas.nrm2(x)
        normx = normx.get().item()

        # Now use these norms to estimate certain other quantities,
        # some of which will be small near a solution.

        test1 = normr / normb
        if (normA * normr) != 0:
            test2 = normar / (normA * normr)
        else:
            test2 = numpy.inf
        test3 = 1 / condA
        t1 = test1 / (1 + normA*normx/normb)
        rtol = btol + atol*normA*normx/normb

        # The following tests guard against extremely small values of
        # atol, btol or ctol. (The user may have set any or all of
        # the parameters atol, btol, conlim to 0.)
        # The effect is equivalent to the normAl tests using
        # atol = eps, btol = eps, conlim = 1/eps.

        if itn >= maxiter:
            istop = 7
        if 1 + test3 <= 1:
            istop = 6
        if 1 + test2 <= 1:
            istop = 5
        if 1 + t1 <= 1:
            istop = 4

        # Allow for tolerances set by the user.

        if test3 <= ctol:
            istop = 3
        if test2 <= atol:
            istop = 2
        if test1 <= rtol:
            istop = 1

        if istop > 0:
            break

    # The return type of SciPy is always float64. Therefore, x must be casted.
    x = x.astype(numpy.float64)

    return x, istop, itn, normr, normar, normA, condA, normx
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
def _should_use_spsm(b):
    """Return True when the cusparseSpSM code path should be used.

    Note: ``b`` is currently not inspected; the decision depends only on
    the platform and the cuSPARSE build version. The parameter is kept for
    interface stability with the call sites.
    """
    from cupy_backends.cuda.libs import cusparse as _cusparse

    # ROCm keeps using hipsparse<t>csrsm2 until it is dropped.
    if runtime.is_hip:
        return False
    # Starting with CUDA 12.0, we use cusparseSpSM.
    return _cusparse.get_build_version() >= 12000
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
def spsolve_triangular(A, b, lower=True, overwrite_A=False, overwrite_b=False,
                       unit_diagonal=False):
    """Solves a sparse triangular system ``A x = b``.

    Args:
        A (cupyx.scipy.sparse.spmatrix):
            Sparse matrix with dimension ``(M, M)``.
        b (cupy.ndarray):
            Dense vector or matrix with dimension ``(M)`` or ``(M, K)``.
        lower (bool):
            Whether ``A`` is a lower or upper triangular matrix.
            If True, it is lower triangular, otherwise, upper triangular.
        overwrite_A (bool):
            (not supported)
        overwrite_b (bool):
            Allows overwriting data in ``b``.
        unit_diagonal (bool):
            If True, diagonal elements of ``A`` are assumed to be 1 and will
            not be referenced.

    Returns:
        cupy.ndarray:
            Solution to the system ``A x = b``. The shape is the same as ``b``.
    """
    from cupyx import cusparse

    # Either backend (cusparseSpSM for CUDA >= 12, csrsm2 otherwise) must be
    # available; bail out early before any validation work.
    if not (cusparse.check_availability('spsm') or
            cusparse.check_availability('csrsm2')):
        raise NotImplementedError

    if not sparse.isspmatrix(A):
        raise TypeError('A must be cupyx.scipy.sparse.spmatrix')
    if not isinstance(b, cupy.ndarray):
        raise TypeError('b must be cupy.ndarray')
    if A.shape[0] != A.shape[1]:
        raise ValueError(f'A must be a square matrix (A.shape: {A.shape})')
    if b.ndim not in [1, 2]:
        raise ValueError(f'b must be 1D or 2D array (b.shape: {b.shape})')
    if A.shape[0] != b.shape[0]:
        raise ValueError('The size of dimensions of A must be equal to the '
                         'size of the first dimension of b '
                         f'(A.shape: {A.shape}, b.shape: {b.shape})')
    if A.dtype.char not in 'fdFD':
        raise TypeError(f'unsupported dtype (actual: {A.dtype})')

    if cusparse.check_availability('spsm') and _should_use_spsm(b):
        # SpSM path: accepts CSR, CSC or COO storage directly.
        if not (sparse.isspmatrix_csr(A) or
                sparse.isspmatrix_csc(A) or
                sparse.isspmatrix_coo(A)):
            warnings.warn('CSR, CSC or COO format is required. Converting to '
                          'CSR format.', sparse.SparseEfficiencyWarning)
            A = A.tocsr()
        # Duplicate entries would make the triangular solve ill-defined.
        A.sum_duplicates()
        # spsm allocates and returns the solution; overwrite_b is not used
        # on this path.
        x = cusparse.spsm(A, b, lower=lower, unit_diag=unit_diagonal)
    elif cusparse.check_availability('csrsm2'):
        if not (sparse.isspmatrix_csr(A) or sparse.isspmatrix_csc(A)):
            warnings.warn('CSR or CSC format is required. Converting to CSR '
                          'format.', sparse.SparseEfficiencyWarning)
            A = A.tocsr()
        A.sum_duplicates()

        # csrsm2 solves in place, so reuse b only when the caller allowed it
        # and the dtype/layout already match; otherwise work on a copy.
        if (overwrite_b and A.dtype == b.dtype and
                (b._c_contiguous or b._f_contiguous)):
            x = b
        else:
            x = b.astype(A.dtype, copy=True)

        cusparse.csrsm2(A, x, lower=lower, unit_diag=unit_diagonal)
    else:
        # Unreachable: the availability check at the top guarantees one of
        # the two branches above was taken.
        assert False

    if x.dtype.char in 'fF':
        # Note: This is for compatibility with SciPy.
        dtype = numpy.promote_types(x.dtype, 'float64')
        x = x.astype(dtype)
    return x
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
def spsolve(A, b):
    """Solves a sparse linear system ``A x = b``

    Args:
        A (cupyx.scipy.sparse.spmatrix):
            Sparse matrix with dimension ``(M, M)``.
        b (cupy.ndarray):
            Dense vector or matrix with dimension ``(M)`` or ``(M, N)``.

    Returns:
        cupy.ndarray:
            Solution to the system ``A x = b``.
    """
    import cupyx.cusolver

    # Validate backend availability and argument types/shapes up front.
    if not cupyx.cusolver.check_availability('csrlsvqr'):
        raise NotImplementedError
    if not sparse.isspmatrix(A):
        raise TypeError('A must be cupyx.scipy.sparse.spmatrix')
    if not isinstance(b, cupy.ndarray):
        raise TypeError('b must be cupy.ndarray')
    if A.shape[0] != A.shape[1]:
        raise ValueError(
            'A must be a square matrix (A.shape: {})'.format(A.shape))
    if b.ndim not in (1, 2):
        raise ValueError('Invalid b.shape (b.shape: {})'.format(b.shape))
    if A.shape[0] != b.shape[0]:
        raise ValueError(
            'matrix dimension mismatch (A.shape: {}, b.shape: {})'
            .format(A.shape, b.shape))

    # The QR solver requires canonical CSR input.
    if not sparse.isspmatrix_csr(A):
        warnings.warn('CSR format is required. Converting to CSR format.',
                      sparse.SparseEfficiencyWarning)
        A = A.tocsr()
    A.sum_duplicates()
    b = b.astype(A.dtype, copy=False)

    if b.ndim == 1:
        # Single right-hand side: one QR solve.
        return cupyx.cusolver.csrlsvqr(A, b)

    # Multiple right-hand sides: solve one column at a time.
    out = cupy.empty_like(b)
    for col in range(out.shape[1]):
        out[:, col] = cupyx.cusolver.csrlsvqr(A, b[:, col])
    # Return an F-ordered array, matching the original behavior.
    return cupy.asarray(out, order='F')
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
class SuperLU():
    """GPU-side wrapper around a CPU-computed sparse LU factorization.

    Holds the L/U factors and row/column permutations of a
    ``scipy.sparse.linalg.SuperLU`` object as CuPy arrays so that triangular
    solves can run on the GPU.
    """

    def __init__(self, obj):
        """LU factorization of a sparse matrix.

        Args:
            obj (scipy.sparse.linalg.SuperLU): LU factorization of a sparse
                matrix, computed by `scipy.sparse.linalg.splu`, etc.
        """
        if not scipy_available:
            raise RuntimeError('scipy is not available')
        if not isinstance(obj, scipy.sparse.linalg.SuperLU):
            raise TypeError('obj must be scipy.sparse.linalg.SuperLU')

        self.shape = obj.shape
        self.nnz = obj.nnz
        # Row/column permutations copied to the GPU.
        self.perm_r = cupy.array(obj.perm_r)
        self.perm_c = cupy.array(obj.perm_c)
        # Factors converted to CSR for the cuSPARSE triangular solvers.
        self.L = sparse.csr_matrix(obj.L.tocsr())
        self.U = sparse.csr_matrix(obj.U.tocsr())

        # Inverse permutations, precomputed once for use in solve().
        self._perm_r_rev = cupy.argsort(self.perm_r)
        self._perm_c_rev = cupy.argsort(self.perm_c)

    def solve(self, rhs, trans='N'):
        """Solves linear system of equations with one or several right-hand sides.

        Args:
            rhs (cupy.ndarray): Right-hand side(s) of equation with dimension
                ``(M)`` or ``(M, K)``.
            trans (str): 'N', 'T' or 'H'.
                'N': Solves ``A * x = rhs``.
                'T': Solves ``A.T * x = rhs``.
                'H': Solves ``A.conj().T * x = rhs``.

        Returns:
            cupy.ndarray:
                Solution vector(s)
        """  # NOQA
        from cupyx import cusparse

        if not isinstance(rhs, cupy.ndarray):
            # Fix: the error message previously read 'ojb must be ...',
            # naming neither the parameter nor a real identifier.
            raise TypeError('rhs must be cupy.ndarray')
        if rhs.ndim not in (1, 2):
            raise ValueError('rhs.ndim must be 1 or 2 (actual: {})'.
                             format(rhs.ndim))
        if rhs.shape[0] != self.shape[0]:
            raise ValueError('shape mismatch (self.shape: {}, rhs.shape: {})'
                             .format(self.shape, rhs.shape))
        if trans not in ('N', 'T', 'H'):
            raise ValueError('trans must be \'N\', \'T\', or \'H\'')

        # Select the triangular-solve backend: cusparseSpSM (CUDA >= 12)
        # or the older csrsm2 (which solves in place, so we return B).
        if cusparse.check_availability('spsm') and _should_use_spsm(rhs):
            def spsm(A, B, lower, transa):
                return cusparse.spsm(A, B, lower=lower, transa=transa)
            sm = spsm
        elif cusparse.check_availability('csrsm2'):
            def csrsm2(A, B, lower, transa):
                cusparse.csrsm2(A, B, lower=lower, transa=transa)
                return B
            sm = csrsm2
        else:
            raise NotImplementedError

        x = rhs.astype(self.L.dtype)
        if trans == 'N':
            # A = Pr^-1 L U Pc^-1: undo row permutation, solve L then U,
            # then apply the column permutation.
            if self.perm_r is not None:
                if x.ndim == 2 and x._f_contiguous:
                    x = x.T[:, self._perm_r_rev].T  # want to keep f-order
                else:
                    x = x[self._perm_r_rev]
            x = sm(self.L, x, lower=True, transa=trans)
            x = sm(self.U, x, lower=False, transa=trans)
            if self.perm_c is not None:
                x = x[self.perm_c]
        else:
            # Transposed/adjoint system: permutations and factor order are
            # reversed relative to the 'N' case.
            if self.perm_c is not None:
                if x.ndim == 2 and x._f_contiguous:
                    x = x.T[:, self._perm_c_rev].T  # want to keep f-order
                else:
                    x = x[self._perm_c_rev]
            x = sm(self.U, x, lower=False, transa=trans)
            x = sm(self.L, x, lower=True, transa=trans)
            if self.perm_r is not None:
                x = x[self.perm_r]

        if not x._f_contiguous:
            # For compatibility with SciPy
            x = x.copy(order='F')
        return x
|
| 618 |
+
|
| 619 |
+
|
| 620 |
+
class CusparseLU(SuperLU):

    def __init__(self, a):
        """Incomplete LU factorization of a sparse matrix.

        Args:
            a (cupyx.scipy.sparse.csr_matrix): Incomplete LU factorization of a
                sparse matrix, computed by `cusparse.csrilu02`.
        """
        if not scipy_available:
            raise RuntimeError('scipy is not available')
        if not sparse.isspmatrix_csr(a):
            raise TypeError('a must be cupyx.scipy.sparse.csr_matrix')

        self.shape = a.shape
        self.nnz = a.nnz
        # ILU(0) stores both factors in a single matrix, with no pivoting,
        # so there are no row/column permutations to track.
        self.perm_r = None
        self.perm_c = None
        # TODO(anaruse): Computes tril and triu on GPU
        host_mat = a.get()
        lower_part = scipy.sparse.tril(host_mat)
        # L carries an implicit unit diagonal in the csrilu02 layout.
        lower_part.setdiag(1.0)
        upper_part = scipy.sparse.triu(host_mat)
        self.L = sparse.csr_matrix(lower_part.tocsr())
        self.U = sparse.csr_matrix(upper_part.tocsr())
|
| 645 |
+
|
| 646 |
+
|
| 647 |
+
def factorized(A):
    """Return a function for solving a sparse linear system, with A pre-factorized.

    Args:
        A (cupyx.scipy.sparse.spmatrix): Sparse matrix to factorize.

    Returns:
        callable: a function to solve the linear system of equations given in
        ``A``.

    Note:
        The LU decomposition itself is computed on the CPU via
        `scipy.sparse.linalg.splu`, so factorization is not GPU-accelerated.
        Solving with the returned callable, however, runs on the GPU.

    .. seealso:: :func:`scipy.sparse.linalg.factorized`
    """  # NOQA
    lu = splu(A)
    return lu.solve
|
| 667 |
+
|
| 668 |
+
|
| 669 |
+
def splu(A, permc_spec=None, diag_pivot_thresh=None, relax=None,
         panel_size=None, options=None):
    """Computes the LU decomposition of a sparse square matrix.

    Args:
        A (cupyx.scipy.sparse.spmatrix): Sparse matrix to factorize.
        permc_spec (str): (For further augments, see
            :func:`scipy.sparse.linalg.splu`)
        diag_pivot_thresh (float):
        relax (int):
        panel_size (int):
        options (dict): Defaults to an empty dict when not given.

    Returns:
        cupyx.scipy.sparse.linalg.SuperLU:
            Object which has a ``solve`` method.

    Note:
        This function LU-decomposes a sparse matrix on the CPU using
        `scipy.sparse.linalg.splu`. Therefore, LU decomposition is not
        accelerated on the GPU. On the other hand, the computation of solving
        linear equations using the ``solve`` method, which this function
        returns, is performed on the GPU.

    .. seealso:: :func:`scipy.sparse.linalg.splu`
    """
    # Fix: the default used to be the mutable literal ``options={}``, a
    # single dict shared across every call. Use a None sentinel instead.
    if options is None:
        options = {}

    if not scipy_available:
        raise RuntimeError('scipy is not available')
    if not sparse.isspmatrix(A):
        raise TypeError('A must be cupyx.scipy.sparse.spmatrix')
    if A.shape[0] != A.shape[1]:
        raise ValueError('A must be a square matrix (A.shape: {})'
                         .format(A.shape))
    if A.dtype.char not in 'fdFD':
        raise TypeError('Invalid dtype (actual: {})'.format(A.dtype))

    # Factorize on the CPU (SciPy expects CSC input), then wrap the result
    # so triangular solves run on the GPU.
    a = A.get().tocsc()
    a_inv = scipy.sparse.linalg.splu(
        a, permc_spec=permc_spec, diag_pivot_thresh=diag_pivot_thresh,
        relax=relax, panel_size=panel_size, options=options)
    return SuperLU(a_inv)
|
| 710 |
+
|
| 711 |
+
|
| 712 |
+
def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None,
          permc_spec=None, diag_pivot_thresh=None, relax=None,
          panel_size=None, options=None):
    """Computes the incomplete LU decomposition of a sparse square matrix.

    Args:
        A (cupyx.scipy.sparse.spmatrix): Sparse matrix to factorize.
        drop_tol (float): (For further augments, see
            :func:`scipy.sparse.linalg.spilu`)
        fill_factor (float):
        drop_rule (str):
        permc_spec (str):
        diag_pivot_thresh (float):
        relax (int):
        panel_size (int):
        options (dict): Defaults to an empty dict when not given.

    Returns:
        cupyx.scipy.sparse.linalg.SuperLU:
            Object which has a ``solve`` method.

    Note:
        This function computes incomplete LU decomposition of a sparse matrix
        on the CPU using `scipy.sparse.linalg.spilu` (unless you set
        ``fill_factor`` to ``1``). Therefore, incomplete LU decomposition is
        not accelerated on the GPU. On the other hand, the computation of
        solving linear equations using the ``solve`` method, which this
        function returns, is performed on the GPU.

        If you set ``fill_factor`` to ``1``, this function computes incomplete
        LU decomposition on the GPU, but without fill-in or pivoting.

    .. seealso:: :func:`scipy.sparse.linalg.spilu`
    """
    from cupyx import cusparse

    # Fix: the default used to be the mutable literal ``options={}``, a
    # single dict shared across every call. Use a None sentinel instead.
    if options is None:
        options = {}

    if not scipy_available:
        raise RuntimeError('scipy is not available')
    if not sparse.isspmatrix(A):
        raise TypeError('A must be cupyx.scipy.sparse.spmatrix')
    if A.shape[0] != A.shape[1]:
        raise ValueError('A must be a square matrix (A.shape: {})'
                         .format(A.shape))
    if A.dtype.char not in 'fdFD':
        raise TypeError('Invalid dtype (actual: {})'.format(A.dtype))

    if fill_factor == 1:
        # Computes ILU(0) on the GPU using cuSparse functions
        if not sparse.isspmatrix_csr(A):
            a = A.tocsr()
        else:
            # csrilu02 factorizes in place; copy so A is not clobbered.
            a = A.copy()
        cusparse.csrilu02(a)
        return CusparseLU(a)

    # General ILU: factorize on the CPU (SciPy expects CSC input), then
    # wrap the result so triangular solves run on the GPU.
    a = A.get().tocsc()
    a_inv = scipy.sparse.linalg.spilu(
        a, fill_factor=fill_factor, drop_tol=drop_tol, drop_rule=drop_rule,
        permc_spec=permc_spec, diag_pivot_thresh=diag_pivot_thresh,
        relax=relax, panel_size=panel_size, options=options)
    return SuperLU(a_inv)
|
| 773 |
+
|
| 774 |
+
|
| 775 |
+
def _symOrtho(a, b):
|
| 776 |
+
"""
|
| 777 |
+
A stable implementation of Givens rotation according to
|
| 778 |
+
S.-C. Choi, "Iterative Methods for Singular Linear Equations
|
| 779 |
+
and Least-Squares Problems", Dissertation,
|
| 780 |
+
http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf
|
| 781 |
+
"""
|
| 782 |
+
if b == 0:
|
| 783 |
+
return numpy.sign(a), 0, abs(a)
|
| 784 |
+
elif a == 0:
|
| 785 |
+
return 0, numpy.sign(b), abs(b)
|
| 786 |
+
elif abs(b) > abs(a):
|
| 787 |
+
tau = a / b
|
| 788 |
+
s = numpy.sign(b) / numpy.sqrt(1+tau*tau)
|
| 789 |
+
c = s * tau
|
| 790 |
+
r = b / s
|
| 791 |
+
else:
|
| 792 |
+
tau = b / a
|
| 793 |
+
c = numpy.sign(a) / numpy.sqrt(1+tau*tau)
|
| 794 |
+
s = c * tau
|
| 795 |
+
r = a / c
|
| 796 |
+
return c, s, r
|
| 797 |
+
|
| 798 |
+
|
| 799 |
+
def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None,
           M=None, callback=None, check=False):
    """Uses MINimum RESidual iteration to solve ``Ax = b``.

    Args:
        A (ndarray, spmatrix or LinearOperator): The real or complex matrix of
            the linear system with shape ``(n, n)``.
        b (cupy.ndarray): Right hand side of the linear system with shape
            ``(n,)`` or ``(n, 1)``.
        x0 (cupy.ndarray): Starting guess for the solution.
        shift (int or float): If shift != 0 then the method solves
            ``(A - shift*I)x = b``
        tol (float): Tolerance for convergence.
        maxiter (int): Maximum number of iterations.
        M (ndarray, spmatrix or LinearOperator): Preconditioner for ``A``.
            The preconditioner should approximate the inverse of ``A``.
            ``M`` must be :class:`cupy.ndarray`,
            :class:`cupyx.scipy.sparse.spmatrix` or
            :class:`cupyx.scipy.sparse.linalg.LinearOperator`.
        callback (function): User-specified function to call after each
            iteration. It is called as ``callback(xk)``, where ``xk`` is the
            current solution vector.
        check (bool): If ``True``, run approximate symmetry checks on ``A``
            and ``M`` before iterating and raise :class:`ValueError` if
            either check fails.

    Returns:
        tuple:
            It returns ``x`` (cupy.ndarray) and ``info`` (int) where ``x`` is
            the converged solution and ``info`` provides convergence
            information (``0`` on convergence, ``maxiter`` if the iteration
            limit was reached).

    .. seealso:: :func:`scipy.sparse.linalg.minres`
    """
    # Normalize A/M/x0/b into LinearOperator-style objects and a starting
    # vector; _make_system is defined elsewhere in this module.
    A, M, x, b = _make_system(A, M, x0, b)

    matvec = A.matvec
    psolve = M.matvec

    n = b.shape[0]

    if maxiter is None:
        maxiter = n * 5

    # Solver state: istop is the termination code, itn the iteration count,
    # the *norm variables are running estimates updated each iteration.
    istop = 0
    itn = 0
    Anorm = 0
    Acond = 0
    rnorm = 0
    ynorm = 0

    xtype = x.dtype

    eps = cupy.finfo(xtype).eps

    # Initial residual r1 = b - A x0 and its preconditioned image y.
    Ax = matvec(x)
    r1 = b - Ax
    y = psolve(r1)

    # beta1 = <r1, M^{-1} r1>; must be >= 0 for a positive (semi)definite
    # preconditioner.
    beta1 = cupy.inner(r1, y)

    if beta1 < 0:
        raise ValueError('indefinite preconditioner')
    elif beta1 == 0:
        # x0 already solves the system exactly.
        return x, 0

    beta1 = cupy.sqrt(beta1)
    # Bring the scalar back to the host as a Python float for the scalar
    # recurrences below.
    beta1 = beta1.get().item()

    if check:
        # see if A is symmetric
        if not _check_symmetric(A, Ax, x, eps):
            raise ValueError('non-symmetric matrix')

        # see if M is symmetric
        if not _check_symmetric(M, y, r1, eps):
            raise ValueError('non-symmetric preconditioner')

    # Initialize the Lanczos / QR recurrence scalars and work vectors.
    oldb = 0
    beta = beta1
    dbar = 0
    epsln = 0
    qrnorm = beta1
    phibar = beta1
    rhs1 = beta1
    rhs2 = 0
    tnorm2 = 0
    gmax = 0
    gmin = cupy.finfo(xtype).max
    cs = -1
    sn = 0
    w = cupy.zeros(n, dtype=xtype)
    w2 = cupy.zeros(n, dtype=xtype)
    r2 = r1

    while itn < maxiter:

        itn += 1
        # Lanczos step: v is the normalized (preconditioned) residual.
        s = 1.0 / beta
        v = s * y

        y = matvec(v)
        y -= shift * v

        if itn >= 2:
            # Three-term Lanczos recurrence: subtract the previous direction.
            y -= (beta / oldb) * r1

        alpha = cupy.inner(v, y)
        alpha = alpha.get().item()
        y -= (alpha / beta) * r2
        r1 = r2
        r2 = y
        y = psolve(r2)
        oldb = beta
        beta = cupy.inner(r2, y)
        beta = beta.get().item()
        beta = numpy.sqrt(beta)
        # A negative inner product here means <r2, M^{-1} r2> < 0, which
        # cannot happen for a symmetric system with an SPD preconditioner.
        if beta < 0:
            raise ValueError('non-symmetric matrix')

        # Accumulate ||T_k||_F^2, the Frobenius norm of the Lanczos
        # tridiagonal matrix, used as an estimate of ||A||.
        tnorm2 += alpha ** 2 + oldb ** 2 + beta ** 2

        if itn == 1:
            # beta2 ~ 0 on the first step implies A is (close to) a
            # multiple of the identity; flag early termination.
            if beta / beta1 <= 10 * eps:
                istop = -1

        # Apply previous rotation Qk-1 to get
        #   [deltak epslnk+1] = [cs  sn][dbark    0   ]
        #   [gbar k dbar k+1]   [sn -cs][alfak betak+1].

        oldeps = epsln
        delta = cs * dbar + sn * alpha      # delta1 = 0         deltak
        gbar = sn * dbar - cs * alpha       # gbar 1 = alfa1     gbar k
        epsln = sn * beta                   # epsln2 = 0         epslnk+1
        dbar = - cs * beta                  # dbar 2 = beta2     dbar k+1
        root = numpy.linalg.norm([gbar, dbar])

        # Compute the next plane rotation Qk

        gamma = numpy.linalg.norm([gbar, beta])       # gammak
        gamma = max(gamma, eps)
        cs = gbar / gamma                   # ck
        sn = beta / gamma                   # sk
        phi = cs * phibar                   # phik
        phibar = sn * phibar                # phibark+1

        # Update x.

        denom = 1.0 / gamma
        w1 = w2
        w2 = w
        w = (v - oldeps * w1 - delta * w2) * denom
        x += phi * w

        # Go round again.

        gmax = max(gmax, gamma)
        gmin = min(gmin, gamma)
        z = rhs1 / gamma
        rhs1 = rhs2 - delta * z
        rhs2 = - epsln * z

        # Estimate various norms and test for convergence.

        Anorm = numpy.sqrt(tnorm2)
        ynorm = cupy.linalg.norm(x)
        ynorm = ynorm.get().item()
        epsa = Anorm * eps
        epsx = Anorm * ynorm * eps
        diag = gbar

        if diag == 0:
            diag = epsa

        qrnorm = phibar
        rnorm = qrnorm
        if ynorm == 0 or Anorm == 0:
            test1 = numpy.inf
        else:
            test1 = rnorm / (Anorm * ynorm)  # ||r||  / (||A|| ||x||)
        if Anorm == 0:
            test2 = numpy.inf
        else:
            test2 = root / Anorm             # ||Ar|| / (||A|| ||r||)

        # Estimate cond(A).
        # In this version we look at the diagonals of R in the
        # factorization of the lower Hessenberg matrix, Q * H = R,
        # where H is the tridiagonal matrix from Lanczos with one
        # extra row, beta(k+1) e_k^T.

        Acond = gmax / gmin

        # See if any of the stopping criteria are satisfied.
        # In rare cases, istop is already -1 from above (Abar = const*I).

        if istop == 0:
            t1 = 1 + test1                   # These tests work if tol < eps
            t2 = 1 + test2
            if t2 <= 1:
                istop = 2
            if t1 <= 1:
                istop = 1

            if itn >= maxiter:
                istop = 6
            if Acond >= 0.1 / eps:
                istop = 4
            if epsx >= beta1:
                istop = 3
            # epsr = Anorm * ynorm * tol
            # if rnorm <= epsx   : istop = 2
            # if rnorm <= epsr   : istop = 1
            if test2 <= tol:
                istop = 2
            if test1 <= tol:
                istop = 1

        if callback is not None:
            callback(x)

        if istop != 0:
            break

    # Only the iteration-limit case (istop == 6) is reported to the caller;
    # every other termination code maps to info == 0.
    if istop == 6:
        info = maxiter
    else:
        info = 0

    return x, info
|
| 1026 |
+
|
| 1027 |
+
|
| 1028 |
+
def _check_symmetric(op1, op2, vec, eps):
    """Approximately verify that ``op1`` acts symmetrically.

    Compares ``<op2, op2>`` against ``<vec, op1 * op2>`` — for a symmetric
    operator with ``op2 == op1 * vec`` these inner products agree up to
    rounding — and accepts a discrepancy up to ``(s + eps) * eps**(1/3)``.
    Returns ``True`` when the check passes, ``False`` otherwise.
    """
    applied = op1 * op2
    lhs = cupy.inner(op2, op2)
    rhs = cupy.inner(vec, applied)
    tolerance = (lhs + eps) * eps ** (1.0 / 3.0)
    if abs(lhs - rhs) > tolerance:
        return False
    return True
|
vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (3.97 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_basic.cpython-310.pyc
ADDED
|
Binary file (6.01 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_bessel.cpython-310.pyc
ADDED
|
Binary file (10.7 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_beta.cpython-310.pyc
ADDED
|
Binary file (22.6 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_binom.cpython-310.pyc
ADDED
|
Binary file (3.21 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_complexstuff.cpython-310.pyc
ADDED
|
Binary file (1.61 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_convex_analysis.cpython-310.pyc
ADDED
|
Binary file (2.71 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_digamma.cpython-310.pyc
ADDED
|
Binary file (3.88 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_ellip.cpython-310.pyc
ADDED
|
Binary file (4.91 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_erf.cpython-310.pyc
ADDED
|
Binary file (1.29 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_exp1.cpython-310.pyc
ADDED
|
Binary file (1.27 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_expi.cpython-310.pyc
ADDED
|
Binary file (1.25 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_expn.cpython-310.pyc
ADDED
|
Binary file (7.06 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gamma.cpython-310.pyc
ADDED
|
Binary file (4.3 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gammainc.cpython-310.pyc
ADDED
|
Binary file (43.2 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gammaln.cpython-310.pyc
ADDED
|
Binary file (1.77 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_gammasgn.cpython-310.pyc
ADDED
|
Binary file (1.21 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/cupyx/scipy/special/__pycache__/_loggamma.cpython-310.pyc
ADDED
|
Binary file (6.43 kB). View file
|
|
|