repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
scipy
scipy-main/scipy/integrate/tests/__init__.py
0
0
0
py
scipy
scipy-main/scipy/integrate/tests/test_integrate.py
# Authors: Nils Wagner, Ed Schofield, Pauli Virtanen, John Travers """ Tests for numerical integration. """ import numpy as np from numpy import (arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp, allclose) from numpy.testing import ( assert_, assert_array_almost_equal, assert_allclose, assert_array_equal, assert_equal, assert_warns) from pytest import raises as assert_raises from scipy.integrate import odeint, ode, complex_ode #------------------------------------------------------------------------------ # Test ODE integrators #------------------------------------------------------------------------------ class TestOdeint: # Check integrate.odeint def _do_problem(self, problem): t = arange(0.0, problem.stop_t, 0.05) # Basic case z, infodict = odeint(problem.f, problem.z0, t, full_output=True) assert_(problem.verify(z, t)) # Use tfirst=True z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t, full_output=True, tfirst=True) assert_(problem.verify(z, t)) if hasattr(problem, 'jac'): # Use Dfun z, infodict = odeint(problem.f, problem.z0, t, Dfun=problem.jac, full_output=True) assert_(problem.verify(z, t)) # Use Dfun and tfirst=True z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t, Dfun=lambda t, y: problem.jac(y, t), full_output=True, tfirst=True) assert_(problem.verify(z, t)) def test_odeint(self): for problem_cls in PROBLEMS: problem = problem_cls() if problem.cmplx: continue self._do_problem(problem) class TestODEClass: ode_class = None # Set in subclass. 
def _do_problem(self, problem, integrator, method='adams'): # ode has callback arguments in different order than odeint def f(t, z): return problem.f(z, t) jac = None if hasattr(problem, 'jac'): def jac(t, z): return problem.jac(z, t) integrator_params = {} if problem.lband is not None or problem.uband is not None: integrator_params['uband'] = problem.uband integrator_params['lband'] = problem.lband ig = self.ode_class(f, jac) ig.set_integrator(integrator, atol=problem.atol/10, rtol=problem.rtol/10, method=method, **integrator_params) ig.set_initial_value(problem.z0, t=0.0) z = ig.integrate(problem.stop_t) assert_array_equal(z, ig.y) assert_(ig.successful(), (problem, method)) assert_(ig.get_return_code() > 0, (problem, method)) assert_(problem.verify(array([z]), problem.stop_t), (problem, method)) class TestOde(TestODEClass): ode_class = ode def test_vode(self): # Check the vode solver for problem_cls in PROBLEMS: problem = problem_cls() if problem.cmplx: continue if not problem.stiff: self._do_problem(problem, 'vode', 'adams') self._do_problem(problem, 'vode', 'bdf') def test_zvode(self): # Check the zvode solver for problem_cls in PROBLEMS: problem = problem_cls() if not problem.stiff: self._do_problem(problem, 'zvode', 'adams') self._do_problem(problem, 'zvode', 'bdf') def test_lsoda(self): # Check the lsoda solver for problem_cls in PROBLEMS: problem = problem_cls() if problem.cmplx: continue self._do_problem(problem, 'lsoda') def test_dopri5(self): # Check the dopri5 solver for problem_cls in PROBLEMS: problem = problem_cls() if problem.cmplx: continue if problem.stiff: continue if hasattr(problem, 'jac'): continue self._do_problem(problem, 'dopri5') def test_dop853(self): # Check the dop853 solver for problem_cls in PROBLEMS: problem = problem_cls() if problem.cmplx: continue if problem.stiff: continue if hasattr(problem, 'jac'): continue self._do_problem(problem, 'dop853') def test_concurrent_fail(self): for sol in ('vode', 'zvode', 'lsoda'): def f(t, y): 
return 1.0 r = ode(f).set_integrator(sol) r.set_initial_value(0, 0) r2 = ode(f).set_integrator(sol) r2.set_initial_value(0, 0) r.integrate(r.t + 0.1) r2.integrate(r2.t + 0.1) assert_raises(RuntimeError, r.integrate, r.t + 0.1) def test_concurrent_ok(self): def f(t, y): return 1.0 for k in range(3): for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'): r = ode(f).set_integrator(sol) r.set_initial_value(0, 0) r2 = ode(f).set_integrator(sol) r2.set_initial_value(0, 0) r.integrate(r.t + 0.1) r2.integrate(r2.t + 0.1) r2.integrate(r2.t + 0.1) assert_allclose(r.y, 0.1) assert_allclose(r2.y, 0.2) for sol in ('dopri5', 'dop853'): r = ode(f).set_integrator(sol) r.set_initial_value(0, 0) r2 = ode(f).set_integrator(sol) r2.set_initial_value(0, 0) r.integrate(r.t + 0.1) r.integrate(r.t + 0.1) r2.integrate(r2.t + 0.1) r.integrate(r.t + 0.1) r2.integrate(r2.t + 0.1) assert_allclose(r.y, 0.3) assert_allclose(r2.y, 0.2) class TestComplexOde(TestODEClass): ode_class = complex_ode def test_vode(self): # Check the vode solver for problem_cls in PROBLEMS: problem = problem_cls() if not problem.stiff: self._do_problem(problem, 'vode', 'adams') else: self._do_problem(problem, 'vode', 'bdf') def test_lsoda(self): # Check the lsoda solver for problem_cls in PROBLEMS: problem = problem_cls() self._do_problem(problem, 'lsoda') def test_dopri5(self): # Check the dopri5 solver for problem_cls in PROBLEMS: problem = problem_cls() if problem.stiff: continue if hasattr(problem, 'jac'): continue self._do_problem(problem, 'dopri5') def test_dop853(self): # Check the dop853 solver for problem_cls in PROBLEMS: problem = problem_cls() if problem.stiff: continue if hasattr(problem, 'jac'): continue self._do_problem(problem, 'dop853') class TestSolout: # Check integrate.ode correctly handles solout for dopri5 and dop853 def _run_solout_test(self, integrator): # Check correct usage of solout ts = [] ys = [] t0 = 0.0 tend = 10.0 y0 = [1.0, 2.0] def solout(t, y): ts.append(t) ys.append(y.copy()) def 
rhs(t, y): return [y[0] + y[1], -y[1]**2] ig = ode(rhs).set_integrator(integrator) ig.set_solout(solout) ig.set_initial_value(y0, t0) ret = ig.integrate(tend) assert_array_equal(ys[0], y0) assert_array_equal(ys[-1], ret) assert_equal(ts[0], t0) assert_equal(ts[-1], tend) def test_solout(self): for integrator in ('dopri5', 'dop853'): self._run_solout_test(integrator) def _run_solout_after_initial_test(self, integrator): # Check if solout works even if it is set after the initial value. ts = [] ys = [] t0 = 0.0 tend = 10.0 y0 = [1.0, 2.0] def solout(t, y): ts.append(t) ys.append(y.copy()) def rhs(t, y): return [y[0] + y[1], -y[1]**2] ig = ode(rhs).set_integrator(integrator) ig.set_initial_value(y0, t0) ig.set_solout(solout) ret = ig.integrate(tend) assert_array_equal(ys[0], y0) assert_array_equal(ys[-1], ret) assert_equal(ts[0], t0) assert_equal(ts[-1], tend) def test_solout_after_initial(self): for integrator in ('dopri5', 'dop853'): self._run_solout_after_initial_test(integrator) def _run_solout_break_test(self, integrator): # Check correct usage of stopping via solout ts = [] ys = [] t0 = 0.0 tend = 10.0 y0 = [1.0, 2.0] def solout(t, y): ts.append(t) ys.append(y.copy()) if t > tend/2.0: return -1 def rhs(t, y): return [y[0] + y[1], -y[1]**2] ig = ode(rhs).set_integrator(integrator) ig.set_solout(solout) ig.set_initial_value(y0, t0) ret = ig.integrate(tend) assert_array_equal(ys[0], y0) assert_array_equal(ys[-1], ret) assert_equal(ts[0], t0) assert_(ts[-1] > tend/2.0) assert_(ts[-1] < tend) def test_solout_break(self): for integrator in ('dopri5', 'dop853'): self._run_solout_break_test(integrator) class TestComplexSolout: # Check integrate.ode correctly handles solout for dopri5 and dop853 def _run_solout_test(self, integrator): # Check correct usage of solout ts = [] ys = [] t0 = 0.0 tend = 20.0 y0 = [0.0] def solout(t, y): ts.append(t) ys.append(y.copy()) def rhs(t, y): return [1.0/(t - 10.0 - 1j)] ig = complex_ode(rhs).set_integrator(integrator) 
ig.set_solout(solout) ig.set_initial_value(y0, t0) ret = ig.integrate(tend) assert_array_equal(ys[0], y0) assert_array_equal(ys[-1], ret) assert_equal(ts[0], t0) assert_equal(ts[-1], tend) def test_solout(self): for integrator in ('dopri5', 'dop853'): self._run_solout_test(integrator) def _run_solout_break_test(self, integrator): # Check correct usage of stopping via solout ts = [] ys = [] t0 = 0.0 tend = 20.0 y0 = [0.0] def solout(t, y): ts.append(t) ys.append(y.copy()) if t > tend/2.0: return -1 def rhs(t, y): return [1.0/(t - 10.0 - 1j)] ig = complex_ode(rhs).set_integrator(integrator) ig.set_solout(solout) ig.set_initial_value(y0, t0) ret = ig.integrate(tend) assert_array_equal(ys[0], y0) assert_array_equal(ys[-1], ret) assert_equal(ts[0], t0) assert_(ts[-1] > tend/2.0) assert_(ts[-1] < tend) def test_solout_break(self): for integrator in ('dopri5', 'dop853'): self._run_solout_break_test(integrator) #------------------------------------------------------------------------------ # Test problems #------------------------------------------------------------------------------ class ODE: """ ODE problem """ stiff = False cmplx = False stop_t = 1 z0 = [] lband = None uband = None atol = 1e-6 rtol = 1e-5 class SimpleOscillator(ODE): r""" Free vibration of a simple oscillator:: m \ddot{u} + k u = 0, u(0) = u_0 \dot{u}(0) \dot{u}_0 Solution:: u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m) """ stop_t = 1 + 0.09 z0 = array([1.0, 0.1], float) k = 4.0 m = 1.0 def f(self, z, t): tmp = zeros((2, 2), float) tmp[0, 1] = 1.0 tmp[1, 0] = -self.k / self.m return dot(tmp, z) def verify(self, zs, t): omega = sqrt(self.k / self.m) u = self.z0[0]*cos(omega*t) + self.z0[1]*sin(omega*t)/omega return allclose(u, zs[:, 0], atol=self.atol, rtol=self.rtol) class ComplexExp(ODE): r"""The equation :lm:`\dot u = i u`""" stop_t = 1.23*pi z0 = exp([1j, 2j, 3j, 4j, 5j]) cmplx = True def f(self, z, t): return 1j*z def jac(self, z, t): return 1j*eye(5) def verify(self, zs, t): u = 
self.z0 * exp(1j*t) return allclose(u, zs, atol=self.atol, rtol=self.rtol) class Pi(ODE): r"""Integrate 1/(t + 1j) from t=-10 to t=10""" stop_t = 20 z0 = [0] cmplx = True def f(self, z, t): return array([1./(t - 10 + 1j)]) def verify(self, zs, t): u = -2j * np.arctan(10) return allclose(u, zs[-1, :], atol=self.atol, rtol=self.rtol) class CoupledDecay(ODE): r""" 3 coupled decays suited for banded treatment (banded mode makes it necessary when N>>3) """ stiff = True stop_t = 0.5 z0 = [5.0, 7.0, 13.0] lband = 1 uband = 0 lmbd = [0.17, 0.23, 0.29] # fictitious decay constants def f(self, z, t): lmbd = self.lmbd return np.array([-lmbd[0]*z[0], -lmbd[1]*z[1] + lmbd[0]*z[0], -lmbd[2]*z[2] + lmbd[1]*z[1]]) def jac(self, z, t): # The full Jacobian is # # [-lmbd[0] 0 0 ] # [ lmbd[0] -lmbd[1] 0 ] # [ 0 lmbd[1] -lmbd[2]] # # The lower and upper bandwidths are lband=1 and uband=0, resp. # The representation of this array in packed format is # # [-lmbd[0] -lmbd[1] -lmbd[2]] # [ lmbd[0] lmbd[1] 0 ] lmbd = self.lmbd j = np.zeros((self.lband + self.uband + 1, 3), order='F') def set_j(ri, ci, val): j[self.uband + ri - ci, ci] = val set_j(0, 0, -lmbd[0]) set_j(1, 0, lmbd[0]) set_j(1, 1, -lmbd[1]) set_j(2, 1, lmbd[1]) set_j(2, 2, -lmbd[2]) return j def verify(self, zs, t): # Formulae derived by hand lmbd = np.array(self.lmbd) d10 = lmbd[1] - lmbd[0] d21 = lmbd[2] - lmbd[1] d20 = lmbd[2] - lmbd[0] e0 = np.exp(-lmbd[0] * t) e1 = np.exp(-lmbd[1] * t) e2 = np.exp(-lmbd[2] * t) u = np.vstack(( self.z0[0] * e0, self.z0[1] * e1 + self.z0[0] * lmbd[0] / d10 * (e0 - e1), self.z0[2] * e2 + self.z0[1] * lmbd[1] / d21 * (e1 - e2) + lmbd[1] * lmbd[0] * self.z0[0] / d10 * (1 / d20 * (e0 - e2) - 1 / d21 * (e1 - e2)))).transpose() return allclose(u, zs, atol=self.atol, rtol=self.rtol) PROBLEMS = [SimpleOscillator, ComplexExp, Pi, CoupledDecay] #------------------------------------------------------------------------------ def f(t, x): dxdt = [x[1], -x[0]] return dxdt def jac(t, x): j = array([[0.0, 
1.0], [-1.0, 0.0]]) return j def f1(t, x, omega): dxdt = [omega*x[1], -omega*x[0]] return dxdt def jac1(t, x, omega): j = array([[0.0, omega], [-omega, 0.0]]) return j def f2(t, x, omega1, omega2): dxdt = [omega1*x[1], -omega2*x[0]] return dxdt def jac2(t, x, omega1, omega2): j = array([[0.0, omega1], [-omega2, 0.0]]) return j def fv(t, x, omega): dxdt = [omega[0]*x[1], -omega[1]*x[0]] return dxdt def jacv(t, x, omega): j = array([[0.0, omega[0]], [-omega[1], 0.0]]) return j class ODECheckParameterUse: """Call an ode-class solver with several cases of parameter use.""" # solver_name must be set before tests can be run with this class. # Set these in subclasses. solver_name = '' solver_uses_jac = False def _get_solver(self, f, jac): solver = ode(f, jac) if self.solver_uses_jac: solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7, with_jacobian=self.solver_uses_jac) else: # XXX Shouldn't set_integrator *always* accept the keyword arg # 'with_jacobian', and perhaps raise an exception if it is set # to True if the solver can't actually use it? 
solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7) return solver def _check_solver(self, solver): ic = [1.0, 0.0] solver.set_initial_value(ic, 0.0) solver.integrate(pi) assert_array_almost_equal(solver.y, [-1.0, 0.0]) def test_no_params(self): solver = self._get_solver(f, jac) self._check_solver(solver) def test_one_scalar_param(self): solver = self._get_solver(f1, jac1) omega = 1.0 solver.set_f_params(omega) if self.solver_uses_jac: solver.set_jac_params(omega) self._check_solver(solver) def test_two_scalar_params(self): solver = self._get_solver(f2, jac2) omega1 = 1.0 omega2 = 1.0 solver.set_f_params(omega1, omega2) if self.solver_uses_jac: solver.set_jac_params(omega1, omega2) self._check_solver(solver) def test_vector_param(self): solver = self._get_solver(fv, jacv) omega = [1.0, 1.0] solver.set_f_params(omega) if self.solver_uses_jac: solver.set_jac_params(omega) self._check_solver(solver) def test_warns_on_failure(self): # Set nsteps small to ensure failure solver = self._get_solver(f, jac) solver.set_integrator(self.solver_name, nsteps=1) ic = [1.0, 0.0] solver.set_initial_value(ic, 0.0) assert_warns(UserWarning, solver.integrate, pi) class TestDOPRI5CheckParameterUse(ODECheckParameterUse): solver_name = 'dopri5' solver_uses_jac = False class TestDOP853CheckParameterUse(ODECheckParameterUse): solver_name = 'dop853' solver_uses_jac = False class TestVODECheckParameterUse(ODECheckParameterUse): solver_name = 'vode' solver_uses_jac = True class TestZVODECheckParameterUse(ODECheckParameterUse): solver_name = 'zvode' solver_uses_jac = True class TestLSODACheckParameterUse(ODECheckParameterUse): solver_name = 'lsoda' solver_uses_jac = True def test_odeint_trivial_time(): # Test that odeint succeeds when given a single time point # and full_output=True. This is a regression test for gh-4282. 
y0 = 1 t = [0] y, info = odeint(lambda y, t: -y, y0, t, full_output=True) assert_array_equal(y, np.array([[y0]])) def test_odeint_banded_jacobian(): # Test the use of the `Dfun`, `ml` and `mu` options of odeint. def func(y, t, c): return c.dot(y) def jac(y, t, c): return c def jac_transpose(y, t, c): return c.T.copy(order='C') def bjac_rows(y, t, c): jac = np.row_stack((np.r_[0, np.diag(c, 1)], np.diag(c), np.r_[np.diag(c, -1), 0], np.r_[np.diag(c, -2), 0, 0])) return jac def bjac_cols(y, t, c): return bjac_rows(y, t, c).T.copy(order='C') c = array([[-205, 0.01, 0.00, 0.0], [0.1, -2.50, 0.02, 0.0], [1e-3, 0.01, -2.0, 0.01], [0.00, 0.00, 0.1, -1.0]]) y0 = np.ones(4) t = np.array([0, 5, 10, 100]) # Use the full Jacobian. sol1, info1 = odeint(func, y0, t, args=(c,), full_output=True, atol=1e-13, rtol=1e-11, mxstep=10000, Dfun=jac) # Use the transposed full Jacobian, with col_deriv=True. sol2, info2 = odeint(func, y0, t, args=(c,), full_output=True, atol=1e-13, rtol=1e-11, mxstep=10000, Dfun=jac_transpose, col_deriv=True) # Use the banded Jacobian. sol3, info3 = odeint(func, y0, t, args=(c,), full_output=True, atol=1e-13, rtol=1e-11, mxstep=10000, Dfun=bjac_rows, ml=2, mu=1) # Use the transposed banded Jacobian, with col_deriv=True. sol4, info4 = odeint(func, y0, t, args=(c,), full_output=True, atol=1e-13, rtol=1e-11, mxstep=10000, Dfun=bjac_cols, ml=2, mu=1, col_deriv=True) assert_allclose(sol1, sol2, err_msg="sol1 != sol2") assert_allclose(sol1, sol3, atol=1e-12, err_msg="sol1 != sol3") assert_allclose(sol3, sol4, err_msg="sol3 != sol4") # Verify that the number of jacobian evaluations was the same for the # calls of odeint with a full jacobian and with a banded jacobian. This is # a regression test--there was a bug in the handling of banded jacobians # that resulted in an incorrect jacobian matrix being passed to the LSODA # code. That would cause errors or excessive jacobian evaluations. 
assert_array_equal(info1['nje'], info2['nje']) assert_array_equal(info3['nje'], info4['nje']) # Test the use of tfirst sol1ty, info1ty = odeint(lambda t, y, c: func(y, t, c), y0, t, args=(c,), full_output=True, atol=1e-13, rtol=1e-11, mxstep=10000, Dfun=lambda t, y, c: jac(y, t, c), tfirst=True) # The code should execute the exact same sequence of floating point # calculations, so these should be exactly equal. We'll be safe and use # a small tolerance. assert_allclose(sol1, sol1ty, rtol=1e-12, err_msg="sol1 != sol1ty") def test_odeint_errors(): def sys1d(x, t): return -100*x def bad1(x, t): return 1.0/0 def bad2(x, t): return "foo" def bad_jac1(x, t): return 1.0/0 def bad_jac2(x, t): return [["foo"]] def sys2d(x, t): return [-100*x[0], -0.1*x[1]] def sys2d_bad_jac(x, t): return [[1.0/0, 0], [0, -0.1]] assert_raises(ZeroDivisionError, odeint, bad1, 1.0, [0, 1]) assert_raises(ValueError, odeint, bad2, 1.0, [0, 1]) assert_raises(ZeroDivisionError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac1) assert_raises(ValueError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac2) assert_raises(ZeroDivisionError, odeint, sys2d, [1.0, 1.0], [0, 1], Dfun=sys2d_bad_jac) def test_odeint_bad_shapes(): # Tests of some errors that can occur with odeint. def badrhs(x, t): return [1, -1] def sys1(x, t): return -100*x def badjac(x, t): return [[0, 0, 0]] # y0 must be at most 1-d. bad_y0 = [[0, 0], [0, 0]] assert_raises(ValueError, odeint, sys1, bad_y0, [0, 1]) # t must be at most 1-d. bad_t = [[0, 1], [2, 3]] assert_raises(ValueError, odeint, sys1, [10.0], bad_t) # y0 is 10, but badrhs(x, t) returns [1, -1]. assert_raises(RuntimeError, odeint, badrhs, 10, [0, 1]) # shape of array returned by badjac(x, t) is not correct. 
assert_raises(RuntimeError, odeint, sys1, [10, 10], [0, 1], Dfun=badjac) def test_repeated_t_values(): """Regression test for gh-8217.""" def func(x, t): return -0.25*x t = np.zeros(10) sol = odeint(func, [1.], t) assert_array_equal(sol, np.ones((len(t), 1))) tau = 4*np.log(2) t = [0]*9 + [tau, 2*tau, 2*tau, 3*tau] sol = odeint(func, [1, 2], t, rtol=1e-12, atol=1e-12) expected_sol = np.array([[1.0, 2.0]]*9 + [[0.5, 1.0], [0.25, 0.5], [0.25, 0.5], [0.125, 0.25]]) assert_allclose(sol, expected_sol) # Edge case: empty t sequence. sol = odeint(func, [1.], []) assert_array_equal(sol, np.array([], dtype=np.float64).reshape((0, 1))) # t values are not monotonic. assert_raises(ValueError, odeint, func, [1.], [0, 1, 0.5, 0]) assert_raises(ValueError, odeint, func, [1, 2, 3], [0, -1, -2, 3])
24,403
28.226347
79
py
scipy
scipy-main/scipy/integrate/tests/test_banded_ode_solvers.py
import itertools import numpy as np from numpy.testing import assert_allclose from scipy.integrate import ode def _band_count(a): """Returns ml and mu, the lower and upper band sizes of a.""" nrows, ncols = a.shape ml = 0 for k in range(-nrows+1, 0): if np.diag(a, k).any(): ml = -k break mu = 0 for k in range(nrows-1, 0, -1): if np.diag(a, k).any(): mu = k break return ml, mu def _linear_func(t, y, a): """Linear system dy/dt = a * y""" return a.dot(y) def _linear_jac(t, y, a): """Jacobian of a * y is a.""" return a def _linear_banded_jac(t, y, a): """Banded Jacobian.""" ml, mu = _band_count(a) bjac = [np.r_[[0] * k, np.diag(a, k)] for k in range(mu, 0, -1)] bjac.append(np.diag(a)) for k in range(-1, -ml-1, -1): bjac.append(np.r_[np.diag(a, k), [0] * (-k)]) return bjac def _solve_linear_sys(a, y0, tend=1, dt=0.1, solver=None, method='bdf', use_jac=True, with_jacobian=False, banded=False): """Use scipy.integrate.ode to solve a linear system of ODEs. a : square ndarray Matrix of the linear system to be solved. y0 : ndarray Initial condition tend : float Stop time. dt : float Step size of the output. solver : str If not None, this must be "vode", "lsoda" or "zvode". method : str Either "bdf" or "adams". use_jac : bool Determines if the jacobian function is passed to ode(). with_jacobian : bool Passed to ode.set_integrator(). banded : bool Determines whether a banded or full jacobian is used. If `banded` is True, `lband` and `uband` are determined by the values in `a`. 
""" if banded: lband, uband = _band_count(a) else: lband = None uband = None if use_jac: if banded: r = ode(_linear_func, _linear_banded_jac) else: r = ode(_linear_func, _linear_jac) else: r = ode(_linear_func) if solver is None: if np.iscomplexobj(a): solver = "zvode" else: solver = "vode" r.set_integrator(solver, with_jacobian=with_jacobian, method=method, lband=lband, uband=uband, rtol=1e-9, atol=1e-10, ) t0 = 0 r.set_initial_value(y0, t0) r.set_f_params(a) r.set_jac_params(a) t = [t0] y = [y0] while r.successful() and r.t < tend: r.integrate(r.t + dt) t.append(r.t) y.append(r.y) t = np.array(t) y = np.array(y) return t, y def _analytical_solution(a, y0, t): """ Analytical solution to the linear differential equations dy/dt = a*y. The solution is only valid if `a` is diagonalizable. Returns a 2-D array with shape (len(t), len(y0)). """ lam, v = np.linalg.eig(a) c = np.linalg.solve(v, y0) e = c * np.exp(lam * t.reshape(-1, 1)) sol = e.dot(v.T) return sol def test_banded_ode_solvers(): # Test the "lsoda", "vode" and "zvode" solvers of the `ode` class # with a system that has a banded Jacobian matrix. 
t_exact = np.linspace(0, 1.0, 5) # --- Real arrays for testing the "lsoda" and "vode" solvers --- # lband = 2, uband = 1: a_real = np.array([[-0.6, 0.1, 0.0, 0.0, 0.0], [0.2, -0.5, 0.9, 0.0, 0.0], [0.1, 0.1, -0.4, 0.1, 0.0], [0.0, 0.3, -0.1, -0.9, -0.3], [0.0, 0.0, 0.1, 0.1, -0.7]]) # lband = 0, uband = 1: a_real_upper = np.triu(a_real) # lband = 2, uband = 0: a_real_lower = np.tril(a_real) # lband = 0, uband = 0: a_real_diag = np.triu(a_real_lower) real_matrices = [a_real, a_real_upper, a_real_lower, a_real_diag] real_solutions = [] for a in real_matrices: y0 = np.arange(1, a.shape[0] + 1) y_exact = _analytical_solution(a, y0, t_exact) real_solutions.append((y0, t_exact, y_exact)) def check_real(idx, solver, meth, use_jac, with_jac, banded): a = real_matrices[idx] y0, t_exact, y_exact = real_solutions[idx] t, y = _solve_linear_sys(a, y0, tend=t_exact[-1], dt=t_exact[1] - t_exact[0], solver=solver, method=meth, use_jac=use_jac, with_jacobian=with_jac, banded=banded) assert_allclose(t, t_exact) assert_allclose(y, y_exact) for idx in range(len(real_matrices)): p = [['vode', 'lsoda'], # solver ['bdf', 'adams'], # method [False, True], # use_jac [False, True], # with_jacobian [False, True]] # banded for solver, meth, use_jac, with_jac, banded in itertools.product(*p): check_real(idx, solver, meth, use_jac, with_jac, banded) # --- Complex arrays for testing the "zvode" solver --- # complex, lband = 2, uband = 1: a_complex = a_real - 0.5j * a_real # complex, lband = 0, uband = 0: a_complex_diag = np.diag(np.diag(a_complex)) complex_matrices = [a_complex, a_complex_diag] complex_solutions = [] for a in complex_matrices: y0 = np.arange(1, a.shape[0] + 1) + 1j y_exact = _analytical_solution(a, y0, t_exact) complex_solutions.append((y0, t_exact, y_exact)) def check_complex(idx, solver, meth, use_jac, with_jac, banded): a = complex_matrices[idx] y0, t_exact, y_exact = complex_solutions[idx] t, y = _solve_linear_sys(a, y0, tend=t_exact[-1], dt=t_exact[1] - t_exact[0], 
solver=solver, method=meth, use_jac=use_jac, with_jacobian=with_jac, banded=banded) assert_allclose(t, t_exact) assert_allclose(y, y_exact) for idx in range(len(complex_matrices)): p = [['bdf', 'adams'], # method [False, True], # use_jac [False, True], # with_jacobian [False, True]] # banded for meth, use_jac, with_jac, banded in itertools.product(*p): check_complex(idx, "zvode", meth, use_jac, with_jac, banded)
6,687
29.538813
77
py
scipy
scipy-main/scipy/integrate/_ivp/radau.py
import numpy as np from scipy.linalg import lu_factor, lu_solve from scipy.sparse import csc_matrix, issparse, eye from scipy.sparse.linalg import splu from scipy.optimize._numdiff import group_columns from .common import (validate_max_step, validate_tol, select_initial_step, norm, num_jac, EPS, warn_extraneous, validate_first_step) from .base import OdeSolver, DenseOutput S6 = 6 ** 0.5 # Butcher tableau. A is not used directly, see below. C = np.array([(4 - S6) / 10, (4 + S6) / 10, 1]) E = np.array([-13 - 7 * S6, -13 + 7 * S6, -1]) / 3 # Eigendecomposition of A is done: A = T L T**-1. There is 1 real eigenvalue # and a complex conjugate pair. They are written below. MU_REAL = 3 + 3 ** (2 / 3) - 3 ** (1 / 3) MU_COMPLEX = (3 + 0.5 * (3 ** (1 / 3) - 3 ** (2 / 3)) - 0.5j * (3 ** (5 / 6) + 3 ** (7 / 6))) # These are transformation matrices. T = np.array([ [0.09443876248897524, -0.14125529502095421, 0.03002919410514742], [0.25021312296533332, 0.20412935229379994, -0.38294211275726192], [1, 1, 0]]) TI = np.array([ [4.17871859155190428, 0.32768282076106237, 0.52337644549944951], [-4.17871859155190428, -0.32768282076106237, 0.47662355450055044], [0.50287263494578682, -2.57192694985560522, 0.59603920482822492]]) # These linear combinations are used in the algorithm. TI_REAL = TI[0] TI_COMPLEX = TI[1] + 1j * TI[2] # Interpolator coefficients. P = np.array([ [13/3 + 7*S6/3, -23/3 - 22*S6/3, 10/3 + 5 * S6], [13/3 - 7*S6/3, -23/3 + 22*S6/3, 10/3 - 5 * S6], [1/3, -8/3, 10/3]]) NEWTON_MAXITER = 6 # Maximum number of Newton iterations. MIN_FACTOR = 0.2 # Minimum allowed decrease in a step size. MAX_FACTOR = 10 # Maximum allowed increase in a step size. def solve_collocation_system(fun, t, y, h, Z0, scale, tol, LU_real, LU_complex, solve_lu): """Solve the collocation system. Parameters ---------- fun : callable Right-hand side of the system. t : float Current time. y : ndarray, shape (n,) Current state. h : float Step to try. 
Z0 : ndarray, shape (3, n) Initial guess for the solution. It determines new values of `y` at ``t + h * C`` as ``y + Z0``, where ``C`` is the Radau method constants. scale : ndarray, shape (n) Problem tolerance scale, i.e. ``rtol * abs(y) + atol``. tol : float Tolerance to which solve the system. This value is compared with the normalized by `scale` error. LU_real, LU_complex LU decompositions of the system Jacobians. solve_lu : callable Callable which solves a linear system given a LU decomposition. The signature is ``solve_lu(LU, b)``. Returns ------- converged : bool Whether iterations converged. n_iter : int Number of completed iterations. Z : ndarray, shape (3, n) Found solution. rate : float The rate of convergence. """ n = y.shape[0] M_real = MU_REAL / h M_complex = MU_COMPLEX / h W = TI.dot(Z0) Z = Z0 F = np.empty((3, n)) ch = h * C dW_norm_old = None dW = np.empty_like(W) converged = False rate = None for k in range(NEWTON_MAXITER): for i in range(3): F[i] = fun(t + ch[i], y + Z[i]) if not np.all(np.isfinite(F)): break f_real = F.T.dot(TI_REAL) - M_real * W[0] f_complex = F.T.dot(TI_COMPLEX) - M_complex * (W[1] + 1j * W[2]) dW_real = solve_lu(LU_real, f_real) dW_complex = solve_lu(LU_complex, f_complex) dW[0] = dW_real dW[1] = dW_complex.real dW[2] = dW_complex.imag dW_norm = norm(dW / scale) if dW_norm_old is not None: rate = dW_norm / dW_norm_old if (rate is not None and (rate >= 1 or rate ** (NEWTON_MAXITER - k) / (1 - rate) * dW_norm > tol)): break W += dW Z = T.dot(W) if (dW_norm == 0 or rate is not None and rate / (1 - rate) * dW_norm < tol): converged = True break dW_norm_old = dW_norm return converged, k + 1, Z, rate def predict_factor(h_abs, h_abs_old, error_norm, error_norm_old): """Predict by which factor to increase/decrease the step size. The algorithm is described in [1]_. Parameters ---------- h_abs, h_abs_old : float Current and previous values of the step size, `h_abs_old` can be None (see Notes). 
error_norm, error_norm_old : float Current and previous values of the error norm, `error_norm_old` can be None (see Notes). Returns ------- factor : float Predicted factor. Notes ----- If `h_abs_old` and `error_norm_old` are both not None then a two-step algorithm is used, otherwise a one-step algorithm is used. References ---------- .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential Equations II: Stiff and Differential-Algebraic Problems", Sec. IV.8. """ if error_norm_old is None or h_abs_old is None or error_norm == 0: multiplier = 1 else: multiplier = h_abs / h_abs_old * (error_norm_old / error_norm) ** 0.25 with np.errstate(divide='ignore'): factor = min(1, multiplier) * error_norm ** -0.25 return factor class Radau(OdeSolver): """Implicit Runge-Kutta method of Radau IIA family of order 5. The implementation follows [1]_. The error is controlled with a third-order accurate embedded formula. A cubic polynomial which satisfies the collocation conditions is used for the dense output. Parameters ---------- fun : callable Right-hand side of the system: the time derivative of the state ``y`` at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must return an array of the same shape as ``y``. See `vectorized` for more information. t0 : float Initial time. y0 : array_like, shape (n,) Initial state. t_bound : float Boundary time - the integration won't continue beyond it. It also determines the direction of the integration. first_step : float or None, optional Initial step size. Default is ``None`` which means that the algorithm should choose. max_step : float, optional Maximum allowed step size. Default is np.inf, i.e., the step size is not bounded and determined solely by the solver. rtol, atol : float and array_like, optional Relative and absolute tolerances. The solver keeps the local error estimates less than ``atol + rtol * abs(y)``. 
HHere `rtol` controls a relative accuracy (number of correct digits), while `atol` controls absolute accuracy (number of correct decimal places). To achieve the desired `rtol`, set `atol` to be smaller than the smallest value that can be expected from ``rtol * abs(y)`` so that `rtol` dominates the allowable error. If `atol` is larger than ``rtol * abs(y)`` the number of correct digits is not guaranteed. Conversely, to achieve the desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller than `atol`. If components of y have different scales, it might be beneficial to set different `atol` values for different components by passing array_like with shape (n,) for `atol`. Default values are 1e-3 for `rtol` and 1e-6 for `atol`. jac : {None, array_like, sparse_matrix, callable}, optional Jacobian matrix of the right-hand side of the system with respect to y, required by this method. The Jacobian matrix has shape (n, n) and its element (i, j) is equal to ``d f_i / d y_j``. There are three ways to define the Jacobian: * If array_like or sparse_matrix, the Jacobian is assumed to be constant. * If callable, the Jacobian is assumed to depend on both t and y; it will be called as ``jac(t, y)`` as necessary. For the 'Radau' and 'BDF' methods, the return value might be a sparse matrix. * If None (default), the Jacobian will be approximated by finite differences. It is generally recommended to provide the Jacobian rather than relying on a finite-difference approximation. jac_sparsity : {None, array_like, sparse matrix}, optional Defines a sparsity structure of the Jacobian matrix for a finite-difference approximation. Its shape must be (n, n). This argument is ignored if `jac` is not `None`. If the Jacobian has only few non-zero elements in *each* row, providing the sparsity structure will greatly speed up the computations [2]_. A zero entry means that a corresponding element in the Jacobian is always zero. If None (default), the Jacobian is assumed to be dense. 
vectorized : bool, optional Whether `fun` can be called in a vectorized fashion. Default is False. If ``vectorized`` is False, `fun` will always be called with ``y`` of shape ``(n,)``, where ``n = len(y0)``. If ``vectorized`` is True, `fun` may be called with ``y`` of shape ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of the returned array is the time derivative of the state corresponding with a column of ``y``). Setting ``vectorized=True`` allows for faster finite difference approximation of the Jacobian by this method, but may result in slower execution overall in some circumstances (e.g. small ``len(y0)``). Attributes ---------- n : int Number of equations. status : string Current status of the solver: 'running', 'finished' or 'failed'. t_bound : float Boundary time. direction : float Integration direction: +1 or -1. t : float Current time. y : ndarray Current state. t_old : float Previous time. None if no steps were made yet. step_size : float Size of the last successful step. None if no steps were made yet. nfev : int Number of evaluations of the right-hand side. njev : int Number of evaluations of the Jacobian. nlu : int Number of LU decompositions. References ---------- .. [1] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II: Stiff and Differential-Algebraic Problems", Sec. IV.8. .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13, pp. 117-120, 1974. 
""" def __init__(self, fun, t0, y0, t_bound, max_step=np.inf, rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None, vectorized=False, first_step=None, **extraneous): warn_extraneous(extraneous) super().__init__(fun, t0, y0, t_bound, vectorized) self.y_old = None self.max_step = validate_max_step(max_step) self.rtol, self.atol = validate_tol(rtol, atol, self.n) self.f = self.fun(self.t, self.y) # Select initial step assuming the same order which is used to control # the error. if first_step is None: self.h_abs = select_initial_step( self.fun, self.t, self.y, self.f, self.direction, 3, self.rtol, self.atol) else: self.h_abs = validate_first_step(first_step, t0, t_bound) self.h_abs_old = None self.error_norm_old = None self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5)) self.sol = None self.jac_factor = None self.jac, self.J = self._validate_jac(jac, jac_sparsity) if issparse(self.J): def lu(A): self.nlu += 1 return splu(A) def solve_lu(LU, b): return LU.solve(b) I = eye(self.n, format='csc') else: def lu(A): self.nlu += 1 return lu_factor(A, overwrite_a=True) def solve_lu(LU, b): return lu_solve(LU, b, overwrite_b=True) I = np.identity(self.n) self.lu = lu self.solve_lu = solve_lu self.I = I self.current_jac = True self.LU_real = None self.LU_complex = None self.Z = None def _validate_jac(self, jac, sparsity): t0 = self.t y0 = self.y if jac is None: if sparsity is not None: if issparse(sparsity): sparsity = csc_matrix(sparsity) groups = group_columns(sparsity) sparsity = (sparsity, groups) def jac_wrapped(t, y, f): self.njev += 1 J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f, self.atol, self.jac_factor, sparsity) return J J = jac_wrapped(t0, y0, self.f) elif callable(jac): J = jac(t0, y0) self.njev = 1 if issparse(J): J = csc_matrix(J) def jac_wrapped(t, y, _=None): self.njev += 1 return csc_matrix(jac(t, y), dtype=float) else: J = np.asarray(J, dtype=float) def jac_wrapped(t, y, _=None): self.njev += 1 return np.asarray(jac(t, y), dtype=float) if 
J.shape != (self.n, self.n): raise ValueError("`jac` is expected to have shape {}, but " "actually has {}." .format((self.n, self.n), J.shape)) else: if issparse(jac): J = csc_matrix(jac) else: J = np.asarray(jac, dtype=float) if J.shape != (self.n, self.n): raise ValueError("`jac` is expected to have shape {}, but " "actually has {}." .format((self.n, self.n), J.shape)) jac_wrapped = None return jac_wrapped, J def _step_impl(self): t = self.t y = self.y f = self.f max_step = self.max_step atol = self.atol rtol = self.rtol min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t) if self.h_abs > max_step: h_abs = max_step h_abs_old = None error_norm_old = None elif self.h_abs < min_step: h_abs = min_step h_abs_old = None error_norm_old = None else: h_abs = self.h_abs h_abs_old = self.h_abs_old error_norm_old = self.error_norm_old J = self.J LU_real = self.LU_real LU_complex = self.LU_complex current_jac = self.current_jac jac = self.jac rejected = False step_accepted = False message = None while not step_accepted: if h_abs < min_step: return False, self.TOO_SMALL_STEP h = h_abs * self.direction t_new = t + h if self.direction * (t_new - self.t_bound) > 0: t_new = self.t_bound h = t_new - t h_abs = np.abs(h) if self.sol is None: Z0 = np.zeros((3, y.shape[0])) else: Z0 = self.sol(t + h * C).T - y scale = atol + np.abs(y) * rtol converged = False while not converged: if LU_real is None or LU_complex is None: LU_real = self.lu(MU_REAL / h * self.I - J) LU_complex = self.lu(MU_COMPLEX / h * self.I - J) converged, n_iter, Z, rate = solve_collocation_system( self.fun, t, y, h, Z0, scale, self.newton_tol, LU_real, LU_complex, self.solve_lu) if not converged: if current_jac: break J = self.jac(t, y, f) current_jac = True LU_real = None LU_complex = None if not converged: h_abs *= 0.5 LU_real = None LU_complex = None continue y_new = y + Z[-1] ZE = Z.T.dot(E) / h error = self.solve_lu(LU_real, f + ZE) scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol 
error_norm = norm(error / scale) safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER + n_iter) if rejected and error_norm > 1: error = self.solve_lu(LU_real, self.fun(t, y + error) + ZE) error_norm = norm(error / scale) if error_norm > 1: factor = predict_factor(h_abs, h_abs_old, error_norm, error_norm_old) h_abs *= max(MIN_FACTOR, safety * factor) LU_real = None LU_complex = None rejected = True else: step_accepted = True recompute_jac = jac is not None and n_iter > 2 and rate > 1e-3 factor = predict_factor(h_abs, h_abs_old, error_norm, error_norm_old) factor = min(MAX_FACTOR, safety * factor) if not recompute_jac and factor < 1.2: factor = 1 else: LU_real = None LU_complex = None f_new = self.fun(t_new, y_new) if recompute_jac: J = jac(t_new, y_new, f_new) current_jac = True elif jac is not None: current_jac = False self.h_abs_old = self.h_abs self.error_norm_old = error_norm self.h_abs = h_abs * factor self.y_old = y self.t = t_new self.y = y_new self.f = f_new self.Z = Z self.LU_real = LU_real self.LU_complex = LU_complex self.current_jac = current_jac self.J = J self.t_old = t self.sol = self._compute_dense_output() return step_accepted, message def _compute_dense_output(self): Q = np.dot(self.Z.T, P) return RadauDenseOutput(self.t_old, self.t, self.y_old, Q) def _dense_output_impl(self): return self.sol class RadauDenseOutput(DenseOutput): def __init__(self, t_old, t, y_old, Q): super().__init__(t_old, t) self.h = t - t_old self.Q = Q self.order = Q.shape[1] - 1 self.y_old = y_old def _call_impl(self, t): x = (t - self.t_old) / self.h if t.ndim == 0: p = np.tile(x, self.order + 1) p = np.cumprod(p) else: p = np.tile(x, (self.order + 1, 1)) p = np.cumprod(p, axis=0) # Here we don't multiply by h, not a mistake. y = np.dot(self.Q, p) if y.ndim == 2: y += self.y_old[:, None] else: y += self.y_old return y
19,743
33.337391
80
py
scipy
scipy-main/scipy/integrate/_ivp/base.py
import numpy as np def check_arguments(fun, y0, support_complex): """Helper function for checking arguments common to all solvers.""" y0 = np.asarray(y0) if np.issubdtype(y0.dtype, np.complexfloating): if not support_complex: raise ValueError("`y0` is complex, but the chosen solver does " "not support integration in a complex domain.") dtype = complex else: dtype = float y0 = y0.astype(dtype, copy=False) if y0.ndim != 1: raise ValueError("`y0` must be 1-dimensional.") if not np.isfinite(y0).all(): raise ValueError("All components of the initial state `y0` must be finite.") def fun_wrapped(t, y): return np.asarray(fun(t, y), dtype=dtype) return fun_wrapped, y0 class OdeSolver: """Base class for ODE solvers. In order to implement a new solver you need to follow the guidelines: 1. A constructor must accept parameters presented in the base class (listed below) along with any other parameters specific to a solver. 2. A constructor must accept arbitrary extraneous arguments ``**extraneous``, but warn that these arguments are irrelevant using `common.warn_extraneous` function. Do not pass these arguments to the base class. 3. A solver must implement a private method `_step_impl(self)` which propagates a solver one step further. It must return tuple ``(success, message)``, where ``success`` is a boolean indicating whether a step was successful, and ``message`` is a string containing description of a failure if a step failed or None otherwise. 4. A solver must implement a private method `_dense_output_impl(self)`, which returns a `DenseOutput` object covering the last successful step. 5. A solver must have attributes listed below in Attributes section. Note that ``t_old`` and ``step_size`` are updated automatically. 6. Use `fun(self, t, y)` method for the system rhs evaluation, this way the number of function evaluations (`nfev`) will be tracked automatically. 7. 
For convenience, a base class provides `fun_single(self, t, y)` and `fun_vectorized(self, t, y)` for evaluating the rhs in non-vectorized and vectorized fashions respectively (regardless of how `fun` from the constructor is implemented). These calls don't increment `nfev`. 8. If a solver uses a Jacobian matrix and LU decompositions, it should track the number of Jacobian evaluations (`njev`) and the number of LU decompositions (`nlu`). 9. By convention, the function evaluations used to compute a finite difference approximation of the Jacobian should not be counted in `nfev`, thus use `fun_single(self, t, y)` or `fun_vectorized(self, t, y)` when computing a finite difference approximation of the Jacobian. Parameters ---------- fun : callable Right-hand side of the system: the time derivative of the state ``y`` at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must return an array of the same shape as ``y``. See `vectorized` for more information. t0 : float Initial time. y0 : array_like, shape (n,) Initial state. t_bound : float Boundary time --- the integration won't continue beyond it. It also determines the direction of the integration. vectorized : bool Whether `fun` can be called in a vectorized fashion. Default is False. If ``vectorized`` is False, `fun` will always be called with ``y`` of shape ``(n,)``, where ``n = len(y0)``. If ``vectorized`` is True, `fun` may be called with ``y`` of shape ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of the returned array is the time derivative of the state corresponding with a column of ``y``). Setting ``vectorized=True`` allows for faster finite difference approximation of the Jacobian by methods 'Radau' and 'BDF', but will result in slower execution for other methods. 
It can also result in slower overall execution for 'Radau' and 'BDF' in some circumstances (e.g. small ``len(y0)``). support_complex : bool, optional Whether integration in a complex domain should be supported. Generally determined by a derived solver class capabilities. Default is False. Attributes ---------- n : int Number of equations. status : string Current status of the solver: 'running', 'finished' or 'failed'. t_bound : float Boundary time. direction : float Integration direction: +1 or -1. t : float Current time. y : ndarray Current state. t_old : float Previous time. None if no steps were made yet. step_size : float Size of the last successful step. None if no steps were made yet. nfev : int Number of the system's rhs evaluations. njev : int Number of the Jacobian evaluations. nlu : int Number of LU decompositions. """ TOO_SMALL_STEP = "Required step size is less than spacing between numbers." def __init__(self, fun, t0, y0, t_bound, vectorized, support_complex=False): self.t_old = None self.t = t0 self._fun, self.y = check_arguments(fun, y0, support_complex) self.t_bound = t_bound self.vectorized = vectorized if vectorized: def fun_single(t, y): return self._fun(t, y[:, None]).ravel() fun_vectorized = self._fun else: fun_single = self._fun def fun_vectorized(t, y): f = np.empty_like(y) for i, yi in enumerate(y.T): f[:, i] = self._fun(t, yi) return f def fun(t, y): self.nfev += 1 return self.fun_single(t, y) self.fun = fun self.fun_single = fun_single self.fun_vectorized = fun_vectorized self.direction = np.sign(t_bound - t0) if t_bound != t0 else 1 self.n = self.y.size self.status = 'running' self.nfev = 0 self.njev = 0 self.nlu = 0 @property def step_size(self): if self.t_old is None: return None else: return np.abs(self.t - self.t_old) def step(self): """Perform one integration step. Returns ------- message : string or None Report from the solver. Typically a reason for a failure if `self.status` is 'failed' after the step was taken or None otherwise. 
""" if self.status != 'running': raise RuntimeError("Attempt to step on a failed or finished " "solver.") if self.n == 0 or self.t == self.t_bound: # Handle corner cases of empty solver or no integration. self.t_old = self.t self.t = self.t_bound message = None self.status = 'finished' else: t = self.t success, message = self._step_impl() if not success: self.status = 'failed' else: self.t_old = t if self.direction * (self.t - self.t_bound) >= 0: self.status = 'finished' return message def dense_output(self): """Compute a local interpolant over the last successful step. Returns ------- sol : `DenseOutput` Local interpolant over the last successful step. """ if self.t_old is None: raise RuntimeError("Dense output is available after a successful " "step was made.") if self.n == 0 or self.t == self.t_old: # Handle corner cases of empty solver and no integration. return ConstantDenseOutput(self.t_old, self.t, self.y) else: return self._dense_output_impl() def _step_impl(self): raise NotImplementedError def _dense_output_impl(self): raise NotImplementedError class DenseOutput: """Base class for local interpolant over step made by an ODE solver. It interpolates between `t_min` and `t_max` (see Attributes below). Evaluation outside this interval is not forbidden, but the accuracy is not guaranteed. Attributes ---------- t_min, t_max : float Time range of the interpolation. """ def __init__(self, t_old, t): self.t_old = t_old self.t = t self.t_min = min(t, t_old) self.t_max = max(t, t_old) def __call__(self, t): """Evaluate the interpolant. Parameters ---------- t : float or array_like with shape (n_points,) Points to evaluate the solution at. Returns ------- y : ndarray, shape (n,) or (n, n_points) Computed values. Shape depends on whether `t` was a scalar or a 1-D array. 
""" t = np.asarray(t) if t.ndim > 1: raise ValueError("`t` must be a float or a 1-D array.") return self._call_impl(t) def _call_impl(self, t): raise NotImplementedError class ConstantDenseOutput(DenseOutput): """Constant value interpolator. This class used for degenerate integration cases: equal integration limits or a system with 0 equations. """ def __init__(self, t_old, t, value): super().__init__(t_old, t) self.value = value def _call_impl(self, t): if t.ndim == 0: return self.value else: ret = np.empty((self.value.shape[0], t.shape[0])) ret[:] = self.value[:, None] return ret
10,295
34.381443
84
py
scipy
scipy-main/scipy/integrate/_ivp/lsoda.py
import numpy as np
from scipy.integrate import ode
from .common import validate_tol, validate_first_step, warn_extraneous
from .base import OdeSolver, DenseOutput


class LSODA(OdeSolver):
    """Adams/BDF method with automatic stiffness detection and switching.

    This is a wrapper to the Fortran solver from ODEPACK [1]_. It switches
    automatically between the nonstiff Adams method and the stiff BDF method.
    The method was originally detailed in [2]_.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system: the time derivative of the state ``y``
        at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is
        a scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun``
        must return an array of the same shape as ``y``.
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    min_step : float, optional
        Minimum allowed step size. Default is 0.0, i.e., determined solely by
        the solver.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., determined solely
        by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. `atol` may be an
        array_like with shape (n,) to give per-component absolute tolerances.
        Default values are 1e-3 for `rtol` and 1e-6 for `atol`.
    jac : None or callable, optional
        Jacobian matrix of the right-hand side of the system with respect to
        ``y``; called as ``jac(t, y)``. If None (default), the Jacobian will
        be approximated by finite differences.
    lband, uband : int or None
        Jacobian bandwidth parameters: ``jac[i, j] != 0`` only for
        ``i - lband <= j <= i + uband``. Setting these requires `jac` to
        return the Jacobian in the packed format of
        `scipy.linalg.solve_banded`: ``jac_packed[uband + i - j, j] =
        jac[i, j]``. They can also be used with ``jac=None`` to reduce the
        number of finite-difference Jacobian evaluations.
    vectorized : bool, optional
        Whether `fun` may be called in a vectorized fashion. False (default)
        is recommended for this solver.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    nfev : int
        Number of evaluations of the right-hand side.
    njev : int
        Number of evaluations of the Jacobian.

    References
    ----------
    .. [1] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
           Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
           pp. 55-64, 1983.
    .. [2] L. Petzold, "Automatic selection of methods for solving stiff and
           nonstiff systems of ordinary differential equations", SIAM Journal
           on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,
           1983.
    """
    def __init__(self, fun, t0, y0, t_bound, first_step=None, min_step=0.0,
                 max_step=np.inf, rtol=1e-3, atol=1e-6, jac=None, lband=None,
                 uband=None, vectorized=False, **extraneous):
        warn_extraneous(extraneous)
        super().__init__(fun, t0, y0, t_bound, vectorized)

        if first_step is None:
            first_step = 0  # LSODA value for automatic selection.
        else:
            first_step = validate_first_step(first_step, t0, t_bound)

        first_step *= self.direction

        if max_step == np.inf:
            max_step = 0  # LSODA value for infinity.
        elif max_step <= 0:
            raise ValueError("`max_step` must be positive.")

        if min_step < 0:
            raise ValueError("`min_step` must be nonnegative.")

        rtol, atol = validate_tol(rtol, atol, self.n)

        solver = ode(self.fun, jac)
        solver.set_integrator('lsoda', rtol=rtol, atol=atol, max_step=max_step,
                              min_step=min_step, first_step=first_step,
                              lband=lband, uband=uband)
        solver.set_initial_value(y0, t0)

        # Inject t_bound into rwork array as needed for itask=5.
        solver._integrator.rwork[0] = self.t_bound
        solver._integrator.call_args[4] = solver._integrator.rwork

        self._lsoda_solver = solver

    def _step_impl(self):
        solver = self._lsoda_solver
        integrator = solver._integrator

        # From lsoda.step and lsoda.integrate itask=5 means take a single
        # step and do not go past t_bound.
        itask = integrator.call_args[2]
        integrator.call_args[2] = 5
        solver._y, solver.t = integrator.run(
            solver.f, solver.jac or (lambda: None), solver._y, solver.t,
            self.t_bound, solver.f_params, solver.jac_params)
        integrator.call_args[2] = itask

        if solver.successful():
            self.t = solver.t
            self.y = solver._y
            # From LSODA Fortran source njev is equal to nlu.
            self.njev = integrator.iwork[12]
            self.nlu = integrator.iwork[12]
            return True, None
        else:
            return False, 'Unexpected istate in LSODA.'

    def _dense_output_impl(self):
        iwork = self._lsoda_solver._integrator.iwork
        rwork = self._lsoda_solver._integrator.rwork

        # We want to produce the Nordsieck history array, yh, up to the order
        # used in the last successful iteration. The step size is unimportant
        # because it will be scaled out in LsodaDenseOutput. Some additional
        # work may be required because ODEPACK's LSODA implementation produces
        # the Nordsieck history in the state needed for the next iteration.

        # iwork[13] contains order from last successful iteration, while
        # iwork[14] contains order to be attempted next.
        order = iwork[13]

        # rwork[11] contains the step size to be attempted next, while
        # rwork[10] contains step size from last successful iteration.
        h = rwork[11]

        # rwork[20:20 + (iwork[14] + 1) * self.n] contains entries of the
        # Nordsieck array in state needed for next iteration. We want
        # the entries up to order for the last successful step so use the
        # following.
        yh = np.reshape(rwork[20:20 + (order + 1) * self.n],
                        (self.n, order + 1), order='F').copy()
        if iwork[14] < order:
            # If the order is set to decrease then the final column of yh
            # has not been updated within ODEPACK's LSODA implementation
            # because this column will not be used in the next iteration. We
            # must rescale this column to make the associated step size
            # consistent with the other columns.
            yh[:, -1] *= (h / rwork[10]) ** order

        return LsodaDenseOutput(self.t_old, self.t, h, order, yh)


class LsodaDenseOutput(DenseOutput):
    """Polynomial interpolant built from LSODA's Nordsieck history array."""
    def __init__(self, t_old, t, h, order, yh):
        super().__init__(t_old, t)
        self.h = h
        self.yh = yh
        self.p = np.arange(order + 1)

    def _call_impl(self, t):
        # Evaluate sum_k yh[:, k] * ((t - self.t) / h) ** k.
        if t.ndim == 0:
            x = ((t - self.t) / self.h) ** self.p
        else:
            x = ((t - self.t) / self.h) ** self.p[:, None]

        return np.dot(self.yh, x)
9,927
43.124444
84
py
scipy
scipy-main/scipy/integrate/_ivp/ivp.py
import inspect
import numpy as np
from .bdf import BDF
from .radau import Radau
from .rk import RK23, RK45, DOP853
from .lsoda import LSODA
from scipy.optimize import OptimizeResult
from .common import EPS, OdeSolution
from .base import OdeSolver


METHODS = {'RK23': RK23,
           'RK45': RK45,
           'DOP853': DOP853,
           'Radau': Radau,
           'BDF': BDF,
           'LSODA': LSODA}


MESSAGES = {0: "The solver successfully reached the end of the integration "
               "interval.",
            1: "A termination event occurred."}


class OdeResult(OptimizeResult):
    pass


def prepare_events(events):
    """Standardize event functions and extract is_terminal and direction."""
    if callable(events):
        events = (events,)

    if events is not None:
        is_terminal = np.empty(len(events), dtype=bool)
        direction = np.empty(len(events))
        for i, event in enumerate(events):
            # Attributes are optional monkey-patched flags on the callables.
            try:
                is_terminal[i] = event.terminal
            except AttributeError:
                is_terminal[i] = False

            try:
                direction[i] = event.direction
            except AttributeError:
                direction[i] = 0
    else:
        is_terminal = None
        direction = None

    return events, is_terminal, direction


def solve_event_equation(event, sol, t_old, t):
    """Solve an equation corresponding to an ODE event.

    The equation is ``event(t, y(t)) = 0``, here ``y(t)`` is known from an
    ODE solver using some sort of interpolation. It is solved by
    `scipy.optimize.brentq` with xtol=atol=4*EPS.

    Parameters
    ----------
    event : callable
        Function ``event(t, y)``.
    sol : callable
        Function ``sol(t)`` which evaluates an ODE solution between `t_old`
        and `t`.
    t_old, t : float
        Previous and new values of time. They will be used as a bracketing
        interval.

    Returns
    -------
    root : float
        Found solution.
    """
    from scipy.optimize import brentq
    return brentq(lambda t: event(t, sol(t)), t_old, t,
                  xtol=4 * EPS, rtol=4 * EPS)


def handle_events(sol, events, active_events, is_terminal, t_old, t):
    """Helper function to handle events.

    Parameters
    ----------
    sol : DenseOutput
        Function ``sol(t)`` which evaluates an ODE solution between `t_old`
        and `t`.
    events : list of callables, length n_events
        Event functions with signatures ``event(t, y)``.
    active_events : ndarray
        Indices of events which occurred.
    is_terminal : ndarray, shape (n_events,)
        Which events are terminal.
    t_old, t : float
        Previous and new values of time.

    Returns
    -------
    root_indices : ndarray
        Indices of events which take zero between `t_old` and `t` and before
        a possible termination.
    roots : ndarray
        Values of t at which events occurred.
    terminate : bool
        Whether a terminal event occurred.
    """
    roots = [solve_event_equation(events[event_index], sol, t_old, t)
             for event_index in active_events]

    roots = np.asarray(roots)

    if np.any(is_terminal[active_events]):
        # Sort the roots in integration order and truncate at the first
        # terminal event.
        if t > t_old:
            order = np.argsort(roots)
        else:
            order = np.argsort(-roots)
        active_events = active_events[order]
        roots = roots[order]
        t = np.nonzero(is_terminal[active_events])[0][0]
        active_events = active_events[:t + 1]
        roots = roots[:t + 1]
        terminate = True
    else:
        terminate = False

    return active_events, roots, terminate


def find_active_events(g, g_new, direction):
    """Find which event occurred during an integration step.

    Parameters
    ----------
    g, g_new : array_like, shape (n_events,)
        Values of event functions at a current and next points.
    direction : ndarray, shape (n_events,)
        Event "direction" according to the definition in `solve_ivp`.

    Returns
    -------
    active_events : ndarray
        Indices of events which occurred during the step.
    """
    g, g_new = np.asarray(g), np.asarray(g_new)
    up = (g <= 0) & (g_new >= 0)
    down = (g >= 0) & (g_new <= 0)
    either = up | down
    mask = (up & (direction > 0) |
            down & (direction < 0) |
            either & (direction == 0))

    return np.nonzero(mask)[0]


def solve_ivp(fun, t_span, y0, method='RK45', t_eval=None, dense_output=False,
              events=None, vectorized=False, args=None, **options):
    """Solve an initial value problem for a system of ODEs.

    Numerically integrates ``dy / dt = fun(t, y)`` with ``y(t0) = y0`` over
    ``t_span = (t0, tf)``.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system, called as ``fun(t, y)`` and returning
        an array of the same shape as ``y``.
    t_span : 2-member sequence
        Interval of integration (t0, tf).
    y0 : array_like, shape (n,)
        Initial state. Pass a complex dtype for problems in the complex
        domain.
    method : string or `OdeSolver`, optional
        One of 'RK45' (default), 'RK23', 'DOP853', 'Radau', 'BDF', 'LSODA',
        or a class derived from `OdeSolver`.
    t_eval : array_like or None, optional
        Times at which to store the computed solution; must be sorted and lie
        within `t_span`. If None (default), use points selected by the solver.
    dense_output : bool, optional
        Whether to compute a continuous solution. Default is False.
    events : callable, or list of callables, optional
        Event functions ``event(t, y) -> float``; zeros are located with a
        root finder. Optional attributes on each callable: ``terminal``
        (bool) and ``direction`` (float).
    vectorized : bool, optional
        Whether `fun` can be called in a vectorized fashion. Default False.
    args : tuple, optional
        Additional arguments passed to all user-defined functions.
    **options
        Options passed to the chosen solver (e.g. `first_step`, `max_step`,
        `rtol`, `atol`, `jac`, `jac_sparsity`, `lband`, `uband`, `min_step`).

    Returns
    -------
    Bunch object with fields ``t``, ``y``, ``sol``, ``t_events``,
    ``y_events``, ``nfev``, ``njev``, ``nlu``, ``status``, ``message``,
    ``success``. ``status`` is -1 (step failed), 0 (reached end of `t_span`)
    or 1 (terminal event occurred); ``success`` is ``status >= 0``.
    """
    if method not in METHODS and not (
            inspect.isclass(method) and issubclass(method, OdeSolver)):
        raise ValueError("`method` must be one of {} or OdeSolver class."
                         .format(METHODS))

    t0, tf = map(float, t_span)

    if args is not None:
        # Wrap the user's fun (and jac, if given) in lambdas to hide the
        # additional parameters. Pass in the original fun as a keyword
        # argument to keep it in the scope of the lambda.
        try:
            _ = [*(args)]
        except TypeError as exp:
            suggestion_tuple = (
                "Supplied 'args' cannot be unpacked. Please supply `args`"
                f" as a tuple (e.g. `args=({args},)`)"
            )
            raise TypeError(suggestion_tuple) from exp

        def fun(t, x, fun=fun):
            return fun(t, x, *args)
        jac = options.get('jac')
        if callable(jac):
            options['jac'] = lambda t, x: jac(t, x, *args)

    if t_eval is not None:
        t_eval = np.asarray(t_eval)
        if t_eval.ndim != 1:
            raise ValueError("`t_eval` must be 1-dimensional.")

        if np.any(t_eval < min(t0, tf)) or np.any(t_eval > max(t0, tf)):
            raise ValueError("Values in `t_eval` are not within `t_span`.")

        d = np.diff(t_eval)
        if tf > t0 and np.any(d <= 0) or tf < t0 and np.any(d >= 0):
            raise ValueError("Values in `t_eval` are not properly sorted.")

        if tf > t0:
            t_eval_i = 0
        else:
            # Make order of t_eval decreasing to use np.searchsorted.
            t_eval = t_eval[::-1]
            # This will be an upper bound for slices.
            t_eval_i = t_eval.shape[0]

    if method in METHODS:
        method = METHODS[method]

    solver = method(fun, t0, y0, tf, vectorized=vectorized, **options)

    if t_eval is None:
        ts = [t0]
        ys = [y0]
    elif t_eval is not None and dense_output:
        ts = []
        ti = [t0]
        ys = []
    else:
        ts = []
        ys = []

    interpolants = []

    events, is_terminal, event_dir = prepare_events(events)

    if events is not None:
        if args is not None:
            # Wrap user functions in lambdas to hide the additional
            # parameters. The original event function is passed as a keyword
            # argument to the lambda to keep the original function in scope
            # (i.e., avoid the late binding closure "gotcha").
            events = [lambda t, x, event=event: event(t, x, *args)
                      for event in events]
        g = [event(t0, y0) for event in events]
        t_events = [[] for _ in range(len(events))]
        y_events = [[] for _ in range(len(events))]
    else:
        t_events = None
        y_events = None

    status = None
    while status is None:
        message = solver.step()

        if solver.status == 'finished':
            status = 0
        elif solver.status == 'failed':
            status = -1
            break

        t_old = solver.t_old
        t = solver.t
        y = solver.y

        if dense_output:
            sol = solver.dense_output()
            interpolants.append(sol)
        else:
            sol = None

        if events is not None:
            g_new = [event(t, y) for event in events]
            active_events = find_active_events(g, g_new, event_dir)
            if active_events.size > 0:
                if sol is None:
                    sol = solver.dense_output()

                root_indices, roots, terminate = handle_events(
                    sol, events, active_events, is_terminal, t_old, t)

                for e, te in zip(root_indices, roots):
                    t_events[e].append(te)
                    y_events[e].append(sol(te))

                if terminate:
                    status = 1
                    t = roots[-1]
                    y = sol(t)

            g = g_new

        if t_eval is None:
            ts.append(t)
            ys.append(y)
        else:
            # The value in t_eval equal to t will be included.
            if solver.direction > 0:
                t_eval_i_new = np.searchsorted(t_eval, t, side='right')
                t_eval_step = t_eval[t_eval_i:t_eval_i_new]
            else:
                t_eval_i_new = np.searchsorted(t_eval, t, side='left')
                # It has to be done with two slice operations, because
                # you can't slice to 0th element inclusive using backward
                # slicing.
                t_eval_step = t_eval[t_eval_i_new:t_eval_i][::-1]

            if t_eval_step.size > 0:
                if sol is None:
                    sol = solver.dense_output()
                ts.append(t_eval_step)
                ys.append(sol(t_eval_step))
                t_eval_i = t_eval_i_new

        if t_eval is not None and dense_output:
            ti.append(t)

    message = MESSAGES.get(status, message)

    if t_events is not None:
        t_events = [np.asarray(te) for te in t_events]
        y_events = [np.asarray(ye) for ye in y_events]

    if t_eval is None:
        ts = np.array(ts)
        ys = np.vstack(ys).T
    elif ts:
        ts = np.hstack(ts)
        ys = np.hstack(ys)

    if dense_output:
        if t_eval is None:
            sol = OdeSolution(
                ts, interpolants, alt_segment=True if method in [BDF, LSODA]
                else False
            )
        else:
            sol = OdeSolution(
                ti, interpolants, alt_segment=True if method in [BDF, LSODA]
                else False
            )
    else:
        sol = None

    return OdeResult(t=ts, y=ys, sol=sol, t_events=t_events,
                     y_events=y_events, nfev=solver.nfev, njev=solver.njev,
                     nlu=solver.nlu, status=status, message=message,
                     success=status >= 0)
28,893
40.634006
92
py
scipy
scipy-main/scipy/integrate/_ivp/bdf.py
# Implicit multistep (BDF with NDF modification) solver used by solve_ivp.
import numpy as np
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse import issparse, csc_matrix, eye
from scipy.sparse.linalg import splu
from scipy.optimize._numdiff import group_columns
from .common import (validate_max_step, validate_tol, select_initial_step,
                     norm, EPS, num_jac, validate_first_step,
                     warn_extraneous)
from .base import OdeSolver, DenseOutput

# Highest order of the BDF/NDF formulas used by the solver.
MAX_ORDER = 5
# Maximum number of simplified-Newton iterations per implicit solve.
NEWTON_MAXITER = 4
# Bounds on the factor by which the step size may change after a step.
MIN_FACTOR = 0.2
MAX_FACTOR = 10


def compute_R(order, factor):
    """Compute the matrix for changing the differences array."""
    # Build the recurrence coefficients row by row, then take the cumulative
    # product down the rows to obtain R; consumed by change_D below.
    I = np.arange(1, order + 1)[:, None]
    J = np.arange(1, order + 1)
    M = np.zeros((order + 1, order + 1))
    M[1:, 1:] = (I - 1 - factor * J) / I
    M[0] = 1
    return np.cumprod(M, axis=0)


def change_D(D, order, factor):
    """Change differences array in-place when step size is changed."""
    R = compute_R(order, factor)
    U = compute_R(order, 1)
    RU = R.dot(U)
    # Only the first order + 1 rows of D are meaningful at the current order.
    D[:order + 1] = np.dot(RU.T, D[:order + 1])


def solve_bdf_system(fun, t_new, y_predict, c, psi, LU, solve_lu, scale, tol):
    """Solve the algebraic system resulting from BDF method.

    Runs a simplified Newton iteration (at most ``NEWTON_MAXITER`` steps),
    reusing the supplied factorization ``LU`` of the iteration matrix via
    ``solve_lu``.

    Returns
    -------
    converged : bool
        Whether the iteration reached the tolerance ``tol``.
    n_iter : int
        Number of Newton iterations performed.
    y : ndarray
        Last iterate of the solution at ``t_new``.
    d : ndarray
        Accumulated correction, equal to ``y - y_predict``.
    """
    d = 0  # accumulated correction; stays equal to y - y_predict
    y = y_predict.copy()
    dy_norm_old = None
    converged = False
    for k in range(NEWTON_MAXITER):
        f = fun(t_new, y)
        if not np.all(np.isfinite(f)):
            break
        dy = solve_lu(LU, c * f - psi - d)
        dy_norm = norm(dy / scale)
        if dy_norm_old is None:
            rate = None
        else:
            rate = dy_norm / dy_norm_old
        # Give up if the iteration diverges, or if the error projected over
        # the remaining iterations would still exceed the tolerance.
        if (rate is not None and (rate >= 1 or
                rate ** (NEWTON_MAXITER - k) / (1 - rate) * dy_norm > tol)):
            break
        y += dy
        d += dy
        if (dy_norm == 0 or
                rate is not None and rate / (1 - rate) * dy_norm < tol):
            converged = True
            break
        dy_norm_old = dy_norm
    return converged, k + 1, y, d


class BDF(OdeSolver):
    """Implicit method based on backward-differentiation formulas.

    This is a variable order method with the order varying automatically from
    1 to 5. The general framework of the BDF algorithm is described in [1]_.
    This class implements a quasi-constant step size as explained in [2]_.
    The error estimation strategy for the constant-step BDF is derived in
    [3]_.
An accuracy enhancement using modified formulas (NDF) [2]_ is also implemented. Can be applied in the complex domain. Parameters ---------- fun : callable Right-hand side of the system: the time derivative of the state ``y`` at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must return an array of the same shape as ``y``. See `vectorized` for more information. t0 : float Initial time. y0 : array_like, shape (n,) Initial state. t_bound : float Boundary time - the integration won't continue beyond it. It also determines the direction of the integration. first_step : float or None, optional Initial step size. Default is ``None`` which means that the algorithm should choose. max_step : float, optional Maximum allowed step size. Default is np.inf, i.e., the step size is not bounded and determined solely by the solver. rtol, atol : float and array_like, optional Relative and absolute tolerances. The solver keeps the local error estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a relative accuracy (number of correct digits), while `atol` controls absolute accuracy (number of correct decimal places). To achieve the desired `rtol`, set `atol` to be smaller than the smallest value that can be expected from ``rtol * abs(y)`` so that `rtol` dominates the allowable error. If `atol` is larger than ``rtol * abs(y)`` the number of correct digits is not guaranteed. Conversely, to achieve the desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller than `atol`. If components of y have different scales, it might be beneficial to set different `atol` values for different components by passing array_like with shape (n,) for `atol`. Default values are 1e-3 for `rtol` and 1e-6 for `atol`. jac : {None, array_like, sparse_matrix, callable}, optional Jacobian matrix of the right-hand side of the system with respect to y, required by this method. 
The Jacobian matrix has shape (n, n) and its element (i, j) is equal to ``d f_i / d y_j``. There are three ways to define the Jacobian: * If array_like or sparse_matrix, the Jacobian is assumed to be constant. * If callable, the Jacobian is assumed to depend on both t and y; it will be called as ``jac(t, y)`` as necessary. For the 'Radau' and 'BDF' methods, the return value might be a sparse matrix. * If None (default), the Jacobian will be approximated by finite differences. It is generally recommended to provide the Jacobian rather than relying on a finite-difference approximation. jac_sparsity : {None, array_like, sparse matrix}, optional Defines a sparsity structure of the Jacobian matrix for a finite-difference approximation. Its shape must be (n, n). This argument is ignored if `jac` is not `None`. If the Jacobian has only few non-zero elements in *each* row, providing the sparsity structure will greatly speed up the computations [4]_. A zero entry means that a corresponding element in the Jacobian is always zero. If None (default), the Jacobian is assumed to be dense. vectorized : bool, optional Whether `fun` can be called in a vectorized fashion. Default is False. If ``vectorized`` is False, `fun` will always be called with ``y`` of shape ``(n,)``, where ``n = len(y0)``. If ``vectorized`` is True, `fun` may be called with ``y`` of shape ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of the returned array is the time derivative of the state corresponding with a column of ``y``). Setting ``vectorized=True`` allows for faster finite difference approximation of the Jacobian by this method, but may result in slower execution overall in some circumstances (e.g. small ``len(y0)``). Attributes ---------- n : int Number of equations. status : string Current status of the solver: 'running', 'finished' or 'failed'. t_bound : float Boundary time. 
direction : float Integration direction: +1 or -1. t : float Current time. y : ndarray Current state. t_old : float Previous time. None if no steps were made yet. step_size : float Size of the last successful step. None if no steps were made yet. nfev : int Number of evaluations of the right-hand side. njev : int Number of evaluations of the Jacobian. nlu : int Number of LU decompositions. References ---------- .. [1] G. D. Byrne, A. C. Hindmarsh, "A Polyalgorithm for the Numerical Solution of Ordinary Differential Equations", ACM Transactions on Mathematical Software, Vol. 1, No. 1, pp. 71-96, March 1975. .. [2] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI. COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997. .. [3] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations I: Nonstiff Problems", Sec. III.2. .. [4] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13, pp. 117-120, 1974. 
""" def __init__(self, fun, t0, y0, t_bound, max_step=np.inf, rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None, vectorized=False, first_step=None, **extraneous): warn_extraneous(extraneous) super().__init__(fun, t0, y0, t_bound, vectorized, support_complex=True) self.max_step = validate_max_step(max_step) self.rtol, self.atol = validate_tol(rtol, atol, self.n) f = self.fun(self.t, self.y) if first_step is None: self.h_abs = select_initial_step(self.fun, self.t, self.y, f, self.direction, 1, self.rtol, self.atol) else: self.h_abs = validate_first_step(first_step, t0, t_bound) self.h_abs_old = None self.error_norm_old = None self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5)) self.jac_factor = None self.jac, self.J = self._validate_jac(jac, jac_sparsity) if issparse(self.J): def lu(A): self.nlu += 1 return splu(A) def solve_lu(LU, b): return LU.solve(b) I = eye(self.n, format='csc', dtype=self.y.dtype) else: def lu(A): self.nlu += 1 return lu_factor(A, overwrite_a=True) def solve_lu(LU, b): return lu_solve(LU, b, overwrite_b=True) I = np.identity(self.n, dtype=self.y.dtype) self.lu = lu self.solve_lu = solve_lu self.I = I kappa = np.array([0, -0.1850, -1/9, -0.0823, -0.0415, 0]) self.gamma = np.hstack((0, np.cumsum(1 / np.arange(1, MAX_ORDER + 1)))) self.alpha = (1 - kappa) * self.gamma self.error_const = kappa * self.gamma + 1 / np.arange(1, MAX_ORDER + 2) D = np.empty((MAX_ORDER + 3, self.n), dtype=self.y.dtype) D[0] = self.y D[1] = f * self.h_abs * self.direction self.D = D self.order = 1 self.n_equal_steps = 0 self.LU = None def _validate_jac(self, jac, sparsity): t0 = self.t y0 = self.y if jac is None: if sparsity is not None: if issparse(sparsity): sparsity = csc_matrix(sparsity) groups = group_columns(sparsity) sparsity = (sparsity, groups) def jac_wrapped(t, y): self.njev += 1 f = self.fun_single(t, y) J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f, self.atol, self.jac_factor, sparsity) return J J = jac_wrapped(t0, y0) elif 
callable(jac): J = jac(t0, y0) self.njev += 1 if issparse(J): J = csc_matrix(J, dtype=y0.dtype) def jac_wrapped(t, y): self.njev += 1 return csc_matrix(jac(t, y), dtype=y0.dtype) else: J = np.asarray(J, dtype=y0.dtype) def jac_wrapped(t, y): self.njev += 1 return np.asarray(jac(t, y), dtype=y0.dtype) if J.shape != (self.n, self.n): raise ValueError("`jac` is expected to have shape {}, but " "actually has {}." .format((self.n, self.n), J.shape)) else: if issparse(jac): J = csc_matrix(jac, dtype=y0.dtype) else: J = np.asarray(jac, dtype=y0.dtype) if J.shape != (self.n, self.n): raise ValueError("`jac` is expected to have shape {}, but " "actually has {}." .format((self.n, self.n), J.shape)) jac_wrapped = None return jac_wrapped, J def _step_impl(self): t = self.t D = self.D max_step = self.max_step min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t) if self.h_abs > max_step: h_abs = max_step change_D(D, self.order, max_step / self.h_abs) self.n_equal_steps = 0 elif self.h_abs < min_step: h_abs = min_step change_D(D, self.order, min_step / self.h_abs) self.n_equal_steps = 0 else: h_abs = self.h_abs atol = self.atol rtol = self.rtol order = self.order alpha = self.alpha gamma = self.gamma error_const = self.error_const J = self.J LU = self.LU current_jac = self.jac is None step_accepted = False while not step_accepted: if h_abs < min_step: return False, self.TOO_SMALL_STEP h = h_abs * self.direction t_new = t + h if self.direction * (t_new - self.t_bound) > 0: t_new = self.t_bound change_D(D, order, np.abs(t_new - t) / h_abs) self.n_equal_steps = 0 LU = None h = t_new - t h_abs = np.abs(h) y_predict = np.sum(D[:order + 1], axis=0) scale = atol + rtol * np.abs(y_predict) psi = np.dot(D[1: order + 1].T, gamma[1: order + 1]) / alpha[order] converged = False c = h / alpha[order] while not converged: if LU is None: LU = self.lu(self.I - c * J) converged, n_iter, y_new, d = solve_bdf_system( self.fun, t_new, y_predict, c, psi, LU, self.solve_lu, scale, 
self.newton_tol) if not converged: if current_jac: break J = self.jac(t_new, y_predict) LU = None current_jac = True if not converged: factor = 0.5 h_abs *= factor change_D(D, order, factor) self.n_equal_steps = 0 LU = None continue safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER + n_iter) scale = atol + rtol * np.abs(y_new) error = error_const[order] * d error_norm = norm(error / scale) if error_norm > 1: factor = max(MIN_FACTOR, safety * error_norm ** (-1 / (order + 1))) h_abs *= factor change_D(D, order, factor) self.n_equal_steps = 0 # As we didn't have problems with convergence, we don't # reset LU here. else: step_accepted = True self.n_equal_steps += 1 self.t = t_new self.y = y_new self.h_abs = h_abs self.J = J self.LU = LU # Update differences. The principal relation here is # D^{j + 1} y_n = D^{j} y_n - D^{j} y_{n - 1}. Keep in mind that D # contained difference for previous interpolating polynomial and # d = D^{k + 1} y_n. Thus this elegant code follows. D[order + 2] = d - D[order + 1] D[order + 1] = d for i in reversed(range(order + 1)): D[i] += D[i + 1] if self.n_equal_steps < order + 1: return True, None if order > 1: error_m = error_const[order - 1] * D[order] error_m_norm = norm(error_m / scale) else: error_m_norm = np.inf if order < MAX_ORDER: error_p = error_const[order + 1] * D[order + 2] error_p_norm = norm(error_p / scale) else: error_p_norm = np.inf error_norms = np.array([error_m_norm, error_norm, error_p_norm]) with np.errstate(divide='ignore'): factors = error_norms ** (-1 / np.arange(order, order + 3)) delta_order = np.argmax(factors) - 1 order += delta_order self.order = order factor = min(MAX_FACTOR, safety * np.max(factors)) self.h_abs *= factor change_D(D, order, factor) self.n_equal_steps = 0 self.LU = None return True, None def _dense_output_impl(self): return BdfDenseOutput(self.t_old, self.t, self.h_abs * self.direction, self.order, self.D[:self.order + 1].copy()) class BdfDenseOutput(DenseOutput): def __init__(self, 
                 t_old, t, h, order, D):
        super().__init__(t_old, t)
        self.order = order
        # Nodes of the interpolating polynomial: the last `order` step
        # endpoints, spaced by the (constant) step h backward from t.
        self.t_shift = self.t - h * np.arange(self.order)
        self.denom = h * (1 + np.arange(self.order))
        # Backward-differences array defining the polynomial coefficients.
        self.D = D

    def _call_impl(self, t):
        """Evaluate the interpolant at `t` (scalar or 1-D array of times)."""
        if t.ndim == 0:
            x = (t - self.t_shift) / self.denom
            # Cumulative products give the Newton-form basis polynomials.
            p = np.cumprod(x)
        else:
            x = (t - self.t_shift[:, None]) / self.denom[:, None]
            p = np.cumprod(x, axis=0)

        y = np.dot(self.D[1:].T, p)
        # Add the constant term D[0] (the solution value at t).
        if y.ndim == 1:
            y += self.D[0]
        else:
            y += self.D[0, :, None]

        return y
17,522
35.50625
83
py
scipy
scipy-main/scipy/integrate/_ivp/setup.py
def configuration(parent_package='', top_path=None):
    """Return the numpy.distutils configuration for the `_ivp` subpackage."""
    from numpy.distutils.misc_util import Configuration

    cfg = Configuration('_ivp', parent_package, top_path)
    cfg.add_data_dir('tests')
    return cfg


if __name__ == '__main__':
    from numpy.distutils.core import setup

    setup(**configuration(top_path='').todict())
343
27.666667
60
py
scipy
scipy-main/scipy/integrate/_ivp/rk.py
# Explicit Runge-Kutta solvers (RK23, RK45, DOP853) used by solve_ivp.
import numpy as np
from .base import OdeSolver, DenseOutput
from .common import (validate_max_step, validate_tol, select_initial_step,
                     norm, warn_extraneous, validate_first_step)
from . import dop853_coefficients

# Multiply steps computed from asymptotic behaviour of errors by this.
SAFETY = 0.9

MIN_FACTOR = 0.2  # Minimum allowed decrease in a step size.
MAX_FACTOR = 10  # Maximum allowed increase in a step size.


def rk_step(fun, t, y, f, h, A, B, C, K):
    """Perform a single Runge-Kutta step.

    This function computes a prediction of an explicit Runge-Kutta method and
    also estimates the error of a less accurate method.

    Notation for Butcher tableau is as in [1]_.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    f : ndarray, shape (n,)
        Current value of the derivative, i.e., ``fun(t, y)``.
    h : float
        Step to use.
    A : ndarray, shape (n_stages, n_stages)
        Coefficients for combining previous RK stages to compute the next
        stage. For explicit methods the coefficients at and above the main
        diagonal are zeros.
    B : ndarray, shape (n_stages,)
        Coefficients for combining RK stages for computing the final
        prediction.
    C : ndarray, shape (n_stages,)
        Coefficients for incrementing time for consecutive RK stages. The
        value for the first stage is always zero.
    K : ndarray, shape (n_stages + 1, n)
        Storage array for putting RK stages here. Stages are stored in rows.
        The last row stores the derivative evaluated at ``t + h``.

    Returns
    -------
    y_new : ndarray, shape (n,)
        Solution at t + h computed with a higher accuracy.
    f_new : ndarray, shape (n,)
        Derivative ``fun(t + h, y_new)``.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
           Equations I: Nonstiff Problems", Sec. II.4.
""" K[0] = f for s, (a, c) in enumerate(zip(A[1:], C[1:]), start=1): dy = np.dot(K[:s].T, a[:s]) * h K[s] = fun(t + c * h, y + dy) y_new = y + h * np.dot(K[:-1].T, B) f_new = fun(t + h, y_new) K[-1] = f_new return y_new, f_new class RungeKutta(OdeSolver): """Base class for explicit Runge-Kutta methods.""" C: np.ndarray = NotImplemented A: np.ndarray = NotImplemented B: np.ndarray = NotImplemented E: np.ndarray = NotImplemented P: np.ndarray = NotImplemented order: int = NotImplemented error_estimator_order: int = NotImplemented n_stages: int = NotImplemented def __init__(self, fun, t0, y0, t_bound, max_step=np.inf, rtol=1e-3, atol=1e-6, vectorized=False, first_step=None, **extraneous): warn_extraneous(extraneous) super().__init__(fun, t0, y0, t_bound, vectorized, support_complex=True) self.y_old = None self.max_step = validate_max_step(max_step) self.rtol, self.atol = validate_tol(rtol, atol, self.n) self.f = self.fun(self.t, self.y) if first_step is None: self.h_abs = select_initial_step( self.fun, self.t, self.y, self.f, self.direction, self.error_estimator_order, self.rtol, self.atol) else: self.h_abs = validate_first_step(first_step, t0, t_bound) self.K = np.empty((self.n_stages + 1, self.n), dtype=self.y.dtype) self.error_exponent = -1 / (self.error_estimator_order + 1) self.h_previous = None def _estimate_error(self, K, h): return np.dot(K.T, self.E) * h def _estimate_error_norm(self, K, h, scale): return norm(self._estimate_error(K, h) / scale) def _step_impl(self): t = self.t y = self.y max_step = self.max_step rtol = self.rtol atol = self.atol min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t) if self.h_abs > max_step: h_abs = max_step elif self.h_abs < min_step: h_abs = min_step else: h_abs = self.h_abs step_accepted = False step_rejected = False while not step_accepted: if h_abs < min_step: return False, self.TOO_SMALL_STEP h = h_abs * self.direction t_new = t + h if self.direction * (t_new - self.t_bound) > 0: t_new = self.t_bound h = 
t_new - t h_abs = np.abs(h) y_new, f_new = rk_step(self.fun, t, y, self.f, h, self.A, self.B, self.C, self.K) scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol error_norm = self._estimate_error_norm(self.K, h, scale) if error_norm < 1: if error_norm == 0: factor = MAX_FACTOR else: factor = min(MAX_FACTOR, SAFETY * error_norm ** self.error_exponent) if step_rejected: factor = min(1, factor) h_abs *= factor step_accepted = True else: h_abs *= max(MIN_FACTOR, SAFETY * error_norm ** self.error_exponent) step_rejected = True self.h_previous = h self.y_old = y self.t = t_new self.y = y_new self.h_abs = h_abs self.f = f_new return True, None def _dense_output_impl(self): Q = self.K.T.dot(self.P) return RkDenseOutput(self.t_old, self.t, self.y_old, Q) class RK23(RungeKutta): """Explicit Runge-Kutta method of order 3(2). This uses the Bogacki-Shampine pair of formulas [1]_. The error is controlled assuming accuracy of the second-order method, but steps are taken using the third-order accurate formula (local extrapolation is done). A cubic Hermite polynomial is used for the dense output. Can be applied in the complex domain. Parameters ---------- fun : callable Right-hand side of the system: the time derivative of the state ``y`` at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must return an array of the same shape as ``y``. See `vectorized` for more information. t0 : float Initial time. y0 : array_like, shape (n,) Initial state. t_bound : float Boundary time - the integration won't continue beyond it. It also determines the direction of the integration. first_step : float or None, optional Initial step size. Default is ``None`` which means that the algorithm should choose. max_step : float, optional Maximum allowed step size. Default is np.inf, i.e., the step size is not bounded and determined solely by the solver. 
rtol, atol : float and array_like, optional Relative and absolute tolerances. The solver keeps the local error estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a relative accuracy (number of correct digits), while `atol` controls absolute accuracy (number of correct decimal places). To achieve the desired `rtol`, set `atol` to be smaller than the smallest value that can be expected from ``rtol * abs(y)`` so that `rtol` dominates the allowable error. If `atol` is larger than ``rtol * abs(y)`` the number of correct digits is not guaranteed. Conversely, to achieve the desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller than `atol`. If components of y have different scales, it might be beneficial to set different `atol` values for different components by passing array_like with shape (n,) for `atol`. Default values are 1e-3 for `rtol` and 1e-6 for `atol`. vectorized : bool, optional Whether `fun` may be called in a vectorized fashion. False (default) is recommended for this solver. If ``vectorized`` is False, `fun` will always be called with ``y`` of shape ``(n,)``, where ``n = len(y0)``. If ``vectorized`` is True, `fun` may be called with ``y`` of shape ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of the returned array is the time derivative of the state corresponding with a column of ``y``). Setting ``vectorized=True`` allows for faster finite difference approximation of the Jacobian by methods 'Radau' and 'BDF', but will result in slower execution for this solver. Attributes ---------- n : int Number of equations. status : string Current status of the solver: 'running', 'finished' or 'failed'. t_bound : float Boundary time. direction : float Integration direction: +1 or -1. t : float Current time. y : ndarray Current state. t_old : float Previous time. None if no steps were made yet. step_size : float Size of the last successful step. 
None if no steps were made yet. nfev : int Number evaluations of the system's right-hand side. njev : int Number of evaluations of the Jacobian. Is always 0 for this solver as it does not use the Jacobian. nlu : int Number of LU decompositions. Is always 0 for this solver. References ---------- .. [1] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas", Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989. """ order = 3 error_estimator_order = 2 n_stages = 3 C = np.array([0, 1/2, 3/4]) A = np.array([ [0, 0, 0], [1/2, 0, 0], [0, 3/4, 0] ]) B = np.array([2/9, 1/3, 4/9]) E = np.array([5/72, -1/12, -1/9, 1/8]) P = np.array([[1, -4 / 3, 5 / 9], [0, 1, -2/3], [0, 4/3, -8/9], [0, -1, 1]]) class RK45(RungeKutta): """Explicit Runge-Kutta method of order 5(4). This uses the Dormand-Prince pair of formulas [1]_. The error is controlled assuming accuracy of the fourth-order method accuracy, but steps are taken using the fifth-order accurate formula (local extrapolation is done). A quartic interpolation polynomial is used for the dense output [2]_. Can be applied in the complex domain. Parameters ---------- fun : callable Right-hand side of the system. The calling signature is ``fun(t, y)``. Here ``t`` is a scalar, and there are two options for the ndarray ``y``: It can either have shape (n,); then ``fun`` must return array_like with shape (n,). Alternatively it can have shape (n, k); then ``fun`` must return an array_like with shape (n, k), i.e., each column corresponds to a single column in ``y``. The choice between the two options is determined by `vectorized` argument (see below). t0 : float Initial time. y0 : array_like, shape (n,) Initial state. t_bound : float Boundary time - the integration won't continue beyond it. It also determines the direction of the integration. first_step : float or None, optional Initial step size. Default is ``None`` which means that the algorithm should choose. max_step : float, optional Maximum allowed step size. 
Default is np.inf, i.e., the step size is not bounded and determined solely by the solver. rtol, atol : float and array_like, optional Relative and absolute tolerances. The solver keeps the local error estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a relative accuracy (number of correct digits), while `atol` controls absolute accuracy (number of correct decimal places). To achieve the desired `rtol`, set `atol` to be smaller than the smallest value that can be expected from ``rtol * abs(y)`` so that `rtol` dominates the allowable error. If `atol` is larger than ``rtol * abs(y)`` the number of correct digits is not guaranteed. Conversely, to achieve the desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller than `atol`. If components of y have different scales, it might be beneficial to set different `atol` values for different components by passing array_like with shape (n,) for `atol`. Default values are 1e-3 for `rtol` and 1e-6 for `atol`. vectorized : bool, optional Whether `fun` is implemented in a vectorized fashion. Default is False. Attributes ---------- n : int Number of equations. status : string Current status of the solver: 'running', 'finished' or 'failed'. t_bound : float Boundary time. direction : float Integration direction: +1 or -1. t : float Current time. y : ndarray Current state. t_old : float Previous time. None if no steps were made yet. step_size : float Size of the last successful step. None if no steps were made yet. nfev : int Number evaluations of the system's right-hand side. njev : int Number of evaluations of the Jacobian. Is always 0 for this solver as it does not use the Jacobian. nlu : int Number of LU decompositions. Is always 0 for this solver. References ---------- .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta formulae", Journal of Computational and Applied Mathematics, Vol. 6, No. 1, pp. 19-26, 1980. .. [2] L. W. 
Shampine, "Some Practical Runge-Kutta Formulas", Mathematics of Computation,, Vol. 46, No. 173, pp. 135-150, 1986. """ order = 5 error_estimator_order = 4 n_stages = 6 C = np.array([0, 1/5, 3/10, 4/5, 8/9, 1]) A = np.array([ [0, 0, 0, 0, 0], [1/5, 0, 0, 0, 0], [3/40, 9/40, 0, 0, 0], [44/45, -56/15, 32/9, 0, 0], [19372/6561, -25360/2187, 64448/6561, -212/729, 0], [9017/3168, -355/33, 46732/5247, 49/176, -5103/18656] ]) B = np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84]) E = np.array([-71/57600, 0, 71/16695, -71/1920, 17253/339200, -22/525, 1/40]) # Corresponds to the optimum value of c_6 from [2]_. P = np.array([ [1, -8048581381/2820520608, 8663915743/2820520608, -12715105075/11282082432], [0, 0, 0, 0], [0, 131558114200/32700410799, -68118460800/10900136933, 87487479700/32700410799], [0, -1754552775/470086768, 14199869525/1410260304, -10690763975/1880347072], [0, 127303824393/49829197408, -318862633887/49829197408, 701980252875 / 199316789632], [0, -282668133/205662961, 2019193451/616988883, -1453857185/822651844], [0, 40617522/29380423, -110615467/29380423, 69997945/29380423]]) class DOP853(RungeKutta): """Explicit Runge-Kutta method of order 8. This is a Python implementation of "DOP853" algorithm originally written in Fortran [1]_, [2]_. Note that this is not a literate translation, but the algorithmic core and coefficients are the same. Can be applied in the complex domain. Parameters ---------- fun : callable Right-hand side of the system. The calling signature is ``fun(t, y)``. Here, ``t`` is a scalar, and there are two options for the ndarray ``y``: It can either have shape (n,); then ``fun`` must return array_like with shape (n,). Alternatively it can have shape (n, k); then ``fun`` must return an array_like with shape (n, k), i.e. each column corresponds to a single column in ``y``. The choice between the two options is determined by `vectorized` argument (see below). t0 : float Initial time. y0 : array_like, shape (n,) Initial state. 
t_bound : float Boundary time - the integration won't continue beyond it. It also determines the direction of the integration. first_step : float or None, optional Initial step size. Default is ``None`` which means that the algorithm should choose. max_step : float, optional Maximum allowed step size. Default is np.inf, i.e. the step size is not bounded and determined solely by the solver. rtol, atol : float and array_like, optional Relative and absolute tolerances. The solver keeps the local error estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a relative accuracy (number of correct digits), while `atol` controls absolute accuracy (number of correct decimal places). To achieve the desired `rtol`, set `atol` to be smaller than the smallest value that can be expected from ``rtol * abs(y)`` so that `rtol` dominates the allowable error. If `atol` is larger than ``rtol * abs(y)`` the number of correct digits is not guaranteed. Conversely, to achieve the desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller than `atol`. If components of y have different scales, it might be beneficial to set different `atol` values for different components by passing array_like with shape (n,) for `atol`. Default values are 1e-3 for `rtol` and 1e-6 for `atol`. vectorized : bool, optional Whether `fun` is implemented in a vectorized fashion. Default is False. Attributes ---------- n : int Number of equations. status : string Current status of the solver: 'running', 'finished' or 'failed'. t_bound : float Boundary time. direction : float Integration direction: +1 or -1. t : float Current time. y : ndarray Current state. t_old : float Previous time. None if no steps were made yet. step_size : float Size of the last successful step. None if no steps were made yet. nfev : int Number evaluations of the system's right-hand side. njev : int Number of evaluations of the Jacobian. Is always 0 for this solver as it does not use the Jacobian. 
nlu : int Number of LU decompositions. Is always 0 for this solver. References ---------- .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential Equations I: Nonstiff Problems", Sec. II. .. [2] `Page with original Fortran code of DOP853 <http://www.unige.ch/~hairer/software.html>`_. """ n_stages = dop853_coefficients.N_STAGES order = 8 error_estimator_order = 7 A = dop853_coefficients.A[:n_stages, :n_stages] B = dop853_coefficients.B C = dop853_coefficients.C[:n_stages] E3 = dop853_coefficients.E3 E5 = dop853_coefficients.E5 D = dop853_coefficients.D A_EXTRA = dop853_coefficients.A[n_stages + 1:] C_EXTRA = dop853_coefficients.C[n_stages + 1:] def __init__(self, fun, t0, y0, t_bound, max_step=np.inf, rtol=1e-3, atol=1e-6, vectorized=False, first_step=None, **extraneous): super().__init__(fun, t0, y0, t_bound, max_step, rtol, atol, vectorized, first_step, **extraneous) self.K_extended = np.empty((dop853_coefficients.N_STAGES_EXTENDED, self.n), dtype=self.y.dtype) self.K = self.K_extended[:self.n_stages + 1] def _estimate_error(self, K, h): # Left for testing purposes. 
err5 = np.dot(K.T, self.E5) err3 = np.dot(K.T, self.E3) denom = np.hypot(np.abs(err5), 0.1 * np.abs(err3)) correction_factor = np.ones_like(err5) mask = denom > 0 correction_factor[mask] = np.abs(err5[mask]) / denom[mask] return h * err5 * correction_factor def _estimate_error_norm(self, K, h, scale): err5 = np.dot(K.T, self.E5) / scale err3 = np.dot(K.T, self.E3) / scale err5_norm_2 = np.linalg.norm(err5)**2 err3_norm_2 = np.linalg.norm(err3)**2 if err5_norm_2 == 0 and err3_norm_2 == 0: return 0.0 denom = err5_norm_2 + 0.01 * err3_norm_2 return np.abs(h) * err5_norm_2 / np.sqrt(denom * len(scale)) def _dense_output_impl(self): K = self.K_extended h = self.h_previous for s, (a, c) in enumerate(zip(self.A_EXTRA, self.C_EXTRA), start=self.n_stages + 1): dy = np.dot(K[:s].T, a[:s]) * h K[s] = self.fun(self.t_old + c * h, self.y_old + dy) F = np.empty((dop853_coefficients.INTERPOLATOR_POWER, self.n), dtype=self.y_old.dtype) f_old = K[0] delta_y = self.y - self.y_old F[0] = delta_y F[1] = h * f_old - delta_y F[2] = 2 * delta_y - h * (self.f + f_old) F[3:] = h * np.dot(self.D, K) return Dop853DenseOutput(self.t_old, self.t, self.y_old, F) class RkDenseOutput(DenseOutput): def __init__(self, t_old, t, y_old, Q): super().__init__(t_old, t) self.h = t - t_old self.Q = Q self.order = Q.shape[1] - 1 self.y_old = y_old def _call_impl(self, t): x = (t - self.t_old) / self.h if t.ndim == 0: p = np.tile(x, self.order + 1) p = np.cumprod(p) else: p = np.tile(x, (self.order + 1, 1)) p = np.cumprod(p, axis=0) y = self.h * np.dot(self.Q, p) if y.ndim == 2: y += self.y_old[:, None] else: y += self.y_old return y class Dop853DenseOutput(DenseOutput): def __init__(self, t_old, t, y_old, F): super().__init__(t_old, t) self.h = t - t_old self.F = F self.y_old = y_old def _call_impl(self, t): x = (t - self.t_old) / self.h if t.ndim == 0: y = np.zeros_like(self.y_old) else: x = x[:, None] y = np.zeros((len(x), len(self.y_old)), dtype=self.y_old.dtype) for i, f in 
                          enumerate(reversed(self.F)):
            # Evaluate the interpolating polynomial in a Horner-like scheme,
            # alternating multiplication by x and (1 - x) between terms.
            y += f
            if i % 2 == 0:
                y *= x
            else:
                y *= 1 - x
        # Interpolant is expressed relative to the state at t_old.
        y += self.y_old

        return y.T
22,766
36.945
107
py
scipy
scipy-main/scipy/integrate/_ivp/common.py
from itertools import groupby from warnings import warn import numpy as np from scipy.sparse import find, coo_matrix EPS = np.finfo(float).eps def validate_first_step(first_step, t0, t_bound): """Assert that first_step is valid and return it.""" if first_step <= 0: raise ValueError("`first_step` must be positive.") if first_step > np.abs(t_bound - t0): raise ValueError("`first_step` exceeds bounds.") return first_step def validate_max_step(max_step): """Assert that max_Step is valid and return it.""" if max_step <= 0: raise ValueError("`max_step` must be positive.") return max_step def warn_extraneous(extraneous): """Display a warning for extraneous keyword arguments. The initializer of each solver class is expected to collect keyword arguments that it doesn't understand and warn about them. This function prints a warning for each key in the supplied dictionary. Parameters ---------- extraneous : dict Extraneous keyword arguments """ if extraneous: warn("The following arguments have no effect for a chosen solver: {}." .format(", ".join(f"`{x}`" for x in extraneous))) def validate_tol(rtol, atol, n): """Validate tolerance values.""" if np.any(rtol < 100 * EPS): warn("At least one element of `rtol` is too small. " f"Setting `rtol = np.maximum(rtol, {100 * EPS})`.") rtol = np.maximum(rtol, 100 * EPS) atol = np.asarray(atol) if atol.ndim > 0 and atol.shape != (n,): raise ValueError("`atol` has wrong shape.") if np.any(atol < 0): raise ValueError("`atol` must be positive.") return rtol, atol def norm(x): """Compute RMS norm.""" return np.linalg.norm(x) / x.size ** 0.5 def select_initial_step(fun, t0, y0, f0, direction, order, rtol, atol): """Empirically select a good initial step. The algorithm is described in [1]_. Parameters ---------- fun : callable Right-hand side of the system. t0 : float Initial value of the independent variable. y0 : ndarray, shape (n,) Initial value of the dependent variable. 
f0 : ndarray, shape (n,) Initial value of the derivative, i.e., ``fun(t0, y0)``. direction : float Integration direction. order : float Error estimator order. It means that the error controlled by the algorithm is proportional to ``step_size ** (order + 1)`. rtol : float Desired relative tolerance. atol : float Desired absolute tolerance. Returns ------- h_abs : float Absolute value of the suggested initial step. References ---------- .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential Equations I: Nonstiff Problems", Sec. II.4. """ if y0.size == 0: return np.inf scale = atol + np.abs(y0) * rtol d0 = norm(y0 / scale) d1 = norm(f0 / scale) if d0 < 1e-5 or d1 < 1e-5: h0 = 1e-6 else: h0 = 0.01 * d0 / d1 y1 = y0 + h0 * direction * f0 f1 = fun(t0 + h0 * direction, y1) d2 = norm((f1 - f0) / scale) / h0 if d1 <= 1e-15 and d2 <= 1e-15: h1 = max(1e-6, h0 * 1e-3) else: h1 = (0.01 / max(d1, d2)) ** (1 / (order + 1)) return min(100 * h0, h1) class OdeSolution: """Continuous ODE solution. It is organized as a collection of `DenseOutput` objects which represent local interpolants. It provides an algorithm to select a right interpolant for each given point. The interpolants cover the range between `t_min` and `t_max` (see Attributes below). Evaluation outside this interval is not forbidden, but the accuracy is not guaranteed. When evaluating at a breakpoint (one of the values in `ts`) a segment with the lower index is selected. Parameters ---------- ts : array_like, shape (n_segments + 1,) Time instants between which local interpolants are defined. Must be strictly increasing or decreasing (zero segment with two points is also allowed). interpolants : list of DenseOutput with n_segments elements Local interpolants. An i-th interpolant is assumed to be defined between ``ts[i]`` and ``ts[i + 1]``. alt_segment : boolean Requests the alternative interpolant segment selection scheme. At each solver integration point, two interpolant segments are available. 
The default (False) and alternative (True) behaviours select the segment for which the requested time corresponded to ``t`` and ``t_old``, respectively. This functionality is only relevant for testing the interpolants' accuracy: different integrators use different construction strategies. Attributes ---------- t_min, t_max : float Time range of the interpolation. """ def __init__(self, ts, interpolants, alt_segment=False): ts = np.asarray(ts) d = np.diff(ts) # The first case covers integration on zero segment. if not ((ts.size == 2 and ts[0] == ts[-1]) or np.all(d > 0) or np.all(d < 0)): raise ValueError("`ts` must be strictly increasing or decreasing.") self.n_segments = len(interpolants) if ts.shape != (self.n_segments + 1,): raise ValueError("Numbers of time stamps and interpolants " "don't match.") self.ts = ts self.interpolants = interpolants if ts[-1] >= ts[0]: self.t_min = ts[0] self.t_max = ts[-1] self.ascending = True self.side = "right" if alt_segment else "left" self.ts_sorted = ts else: self.t_min = ts[-1] self.t_max = ts[0] self.ascending = False self.side = "left" if alt_segment else "right" self.ts_sorted = ts[::-1] def _call_single(self, t): # Here we preserve a certain symmetry that when t is in self.ts, # if alt_segment=False, then we prioritize a segment with a lower # index. ind = np.searchsorted(self.ts_sorted, t, side=self.side) segment = min(max(ind - 1, 0), self.n_segments - 1) if not self.ascending: segment = self.n_segments - 1 - segment return self.interpolants[segment](t) def __call__(self, t): """Evaluate the solution. Parameters ---------- t : float or array_like with shape (n_points,) Points to evaluate at. Returns ------- y : ndarray, shape (n_states,) or (n_states, n_points) Computed values. Shape depends on whether `t` is a scalar or a 1-D array. 
""" t = np.asarray(t) if t.ndim == 0: return self._call_single(t) order = np.argsort(t) reverse = np.empty_like(order) reverse[order] = np.arange(order.shape[0]) t_sorted = t[order] # See comment in self._call_single. segments = np.searchsorted(self.ts_sorted, t_sorted, side=self.side) segments -= 1 segments[segments < 0] = 0 segments[segments > self.n_segments - 1] = self.n_segments - 1 if not self.ascending: segments = self.n_segments - 1 - segments ys = [] group_start = 0 for segment, group in groupby(segments): group_end = group_start + len(list(group)) y = self.interpolants[segment](t_sorted[group_start:group_end]) ys.append(y) group_start = group_end ys = np.hstack(ys) ys = ys[:, reverse] return ys NUM_JAC_DIFF_REJECT = EPS ** 0.875 NUM_JAC_DIFF_SMALL = EPS ** 0.75 NUM_JAC_DIFF_BIG = EPS ** 0.25 NUM_JAC_MIN_FACTOR = 1e3 * EPS NUM_JAC_FACTOR_INCREASE = 10 NUM_JAC_FACTOR_DECREASE = 0.1 def num_jac(fun, t, y, f, threshold, factor, sparsity=None): """Finite differences Jacobian approximation tailored for ODE solvers. This function computes finite difference approximation to the Jacobian matrix of `fun` with respect to `y` using forward differences. The Jacobian matrix has shape (n, n) and its element (i, j) is equal to ``d f_i / d y_j``. A special feature of this function is the ability to correct the step size from iteration to iteration. The main idea is to keep the finite difference significantly separated from its round-off error which approximately equals ``EPS * np.abs(f)``. It reduces a possibility of a huge error and assures that the estimated derivative are reasonably close to the true values (i.e., the finite difference approximation is at least qualitatively reflects the structure of the true Jacobian). Parameters ---------- fun : callable Right-hand side of the system implemented in a vectorized fashion. t : float Current time. y : ndarray, shape (n,) Current state. f : ndarray, shape (n,) Value of the right hand side at (t, y). 
threshold : float Threshold for `y` value used for computing the step size as ``factor * np.maximum(np.abs(y), threshold)``. Typically, the value of absolute tolerance (atol) for a solver should be passed as `threshold`. factor : ndarray with shape (n,) or None Factor to use for computing the step size. Pass None for the very evaluation, then use the value returned from this function. sparsity : tuple (structure, groups) or None Sparsity structure of the Jacobian, `structure` must be csc_matrix. Returns ------- J : ndarray or csc_matrix, shape (n, n) Jacobian matrix. factor : ndarray, shape (n,) Suggested `factor` for the next evaluation. """ y = np.asarray(y) n = y.shape[0] if n == 0: return np.empty((0, 0)), factor if factor is None: factor = np.full(n, EPS ** 0.5) else: factor = factor.copy() # Direct the step as ODE dictates, hoping that such a step won't lead to # a problematic region. For complex ODEs it makes sense to use the real # part of f as we use steps along real axis. f_sign = 2 * (np.real(f) >= 0).astype(float) - 1 y_scale = f_sign * np.maximum(threshold, np.abs(y)) h = (y + factor * y_scale) - y # Make sure that the step is not 0 to start with. Not likely it will be # executed often. 
for i in np.nonzero(h == 0)[0]: while h[i] == 0: factor[i] *= 10 h[i] = (y[i] + factor[i] * y_scale[i]) - y[i] if sparsity is None: return _dense_num_jac(fun, t, y, f, h, factor, y_scale) else: structure, groups = sparsity return _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups) def _dense_num_jac(fun, t, y, f, h, factor, y_scale): n = y.shape[0] h_vecs = np.diag(h) f_new = fun(t, y[:, None] + h_vecs) diff = f_new - f[:, None] max_ind = np.argmax(np.abs(diff), axis=0) r = np.arange(n) max_diff = np.abs(diff[max_ind, r]) scale = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r])) diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale if np.any(diff_too_small): ind, = np.nonzero(diff_too_small) new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind] h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind] h_vecs[ind, ind] = h_new f_new = fun(t, y[:, None] + h_vecs[:, ind]) diff_new = f_new - f[:, None] max_ind = np.argmax(np.abs(diff_new), axis=0) r = np.arange(ind.shape[0]) max_diff_new = np.abs(diff_new[max_ind, r]) scale_new = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r])) update = max_diff[ind] * scale_new < max_diff_new * scale[ind] if np.any(update): update, = np.nonzero(update) update_ind = ind[update] factor[update_ind] = new_factor[update] h[update_ind] = h_new[update] diff[:, update_ind] = diff_new[:, update] scale[update_ind] = scale_new[update] max_diff[update_ind] = max_diff_new[update] diff /= h factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE factor = np.maximum(factor, NUM_JAC_MIN_FACTOR) return diff, factor def _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups): n = y.shape[0] n_groups = np.max(groups) + 1 h_vecs = np.empty((n_groups, n)) for group in range(n_groups): e = np.equal(group, groups) h_vecs[group] = h * e h_vecs = h_vecs.T f_new = fun(t, y[:, None] + h_vecs) df = f_new - f[:, None] i, j, _ = 
find(structure) diff = coo_matrix((df[i, groups[j]], (i, j)), shape=(n, n)).tocsc() max_ind = np.array(abs(diff).argmax(axis=0)).ravel() r = np.arange(n) max_diff = np.asarray(np.abs(diff[max_ind, r])).ravel() scale = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, groups[r]])) diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale if np.any(diff_too_small): ind, = np.nonzero(diff_too_small) new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind] h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind] h_new_all = np.zeros(n) h_new_all[ind] = h_new groups_unique = np.unique(groups[ind]) groups_map = np.empty(n_groups, dtype=int) h_vecs = np.empty((groups_unique.shape[0], n)) for k, group in enumerate(groups_unique): e = np.equal(group, groups) h_vecs[k] = h_new_all * e groups_map[group] = k h_vecs = h_vecs.T f_new = fun(t, y[:, None] + h_vecs) df = f_new - f[:, None] i, j, _ = find(structure[:, ind]) diff_new = coo_matrix((df[i, groups_map[groups[ind[j]]]], (i, j)), shape=(n, ind.shape[0])).tocsc() max_ind_new = np.array(abs(diff_new).argmax(axis=0)).ravel() r = np.arange(ind.shape[0]) max_diff_new = np.asarray(np.abs(diff_new[max_ind_new, r])).ravel() scale_new = np.maximum( np.abs(f[max_ind_new]), np.abs(f_new[max_ind_new, groups_map[groups[ind]]])) update = max_diff[ind] * scale_new < max_diff_new * scale[ind] if np.any(update): update, = np.nonzero(update) update_ind = ind[update] factor[update_ind] = new_factor[update] h[update_ind] = h_new[update] diff[:, update_ind] = diff_new[:, update] scale[update_ind] = scale_new[update] max_diff[update_ind] = max_diff_new[update] diff.data /= np.repeat(h, np.diff(diff.indptr)) factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE factor = np.maximum(factor, NUM_JAC_MIN_FACTOR) return diff, factor
15,220
33.671982
79
py
scipy
scipy-main/scipy/integrate/_ivp/dop853_coefficients.py
import numpy as np N_STAGES = 12 N_STAGES_EXTENDED = 16 INTERPOLATOR_POWER = 7 C = np.array([0.0, 0.526001519587677318785587544488e-01, 0.789002279381515978178381316732e-01, 0.118350341907227396726757197510, 0.281649658092772603273242802490, 0.333333333333333333333333333333, 0.25, 0.307692307692307692307692307692, 0.651282051282051282051282051282, 0.6, 0.857142857142857142857142857142, 1.0, 1.0, 0.1, 0.2, 0.777777777777777777777777777778]) A = np.zeros((N_STAGES_EXTENDED, N_STAGES_EXTENDED)) A[1, 0] = 5.26001519587677318785587544488e-2 A[2, 0] = 1.97250569845378994544595329183e-2 A[2, 1] = 5.91751709536136983633785987549e-2 A[3, 0] = 2.95875854768068491816892993775e-2 A[3, 2] = 8.87627564304205475450678981324e-2 A[4, 0] = 2.41365134159266685502369798665e-1 A[4, 2] = -8.84549479328286085344864962717e-1 A[4, 3] = 9.24834003261792003115737966543e-1 A[5, 0] = 3.7037037037037037037037037037e-2 A[5, 3] = 1.70828608729473871279604482173e-1 A[5, 4] = 1.25467687566822425016691814123e-1 A[6, 0] = 3.7109375e-2 A[6, 3] = 1.70252211019544039314978060272e-1 A[6, 4] = 6.02165389804559606850219397283e-2 A[6, 5] = -1.7578125e-2 A[7, 0] = 3.70920001185047927108779319836e-2 A[7, 3] = 1.70383925712239993810214054705e-1 A[7, 4] = 1.07262030446373284651809199168e-1 A[7, 5] = -1.53194377486244017527936158236e-2 A[7, 6] = 8.27378916381402288758473766002e-3 A[8, 0] = 6.24110958716075717114429577812e-1 A[8, 3] = -3.36089262944694129406857109825 A[8, 4] = -8.68219346841726006818189891453e-1 A[8, 5] = 2.75920996994467083049415600797e1 A[8, 6] = 2.01540675504778934086186788979e1 A[8, 7] = -4.34898841810699588477366255144e1 A[9, 0] = 4.77662536438264365890433908527e-1 A[9, 3] = -2.48811461997166764192642586468 A[9, 4] = -5.90290826836842996371446475743e-1 A[9, 5] = 2.12300514481811942347288949897e1 A[9, 6] = 1.52792336328824235832596922938e1 A[9, 7] = -3.32882109689848629194453265587e1 A[9, 8] = -2.03312017085086261358222928593e-2 A[10, 0] = -9.3714243008598732571704021658e-1 A[10, 3] = 
5.18637242884406370830023853209 A[10, 4] = 1.09143734899672957818500254654 A[10, 5] = -8.14978701074692612513997267357 A[10, 6] = -1.85200656599969598641566180701e1 A[10, 7] = 2.27394870993505042818970056734e1 A[10, 8] = 2.49360555267965238987089396762 A[10, 9] = -3.0467644718982195003823669022 A[11, 0] = 2.27331014751653820792359768449 A[11, 3] = -1.05344954667372501984066689879e1 A[11, 4] = -2.00087205822486249909675718444 A[11, 5] = -1.79589318631187989172765950534e1 A[11, 6] = 2.79488845294199600508499808837e1 A[11, 7] = -2.85899827713502369474065508674 A[11, 8] = -8.87285693353062954433549289258 A[11, 9] = 1.23605671757943030647266201528e1 A[11, 10] = 6.43392746015763530355970484046e-1 A[12, 0] = 5.42937341165687622380535766363e-2 A[12, 5] = 4.45031289275240888144113950566 A[12, 6] = 1.89151789931450038304281599044 A[12, 7] = -5.8012039600105847814672114227 A[12, 8] = 3.1116436695781989440891606237e-1 A[12, 9] = -1.52160949662516078556178806805e-1 A[12, 10] = 2.01365400804030348374776537501e-1 A[12, 11] = 4.47106157277725905176885569043e-2 A[13, 0] = 5.61675022830479523392909219681e-2 A[13, 6] = 2.53500210216624811088794765333e-1 A[13, 7] = -2.46239037470802489917441475441e-1 A[13, 8] = -1.24191423263816360469010140626e-1 A[13, 9] = 1.5329179827876569731206322685e-1 A[13, 10] = 8.20105229563468988491666602057e-3 A[13, 11] = 7.56789766054569976138603589584e-3 A[13, 12] = -8.298e-3 A[14, 0] = 3.18346481635021405060768473261e-2 A[14, 5] = 2.83009096723667755288322961402e-2 A[14, 6] = 5.35419883074385676223797384372e-2 A[14, 7] = -5.49237485713909884646569340306e-2 A[14, 10] = -1.08347328697249322858509316994e-4 A[14, 11] = 3.82571090835658412954920192323e-4 A[14, 12] = -3.40465008687404560802977114492e-4 A[14, 13] = 1.41312443674632500278074618366e-1 A[15, 0] = -4.28896301583791923408573538692e-1 A[15, 5] = -4.69762141536116384314449447206 A[15, 6] = 7.68342119606259904184240953878 A[15, 7] = 4.06898981839711007970213554331 A[15, 8] = 
3.56727187455281109270669543021e-1 A[15, 12] = -1.39902416515901462129418009734e-3 A[15, 13] = 2.9475147891527723389556272149 A[15, 14] = -9.15095847217987001081870187138 B = A[N_STAGES, :N_STAGES] E3 = np.zeros(N_STAGES + 1) E3[:-1] = B.copy() E3[0] -= 0.244094488188976377952755905512 E3[8] -= 0.733846688281611857341361741547 E3[11] -= 0.220588235294117647058823529412e-1 E5 = np.zeros(N_STAGES + 1) E5[0] = 0.1312004499419488073250102996e-1 E5[5] = -0.1225156446376204440720569753e+1 E5[6] = -0.4957589496572501915214079952 E5[7] = 0.1664377182454986536961530415e+1 E5[8] = -0.3503288487499736816886487290 E5[9] = 0.3341791187130174790297318841 E5[10] = 0.8192320648511571246570742613e-1 E5[11] = -0.2235530786388629525884427845e-1 # First 3 coefficients are computed separately. D = np.zeros((INTERPOLATOR_POWER - 3, N_STAGES_EXTENDED)) D[0, 0] = -0.84289382761090128651353491142e+1 D[0, 5] = 0.56671495351937776962531783590 D[0, 6] = -0.30689499459498916912797304727e+1 D[0, 7] = 0.23846676565120698287728149680e+1 D[0, 8] = 0.21170345824450282767155149946e+1 D[0, 9] = -0.87139158377797299206789907490 D[0, 10] = 0.22404374302607882758541771650e+1 D[0, 11] = 0.63157877876946881815570249290 D[0, 12] = -0.88990336451333310820698117400e-1 D[0, 13] = 0.18148505520854727256656404962e+2 D[0, 14] = -0.91946323924783554000451984436e+1 D[0, 15] = -0.44360363875948939664310572000e+1 D[1, 0] = 0.10427508642579134603413151009e+2 D[1, 5] = 0.24228349177525818288430175319e+3 D[1, 6] = 0.16520045171727028198505394887e+3 D[1, 7] = -0.37454675472269020279518312152e+3 D[1, 8] = -0.22113666853125306036270938578e+2 D[1, 9] = 0.77334326684722638389603898808e+1 D[1, 10] = -0.30674084731089398182061213626e+2 D[1, 11] = -0.93321305264302278729567221706e+1 D[1, 12] = 0.15697238121770843886131091075e+2 D[1, 13] = -0.31139403219565177677282850411e+2 D[1, 14] = -0.93529243588444783865713862664e+1 D[1, 15] = 0.35816841486394083752465898540e+2 D[2, 0] = 0.19985053242002433820987653617e+2 D[2, 5] = 
-0.38703730874935176555105901742e+3 D[2, 6] = -0.18917813819516756882830838328e+3 D[2, 7] = 0.52780815920542364900561016686e+3 D[2, 8] = -0.11573902539959630126141871134e+2 D[2, 9] = 0.68812326946963000169666922661e+1 D[2, 10] = -0.10006050966910838403183860980e+1 D[2, 11] = 0.77771377980534432092869265740 D[2, 12] = -0.27782057523535084065932004339e+1 D[2, 13] = -0.60196695231264120758267380846e+2 D[2, 14] = 0.84320405506677161018159903784e+2 D[2, 15] = 0.11992291136182789328035130030e+2 D[3, 0] = -0.25693933462703749003312586129e+2 D[3, 5] = -0.15418974869023643374053993627e+3 D[3, 6] = -0.23152937917604549567536039109e+3 D[3, 7] = 0.35763911791061412378285349910e+3 D[3, 8] = 0.93405324183624310003907691704e+2 D[3, 9] = -0.37458323136451633156875139351e+2 D[3, 10] = 0.10409964950896230045147246184e+3 D[3, 11] = 0.29840293426660503123344363579e+2 D[3, 12] = -0.43533456590011143754432175058e+2 D[3, 13] = 0.96324553959188282948394950600e+2 D[3, 14] = -0.39177261675615439165231486172e+2 D[3, 15] = -0.14972683625798562581422125276e+3
7,237
36.309278
57
py
scipy
scipy-main/scipy/integrate/_ivp/__init__.py
"""Suite of ODE solvers implemented in Python.""" from .ivp import solve_ivp from .rk import RK23, RK45, DOP853 from .radau import Radau from .bdf import BDF from .lsoda import LSODA from .common import OdeSolution from .base import DenseOutput, OdeSolver
256
27.555556
49
py
scipy
scipy-main/scipy/integrate/_ivp/tests/test_ivp.py
from itertools import product from numpy.testing import (assert_, assert_allclose, assert_array_less, assert_equal, assert_no_warnings, suppress_warnings) import pytest from pytest import raises as assert_raises import numpy as np from scipy.optimize._numdiff import group_columns from scipy.integrate import solve_ivp, RK23, RK45, DOP853, Radau, BDF, LSODA from scipy.integrate import OdeSolution from scipy.integrate._ivp.common import num_jac from scipy.integrate._ivp.base import ConstantDenseOutput from scipy.sparse import coo_matrix, csc_matrix def fun_zero(t, y): return np.zeros_like(y) def fun_linear(t, y): return np.array([-y[0] - 5 * y[1], y[0] + y[1]]) def jac_linear(): return np.array([[-1, -5], [1, 1]]) def sol_linear(t): return np.vstack((-5 * np.sin(2 * t), 2 * np.cos(2 * t) + np.sin(2 * t))) def fun_rational(t, y): return np.array([y[1] / t, y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1))]) def fun_rational_vectorized(t, y): return np.vstack((y[1] / t, y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1)))) def jac_rational(t, y): return np.array([ [0, 1 / t], [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2), (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))] ]) def jac_rational_sparse(t, y): return csc_matrix([ [0, 1 / t], [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2), (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))] ]) def sol_rational(t): return np.asarray((t / (t + 10), 10 * t / (t + 10) ** 2)) def fun_medazko(t, y): n = y.shape[0] // 2 k = 100 c = 4 phi = 2 if t <= 5 else 0 y = np.hstack((phi, 0, y, y[-2])) d = 1 / n j = np.arange(n) + 1 alpha = 2 * (j * d - 1) ** 3 / c ** 2 beta = (j * d - 1) ** 4 / c ** 2 j_2_p1 = 2 * j + 2 j_2_m3 = 2 * j - 2 j_2_m1 = 2 * j j_2 = 2 * j + 1 f = np.empty(2 * n) f[::2] = (alpha * (y[j_2_p1] - y[j_2_m3]) / (2 * d) + beta * (y[j_2_m3] - 2 * y[j_2_m1] + y[j_2_p1]) / d ** 2 - k * y[j_2_m1] * y[j_2]) f[1::2] = -k * y[j_2] * y[j_2_m1] return f def medazko_sparsity(n): cols = [] rows = [] i = np.arange(n) * 2 cols.append(i[1:]) rows.append(i[1:] - 2) 
cols.append(i) rows.append(i) cols.append(i) rows.append(i + 1) cols.append(i[:-1]) rows.append(i[:-1] + 2) i = np.arange(n) * 2 + 1 cols.append(i) rows.append(i) cols.append(i) rows.append(i - 1) cols = np.hstack(cols) rows = np.hstack(rows) return coo_matrix((np.ones_like(cols), (cols, rows))) def fun_complex(t, y): return -y def jac_complex(t, y): return -np.eye(y.shape[0]) def jac_complex_sparse(t, y): return csc_matrix(jac_complex(t, y)) def sol_complex(t): y = (0.5 + 1j) * np.exp(-t) return y.reshape((1, -1)) def fun_event_dense_output_LSODA(t, y): return y * (t - 2) def jac_event_dense_output_LSODA(t, y): return t - 2 def sol_event_dense_output_LSODA(t): return np.exp(t ** 2 / 2 - 2 * t + np.log(0.05) - 6) def compute_error(y, y_true, rtol, atol): e = (y - y_true) / (atol + rtol * np.abs(y_true)) return np.linalg.norm(e, axis=0) / np.sqrt(e.shape[0]) def test_integration(): rtol = 1e-3 atol = 1e-6 y0 = [1/3, 2/9] for vectorized, method, t_span, jac in product( [False, True], ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA'], [[5, 9], [5, 1]], [None, jac_rational, jac_rational_sparse]): if vectorized: fun = fun_rational_vectorized else: fun = fun_rational with suppress_warnings() as sup: sup.filter(UserWarning, "The following arguments have no effect for a chosen " "solver: `jac`") res = solve_ivp(fun, t_span, y0, rtol=rtol, atol=atol, method=method, dense_output=True, jac=jac, vectorized=vectorized) assert_equal(res.t[0], t_span[0]) assert_(res.t_events is None) assert_(res.y_events is None) assert_(res.success) assert_equal(res.status, 0) if method == 'DOP853': # DOP853 spends more functions evaluation because it doesn't # have enough time to develop big enough step size. 
assert_(res.nfev < 50) else: assert_(res.nfev < 40) if method in ['RK23', 'RK45', 'DOP853', 'LSODA']: assert_equal(res.njev, 0) assert_equal(res.nlu, 0) else: assert_(0 < res.njev < 3) assert_(0 < res.nlu < 10) y_true = sol_rational(res.t) e = compute_error(res.y, y_true, rtol, atol) assert_(np.all(e < 5)) tc = np.linspace(*t_span) yc_true = sol_rational(tc) yc = res.sol(tc) e = compute_error(yc, yc_true, rtol, atol) assert_(np.all(e < 5)) tc = (t_span[0] + t_span[-1]) / 2 yc_true = sol_rational(tc) yc = res.sol(tc) e = compute_error(yc, yc_true, rtol, atol) assert_(np.all(e < 5)) assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15) def test_integration_complex(): rtol = 1e-3 atol = 1e-6 y0 = [0.5 + 1j] t_span = [0, 1] tc = np.linspace(t_span[0], t_span[1]) for method, jac in product(['RK23', 'RK45', 'DOP853', 'BDF'], [None, jac_complex, jac_complex_sparse]): with suppress_warnings() as sup: sup.filter(UserWarning, "The following arguments have no effect for a chosen " "solver: `jac`") res = solve_ivp(fun_complex, t_span, y0, method=method, dense_output=True, rtol=rtol, atol=atol, jac=jac) assert_equal(res.t[0], t_span[0]) assert_(res.t_events is None) assert_(res.y_events is None) assert_(res.success) assert_equal(res.status, 0) if method == 'DOP853': assert res.nfev < 35 else: assert res.nfev < 25 if method == 'BDF': assert_equal(res.njev, 1) assert res.nlu < 6 else: assert res.njev == 0 assert res.nlu == 0 y_true = sol_complex(res.t) e = compute_error(res.y, y_true, rtol, atol) assert np.all(e < 5) yc_true = sol_complex(tc) yc = res.sol(tc) e = compute_error(yc, yc_true, rtol, atol) assert np.all(e < 5) def test_integration_sparse_difference(): n = 200 t_span = [0, 20] y0 = np.zeros(2 * n) y0[1::2] = 1 sparsity = medazko_sparsity(n) for method in ['BDF', 'Radau']: res = solve_ivp(fun_medazko, t_span, y0, method=method, jac_sparsity=sparsity) assert_equal(res.t[0], t_span[0]) assert_(res.t_events is None) assert_(res.y_events is None) 
assert_(res.success) assert_equal(res.status, 0) assert_allclose(res.y[78, -1], 0.233994e-3, rtol=1e-2) assert_allclose(res.y[79, -1], 0, atol=1e-3) assert_allclose(res.y[148, -1], 0.359561e-3, rtol=1e-2) assert_allclose(res.y[149, -1], 0, atol=1e-3) assert_allclose(res.y[198, -1], 0.117374129e-3, rtol=1e-2) assert_allclose(res.y[199, -1], 0.6190807e-5, atol=1e-3) assert_allclose(res.y[238, -1], 0, atol=1e-3) assert_allclose(res.y[239, -1], 0.9999997, rtol=1e-2) def test_integration_const_jac(): rtol = 1e-3 atol = 1e-6 y0 = [0, 2] t_span = [0, 2] J = jac_linear() J_sparse = csc_matrix(J) for method, jac in product(['Radau', 'BDF'], [J, J_sparse]): res = solve_ivp(fun_linear, t_span, y0, rtol=rtol, atol=atol, method=method, dense_output=True, jac=jac) assert_equal(res.t[0], t_span[0]) assert_(res.t_events is None) assert_(res.y_events is None) assert_(res.success) assert_equal(res.status, 0) assert_(res.nfev < 100) assert_equal(res.njev, 0) assert_(0 < res.nlu < 15) y_true = sol_linear(res.t) e = compute_error(res.y, y_true, rtol, atol) assert_(np.all(e < 10)) tc = np.linspace(*t_span) yc_true = sol_linear(tc) yc = res.sol(tc) e = compute_error(yc, yc_true, rtol, atol) assert_(np.all(e < 15)) assert_allclose(res.sol(res.t), res.y, rtol=1e-14, atol=1e-14) @pytest.mark.slow @pytest.mark.parametrize('method', ['Radau', 'BDF', 'LSODA']) def test_integration_stiff(method): rtol = 1e-6 atol = 1e-6 y0 = [1e4, 0, 0] tspan = [0, 1e8] def fun_robertson(t, state): x, y, z = state return [ -0.04 * x + 1e4 * y * z, 0.04 * x - 1e4 * y * z - 3e7 * y * y, 3e7 * y * y, ] res = solve_ivp(fun_robertson, tspan, y0, rtol=rtol, atol=atol, method=method) # If the stiff mode is not activated correctly, these numbers will be much bigger assert res.nfev < 5000 assert res.njev < 200 def test_events(): def event_rational_1(t, y): return y[0] - y[1] ** 0.7 def event_rational_2(t, y): return y[1] ** 0.6 - y[0] def event_rational_3(t, y): return t - 7.4 event_rational_3.terminal = True for method 
in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']: res = solve_ivp(fun_rational, [5, 8], [1/3, 2/9], method=method, events=(event_rational_1, event_rational_2)) assert_equal(res.status, 0) assert_equal(res.t_events[0].size, 1) assert_equal(res.t_events[1].size, 1) assert_(5.3 < res.t_events[0][0] < 5.7) assert_(7.3 < res.t_events[1][0] < 7.7) assert_equal(res.y_events[0].shape, (1, 2)) assert_equal(res.y_events[1].shape, (1, 2)) assert np.isclose( event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0) assert np.isclose( event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0) event_rational_1.direction = 1 event_rational_2.direction = 1 res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method, events=(event_rational_1, event_rational_2)) assert_equal(res.status, 0) assert_equal(res.t_events[0].size, 1) assert_equal(res.t_events[1].size, 0) assert_(5.3 < res.t_events[0][0] < 5.7) assert_equal(res.y_events[0].shape, (1, 2)) assert_equal(res.y_events[1].shape, (0,)) assert np.isclose( event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0) event_rational_1.direction = -1 event_rational_2.direction = -1 res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method, events=(event_rational_1, event_rational_2)) assert_equal(res.status, 0) assert_equal(res.t_events[0].size, 0) assert_equal(res.t_events[1].size, 1) assert_(7.3 < res.t_events[1][0] < 7.7) assert_equal(res.y_events[0].shape, (0,)) assert_equal(res.y_events[1].shape, (1, 2)) assert np.isclose( event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0) event_rational_1.direction = 0 event_rational_2.direction = 0 res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method, events=(event_rational_1, event_rational_2, event_rational_3), dense_output=True) assert_equal(res.status, 1) assert_equal(res.t_events[0].size, 1) assert_equal(res.t_events[1].size, 0) assert_equal(res.t_events[2].size, 1) assert_(5.3 < res.t_events[0][0] < 5.7) assert_(7.3 < res.t_events[2][0] 
< 7.5) assert_equal(res.y_events[0].shape, (1, 2)) assert_equal(res.y_events[1].shape, (0,)) assert_equal(res.y_events[2].shape, (1, 2)) assert np.isclose( event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0) assert np.isclose( event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0) res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method, events=event_rational_1, dense_output=True) assert_equal(res.status, 0) assert_equal(res.t_events[0].size, 1) assert_(5.3 < res.t_events[0][0] < 5.7) assert_equal(res.y_events[0].shape, (1, 2)) assert np.isclose( event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0) # Also test that termination by event doesn't break interpolants. tc = np.linspace(res.t[0], res.t[-1]) yc_true = sol_rational(tc) yc = res.sol(tc) e = compute_error(yc, yc_true, 1e-3, 1e-6) assert_(np.all(e < 5)) # Test that the y_event matches solution assert np.allclose(sol_rational(res.t_events[0][0]), res.y_events[0][0], rtol=1e-3, atol=1e-6) # Test in backward direction. 
event_rational_1.direction = 0 event_rational_2.direction = 0 for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']: res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method, events=(event_rational_1, event_rational_2)) assert_equal(res.status, 0) assert_equal(res.t_events[0].size, 1) assert_equal(res.t_events[1].size, 1) assert_(5.3 < res.t_events[0][0] < 5.7) assert_(7.3 < res.t_events[1][0] < 7.7) assert_equal(res.y_events[0].shape, (1, 2)) assert_equal(res.y_events[1].shape, (1, 2)) assert np.isclose( event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0) assert np.isclose( event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0) event_rational_1.direction = -1 event_rational_2.direction = -1 res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method, events=(event_rational_1, event_rational_2)) assert_equal(res.status, 0) assert_equal(res.t_events[0].size, 1) assert_equal(res.t_events[1].size, 0) assert_(5.3 < res.t_events[0][0] < 5.7) assert_equal(res.y_events[0].shape, (1, 2)) assert_equal(res.y_events[1].shape, (0,)) assert np.isclose( event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0) event_rational_1.direction = 1 event_rational_2.direction = 1 res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method, events=(event_rational_1, event_rational_2)) assert_equal(res.status, 0) assert_equal(res.t_events[0].size, 0) assert_equal(res.t_events[1].size, 1) assert_(7.3 < res.t_events[1][0] < 7.7) assert_equal(res.y_events[0].shape, (0,)) assert_equal(res.y_events[1].shape, (1, 2)) assert np.isclose( event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0) event_rational_1.direction = 0 event_rational_2.direction = 0 res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method, events=(event_rational_1, event_rational_2, event_rational_3), dense_output=True) assert_equal(res.status, 1) assert_equal(res.t_events[0].size, 0) assert_equal(res.t_events[1].size, 1) assert_equal(res.t_events[2].size, 1) 
assert_(7.3 < res.t_events[1][0] < 7.7) assert_(7.3 < res.t_events[2][0] < 7.5) assert_equal(res.y_events[0].shape, (0,)) assert_equal(res.y_events[1].shape, (1, 2)) assert_equal(res.y_events[2].shape, (1, 2)) assert np.isclose( event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0) assert np.isclose( event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0) # Also test that termination by event doesn't break interpolants. tc = np.linspace(res.t[-1], res.t[0]) yc_true = sol_rational(tc) yc = res.sol(tc) e = compute_error(yc, yc_true, 1e-3, 1e-6) assert_(np.all(e < 5)) assert np.allclose(sol_rational(res.t_events[1][0]), res.y_events[1][0], rtol=1e-3, atol=1e-6) assert np.allclose(sol_rational(res.t_events[2][0]), res.y_events[2][0], rtol=1e-3, atol=1e-6) def test_max_step(): rtol = 1e-3 atol = 1e-6 y0 = [1/3, 2/9] for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]: for t_span in ([5, 9], [5, 1]): res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, max_step=0.5, atol=atol, method=method, dense_output=True) assert_equal(res.t[0], t_span[0]) assert_equal(res.t[-1], t_span[-1]) assert_(np.all(np.abs(np.diff(res.t)) <= 0.5 + 1e-15)) assert_(res.t_events is None) assert_(res.success) assert_equal(res.status, 0) y_true = sol_rational(res.t) e = compute_error(res.y, y_true, rtol, atol) assert_(np.all(e < 5)) tc = np.linspace(*t_span) yc_true = sol_rational(tc) yc = res.sol(tc) e = compute_error(yc, yc_true, rtol, atol) assert_(np.all(e < 5)) assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15) assert_raises(ValueError, method, fun_rational, t_span[0], y0, t_span[1], max_step=-1) if method is not LSODA: solver = method(fun_rational, t_span[0], y0, t_span[1], rtol=rtol, atol=atol, max_step=1e-20) message = solver.step() assert_equal(solver.status, 'failed') assert_("step size is less" in message) assert_raises(RuntimeError, solver.step) def test_first_step(): rtol = 1e-3 atol = 1e-6 y0 = [1/3, 2/9] first_step = 0.1 for method in [RK23, RK45, DOP853, 
Radau, BDF, LSODA]: for t_span in ([5, 9], [5, 1]): res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, max_step=0.5, atol=atol, method=method, dense_output=True, first_step=first_step) assert_equal(res.t[0], t_span[0]) assert_equal(res.t[-1], t_span[-1]) assert_allclose(first_step, np.abs(res.t[1] - 5)) assert_(res.t_events is None) assert_(res.success) assert_equal(res.status, 0) y_true = sol_rational(res.t) e = compute_error(res.y, y_true, rtol, atol) assert_(np.all(e < 5)) tc = np.linspace(*t_span) yc_true = sol_rational(tc) yc = res.sol(tc) e = compute_error(yc, yc_true, rtol, atol) assert_(np.all(e < 5)) assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15) assert_raises(ValueError, method, fun_rational, t_span[0], y0, t_span[1], first_step=-1) assert_raises(ValueError, method, fun_rational, t_span[0], y0, t_span[1], first_step=5) def test_t_eval(): rtol = 1e-3 atol = 1e-6 y0 = [1/3, 2/9] for t_span in ([5, 9], [5, 1]): t_eval = np.linspace(t_span[0], t_span[1], 10) res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol, t_eval=t_eval) assert_equal(res.t, t_eval) assert_(res.t_events is None) assert_(res.success) assert_equal(res.status, 0) y_true = sol_rational(res.t) e = compute_error(res.y, y_true, rtol, atol) assert_(np.all(e < 5)) t_eval = [5, 5.01, 7, 8, 8.01, 9] res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol, t_eval=t_eval) assert_equal(res.t, t_eval) assert_(res.t_events is None) assert_(res.success) assert_equal(res.status, 0) y_true = sol_rational(res.t) e = compute_error(res.y, y_true, rtol, atol) assert_(np.all(e < 5)) t_eval = [5, 4.99, 3, 1.5, 1.1, 1.01, 1] res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol, t_eval=t_eval) assert_equal(res.t, t_eval) assert_(res.t_events is None) assert_(res.success) assert_equal(res.status, 0) t_eval = [5.01, 7, 8, 8.01] res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol, t_eval=t_eval) assert_equal(res.t, t_eval) assert_(res.t_events is None) 
assert_(res.success) assert_equal(res.status, 0) y_true = sol_rational(res.t) e = compute_error(res.y, y_true, rtol, atol) assert_(np.all(e < 5)) t_eval = [4.99, 3, 1.5, 1.1, 1.01] res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol, t_eval=t_eval) assert_equal(res.t, t_eval) assert_(res.t_events is None) assert_(res.success) assert_equal(res.status, 0) t_eval = [4, 6] assert_raises(ValueError, solve_ivp, fun_rational, [5, 9], y0, rtol=rtol, atol=atol, t_eval=t_eval) def test_t_eval_dense_output(): rtol = 1e-3 atol = 1e-6 y0 = [1/3, 2/9] t_span = [5, 9] t_eval = np.linspace(t_span[0], t_span[1], 10) res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol, t_eval=t_eval) res_d = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol, t_eval=t_eval, dense_output=True) assert_equal(res.t, t_eval) assert_(res.t_events is None) assert_(res.success) assert_equal(res.status, 0) assert_equal(res.t, res_d.t) assert_equal(res.y, res_d.y) assert_(res_d.t_events is None) assert_(res_d.success) assert_equal(res_d.status, 0) # if t and y are equal only test values for one case y_true = sol_rational(res.t) e = compute_error(res.y, y_true, rtol, atol) assert_(np.all(e < 5)) def test_t_eval_early_event(): def early_event(t, y): return t - 7 early_event.terminal = True rtol = 1e-3 atol = 1e-6 y0 = [1/3, 2/9] t_span = [5, 9] t_eval = np.linspace(7.5, 9, 16) for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']: with suppress_warnings() as sup: sup.filter(UserWarning, "The following arguments have no effect for a chosen " "solver: `jac`") res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol, method=method, t_eval=t_eval, events=early_event, jac=jac_rational) assert res.success assert res.message == 'A termination event occurred.' 
assert res.status == 1 assert not res.t and not res.y assert len(res.t_events) == 1 assert res.t_events[0].size == 1 assert res.t_events[0][0] == 7 def test_event_dense_output_LSODA(): def event_lsoda(t, y): return y[0] - 2.02e-5 rtol = 1e-3 atol = 1e-6 y0 = [0.05] t_span = [-2, 2] first_step = 1e-3 res = solve_ivp( fun_event_dense_output_LSODA, t_span, y0, method="LSODA", dense_output=True, events=event_lsoda, first_step=first_step, max_step=1, rtol=rtol, atol=atol, jac=jac_event_dense_output_LSODA, ) assert_equal(res.t[0], t_span[0]) assert_equal(res.t[-1], t_span[-1]) assert_allclose(first_step, np.abs(res.t[1] - t_span[0])) assert res.success assert_equal(res.status, 0) y_true = sol_event_dense_output_LSODA(res.t) e = compute_error(res.y, y_true, rtol, atol) assert_array_less(e, 5) tc = np.linspace(*t_span) yc_true = sol_event_dense_output_LSODA(tc) yc = res.sol(tc) e = compute_error(yc, yc_true, rtol, atol) assert_array_less(e, 5) assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15) def test_no_integration(): for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']: sol = solve_ivp(lambda t, y: -y, [4, 4], [2, 3], method=method, dense_output=True) assert_equal(sol.sol(4), [2, 3]) assert_equal(sol.sol([4, 5, 6]), [[2, 2, 2], [3, 3, 3]]) def test_no_integration_class(): for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]: solver = method(lambda t, y: -y, 0.0, [10.0, 0.0], 0.0) solver.step() assert_equal(solver.status, 'finished') sol = solver.dense_output() assert_equal(sol(0.0), [10.0, 0.0]) assert_equal(sol([0, 1, 2]), [[10, 10, 10], [0, 0, 0]]) solver = method(lambda t, y: -y, 0.0, [], np.inf) solver.step() assert_equal(solver.status, 'finished') sol = solver.dense_output() assert_equal(sol(100.0), []) assert_equal(sol([0, 1, 2]), np.empty((0, 3))) def test_empty(): def fun(t, y): return np.zeros((0,)) y0 = np.zeros((0,)) for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']: sol = assert_no_warnings(solve_ivp, fun, [0, 10], 
y0, method=method, dense_output=True) assert_equal(sol.sol(10), np.zeros((0,))) assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3))) for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']: sol = assert_no_warnings(solve_ivp, fun, [0, np.inf], y0, method=method, dense_output=True) assert_equal(sol.sol(10), np.zeros((0,))) assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3))) def test_ConstantDenseOutput(): sol = ConstantDenseOutput(0, 1, np.array([1, 2])) assert_allclose(sol(1.5), [1, 2]) assert_allclose(sol([1, 1.5, 2]), [[1, 1, 1], [2, 2, 2]]) sol = ConstantDenseOutput(0, 1, np.array([])) assert_allclose(sol(1.5), np.empty(0)) assert_allclose(sol([1, 1.5, 2]), np.empty((0, 3))) def test_classes(): y0 = [1 / 3, 2 / 9] for cls in [RK23, RK45, DOP853, Radau, BDF, LSODA]: solver = cls(fun_rational, 5, y0, np.inf) assert_equal(solver.n, 2) assert_equal(solver.status, 'running') assert_equal(solver.t_bound, np.inf) assert_equal(solver.direction, 1) assert_equal(solver.t, 5) assert_equal(solver.y, y0) assert_(solver.step_size is None) if cls is not LSODA: assert_(solver.nfev > 0) assert_(solver.njev >= 0) assert_equal(solver.nlu, 0) else: assert_equal(solver.nfev, 0) assert_equal(solver.njev, 0) assert_equal(solver.nlu, 0) assert_raises(RuntimeError, solver.dense_output) message = solver.step() assert_equal(solver.status, 'running') assert_equal(message, None) assert_equal(solver.n, 2) assert_equal(solver.t_bound, np.inf) assert_equal(solver.direction, 1) assert_(solver.t > 5) assert_(not np.all(np.equal(solver.y, y0))) assert_(solver.step_size > 0) assert_(solver.nfev > 0) assert_(solver.njev >= 0) assert_(solver.nlu >= 0) sol = solver.dense_output() assert_allclose(sol(5), y0, rtol=1e-15, atol=0) def test_OdeSolution(): ts = np.array([0, 2, 5], dtype=float) s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1])) s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1])) sol = OdeSolution(ts, [s1, s2]) assert_equal(sol(-1), [-1]) assert_equal(sol(1), [-1]) 
assert_equal(sol(2), [-1]) assert_equal(sol(3), [1]) assert_equal(sol(5), [1]) assert_equal(sol(6), [1]) assert_equal(sol([0, 6, -2, 1.5, 4.5, 2.5, 5, 5.5, 2]), np.array([[-1, 1, -1, -1, 1, 1, 1, 1, -1]])) ts = np.array([10, 4, -3]) s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1])) s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1])) sol = OdeSolution(ts, [s1, s2]) assert_equal(sol(11), [-1]) assert_equal(sol(10), [-1]) assert_equal(sol(5), [-1]) assert_equal(sol(4), [-1]) assert_equal(sol(0), [1]) assert_equal(sol(-3), [1]) assert_equal(sol(-4), [1]) assert_equal(sol([12, -5, 10, -3, 6, 1, 4]), np.array([[-1, 1, -1, 1, -1, 1, -1]])) ts = np.array([1, 1]) s = ConstantDenseOutput(1, 1, np.array([10])) sol = OdeSolution(ts, [s]) assert_equal(sol(0), [10]) assert_equal(sol(1), [10]) assert_equal(sol(2), [10]) assert_equal(sol([2, 1, 0]), np.array([[10, 10, 10]])) def test_num_jac(): def fun(t, y): return np.vstack([ -0.04 * y[0] + 1e4 * y[1] * y[2], 0.04 * y[0] - 1e4 * y[1] * y[2] - 3e7 * y[1] ** 2, 3e7 * y[1] ** 2 ]) def jac(t, y): return np.array([ [-0.04, 1e4 * y[2], 1e4 * y[1]], [0.04, -1e4 * y[2] - 6e7 * y[1], -1e4 * y[1]], [0, 6e7 * y[1], 0] ]) t = 1 y = np.array([1, 0, 0]) J_true = jac(t, y) threshold = 1e-5 f = fun(t, y).ravel() J_num, factor = num_jac(fun, t, y, f, threshold, None) assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5) J_num, factor = num_jac(fun, t, y, f, threshold, factor) assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5) def test_num_jac_sparse(): def fun(t, y): e = y[1:]**3 - y[:-1]**2 z = np.zeros(y.shape[1]) return np.vstack((z, 3 * e)) + np.vstack((2 * e, z)) def structure(n): A = np.zeros((n, n), dtype=int) A[0, 0] = 1 A[0, 1] = 1 for i in range(1, n - 1): A[i, i - 1: i + 2] = 1 A[-1, -1] = 1 A[-1, -2] = 1 return A np.random.seed(0) n = 20 y = np.random.randn(n) A = structure(n) groups = group_columns(A) f = fun(0, y[:, None]).ravel() # Compare dense and sparse results, assuming that dense implementation # is correct (as it is 
straightforward). J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, None, sparsity=(A, groups)) J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, None) assert_allclose(J_num_dense, J_num_sparse.toarray(), rtol=1e-12, atol=1e-14) assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14) # Take small factors to trigger their recomputing inside. factor = np.random.uniform(0, 1e-12, size=n) J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, factor, sparsity=(A, groups)) J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, factor) assert_allclose(J_num_dense, J_num_sparse.toarray(), rtol=1e-12, atol=1e-14) assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14) def test_args(): # sys3 is actually two decoupled systems. (x, y) form a # linear oscillator, while z is a nonlinear first order # system with equilibria at z=0 and z=1. If k > 0, z=1 # is stable and z=0 is unstable. def sys3(t, w, omega, k, zfinal): x, y, z = w return [-omega*y, omega*x, k*z*(1 - z)] def sys3_jac(t, w, omega, k, zfinal): x, y, z = w J = np.array([[0, -omega, 0], [omega, 0, 0], [0, 0, k*(1 - 2*z)]]) return J def sys3_x0decreasing(t, w, omega, k, zfinal): x, y, z = w return x def sys3_y0increasing(t, w, omega, k, zfinal): x, y, z = w return y def sys3_zfinal(t, w, omega, k, zfinal): x, y, z = w return z - zfinal # Set the event flags for the event functions. sys3_x0decreasing.direction = -1 sys3_y0increasing.direction = 1 sys3_zfinal.terminal = True omega = 2 k = 4 tfinal = 5 zfinal = 0.99 # Find z0 such that when z(0) = z0, z(tfinal) = zfinal. # The condition z(tfinal) = zfinal is the terminal event. z0 = np.exp(-k*tfinal)/((1 - zfinal)/zfinal + np.exp(-k*tfinal)) w0 = [0, -1, z0] # Provide the jac argument and use the Radau method to ensure that the use # of the Jacobian function is exercised. # If event handling is working, the solution will stop at tfinal, not tend. 
tend = 2*tfinal sol = solve_ivp(sys3, [0, tend], w0, events=[sys3_x0decreasing, sys3_y0increasing, sys3_zfinal], dense_output=True, args=(omega, k, zfinal), method='Radau', jac=sys3_jac, rtol=1e-10, atol=1e-13) # Check that we got the expected events at the expected times. x0events_t = sol.t_events[0] y0events_t = sol.t_events[1] zfinalevents_t = sol.t_events[2] assert_allclose(x0events_t, [0.5*np.pi, 1.5*np.pi]) assert_allclose(y0events_t, [0.25*np.pi, 1.25*np.pi]) assert_allclose(zfinalevents_t, [tfinal]) # Check that the solution agrees with the known exact solution. t = np.linspace(0, zfinalevents_t[0], 250) w = sol.sol(t) assert_allclose(w[0], np.sin(omega*t), rtol=1e-9, atol=1e-12) assert_allclose(w[1], -np.cos(omega*t), rtol=1e-9, atol=1e-12) assert_allclose(w[2], 1/(((1 - z0)/z0)*np.exp(-k*t) + 1), rtol=1e-9, atol=1e-12) # Check that the state variables have the expected values at the events. x0events = sol.sol(x0events_t) y0events = sol.sol(y0events_t) zfinalevents = sol.sol(zfinalevents_t) assert_allclose(x0events[0], np.zeros_like(x0events[0]), atol=5e-14) assert_allclose(x0events[1], np.ones_like(x0events[1])) assert_allclose(y0events[0], np.ones_like(y0events[0])) assert_allclose(y0events[1], np.zeros_like(y0events[1]), atol=5e-14) assert_allclose(zfinalevents[2], [zfinal]) def test_array_rtol(): # solve_ivp had a bug with array_like `rtol`; see gh-15482 # check that it's fixed def f(t, y): return y[0], y[1] # no warning (or error) when `rtol` is array_like sol = solve_ivp(f, (0, 1), [1., 1.], rtol=[1e-1, 1e-1]) err1 = np.abs(np.linalg.norm(sol.y[:, -1] - np.exp(1))) # warning when an element of `rtol` is too small with pytest.warns(UserWarning, match="At least one element..."): sol = solve_ivp(f, (0, 1), [1., 1.], rtol=[1e-1, 1e-16]) err2 = np.abs(np.linalg.norm(sol.y[:, -1] - np.exp(1))) # tighter rtol improves the error assert err2 < err1 @pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']) def 
test_integration_zero_rhs(method): result = solve_ivp(fun_zero, [0, 10], np.ones(3), method=method) assert_(result.success) assert_equal(result.status, 0) assert_allclose(result.y, 1.0, rtol=1e-15) def test_args_single_value(): def fun_with_arg(t, y, a): return a*y message = "Supplied 'args' cannot be unpacked." with pytest.raises(TypeError, match=message): solve_ivp(fun_with_arg, (0, 0.1), [1], args=-1) sol = solve_ivp(fun_with_arg, (0, 0.1), [1], args=(-1,)) assert_allclose(sol.y[0, -1], np.exp(-0.1)) @pytest.mark.parametrize("f0_fill", [np.nan, np.inf]) def test_initial_state_finiteness(f0_fill): # regression test for gh-17846 msg = "All components of the initial state `y0` must be finite." with pytest.raises(ValueError, match=msg): solve_ivp(fun_zero, [0, 10], np.full(3, f0_fill))
35,940
31.852834
102
py
scipy
scipy-main/scipy/integrate/_ivp/tests/test_rk.py
import pytest from numpy.testing import assert_allclose, assert_ import numpy as np from scipy.integrate import RK23, RK45, DOP853 from scipy.integrate._ivp import dop853_coefficients @pytest.mark.parametrize("solver", [RK23, RK45, DOP853]) def test_coefficient_properties(solver): assert_allclose(np.sum(solver.B), 1, rtol=1e-15) assert_allclose(np.sum(solver.A, axis=1), solver.C, rtol=1e-14) def test_coefficient_properties_dop853(): assert_allclose(np.sum(dop853_coefficients.B), 1, rtol=1e-15) assert_allclose(np.sum(dop853_coefficients.A, axis=1), dop853_coefficients.C, rtol=1e-14) @pytest.mark.parametrize("solver_class", [RK23, RK45, DOP853]) def test_error_estimation(solver_class): step = 0.2 solver = solver_class(lambda t, y: y, 0, [1], 1, first_step=step) solver.step() error_estimate = solver._estimate_error(solver.K, step) error = solver.y - np.exp([step]) assert_(np.abs(error) < np.abs(error_estimate)) @pytest.mark.parametrize("solver_class", [RK23, RK45, DOP853]) def test_error_estimation_complex(solver_class): h = 0.2 solver = solver_class(lambda t, y: 1j * y, 0, [1j], 1, first_step=h) solver.step() err_norm = solver._estimate_error_norm(solver.K, h, scale=[1]) assert np.isrealobj(err_norm)
1,326
33.921053
72
py
scipy
scipy-main/scipy/integrate/_ivp/tests/__init__.py
0
0
0
py
scipy
scipy-main/scipy/sparse/dia.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.sparse` namespace for importing the functions # included below. import warnings from . import _dia __all__ = [ # noqa: F822 'check_shape', 'dia_matrix', 'dia_matvec', 'get_index_dtype', 'get_sum_dtype', 'getdtype', 'isshape', 'isspmatrix', 'isspmatrix_dia', 'spmatrix', 'upcast_char', 'validateaxis', ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.sparse.dia is deprecated and has no attribute " f"{name}. Try looking in scipy.sparse instead.") warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, " "the `scipy.sparse.dia` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_dia, name)
936
22.425
76
py
scipy
scipy-main/scipy/sparse/_index.py
"""Indexing mixin for sparse matrix classes. """ import numpy as np from warnings import warn from ._sputils import isintlike INT_TYPES = (int, np.integer) def _broadcast_arrays(a, b): """ Same as np.broadcast_arrays(a, b) but old writeability rules. NumPy >= 1.17.0 transitions broadcast_arrays to return read-only arrays. Set writeability explicitly to avoid warnings. Retain the old writeability rules, as our Cython code assumes the old behavior. """ x, y = np.broadcast_arrays(a, b) x.flags.writeable = a.flags.writeable y.flags.writeable = b.flags.writeable return x, y class IndexMixin: """ This class provides common dispatching and validation logic for indexing. """ def _raise_on_1d_array_slice(self): """We do not currently support 1D sparse arrays. This function is called each time that a 1D array would result, raising an error instead. Once 1D sparse arrays are implemented, it should be removed. """ if self._is_array: raise NotImplementedError( 'We have not yet implemented 1D sparse slices; ' 'please index using explicit indices, e.g. `x[:, [0]]`' ) def __getitem__(self, key): row, col = self._validate_indices(key) # Dispatch to specialized methods. 
if isinstance(row, INT_TYPES): if isinstance(col, INT_TYPES): return self._get_intXint(row, col) elif isinstance(col, slice): self._raise_on_1d_array_slice() return self._get_intXslice(row, col) elif col.ndim == 1: self._raise_on_1d_array_slice() return self._get_intXarray(row, col) elif col.ndim == 2: return self._get_intXarray(row, col) raise IndexError('index results in >2 dimensions') elif isinstance(row, slice): if isinstance(col, INT_TYPES): self._raise_on_1d_array_slice() return self._get_sliceXint(row, col) elif isinstance(col, slice): if row == slice(None) and row == col: return self.copy() return self._get_sliceXslice(row, col) elif col.ndim == 1: return self._get_sliceXarray(row, col) raise IndexError('index results in >2 dimensions') elif row.ndim == 1: if isinstance(col, INT_TYPES): self._raise_on_1d_array_slice() return self._get_arrayXint(row, col) elif isinstance(col, slice): return self._get_arrayXslice(row, col) else: # row.ndim == 2 if isinstance(col, INT_TYPES): return self._get_arrayXint(row, col) elif isinstance(col, slice): raise IndexError('index results in >2 dimensions') elif row.shape[1] == 1 and (col.ndim == 1 or col.shape[0] == 1): # special case for outer indexing return self._get_columnXarray(row[:,0], col.ravel()) # The only remaining case is inner (fancy) indexing row, col = _broadcast_arrays(row, col) if row.shape != col.shape: raise IndexError('number of row and column indices differ') if row.size == 0: return self.__class__(np.atleast_2d(row).shape, dtype=self.dtype) return self._get_arrayXarray(row, col) def __setitem__(self, key, x): row, col = self._validate_indices(key) if isinstance(row, INT_TYPES) and isinstance(col, INT_TYPES): x = np.asarray(x, dtype=self.dtype) if x.size != 1: raise ValueError('Trying to assign a sequence to an item') self._set_intXint(row, col, x.flat[0]) return if isinstance(row, slice): row = np.arange(*row.indices(self.shape[0]))[:, None] else: row = np.atleast_1d(row) if isinstance(col, slice): col 
= np.arange(*col.indices(self.shape[1]))[None, :] if row.ndim == 1: row = row[:, None] else: col = np.atleast_1d(col) i, j = _broadcast_arrays(row, col) if i.shape != j.shape: raise IndexError('number of row and column indices differ') from ._base import issparse if issparse(x): if i.ndim == 1: # Inner indexing, so treat them like row vectors. i = i[None] j = j[None] broadcast_row = x.shape[0] == 1 and i.shape[0] != 1 broadcast_col = x.shape[1] == 1 and i.shape[1] != 1 if not ((broadcast_row or x.shape[0] == i.shape[0]) and (broadcast_col or x.shape[1] == i.shape[1])): raise ValueError('shape mismatch in assignment') if x.shape[0] == 0 or x.shape[1] == 0: return x = x.tocoo(copy=True) x.sum_duplicates() self._set_arrayXarray_sparse(i, j, x) else: # Make x and i into the same shape x = np.asarray(x, dtype=self.dtype) if x.squeeze().shape != i.squeeze().shape: x = np.broadcast_to(x, i.shape) if x.size == 0: return x = x.reshape(i.shape) self._set_arrayXarray(i, j, x) def _validate_indices(self, key): M, N = self.shape row, col = _unpack_index(key) if isintlike(row): row = int(row) if row < -M or row >= M: raise IndexError('row index (%d) out of range' % row) if row < 0: row += M elif not isinstance(row, slice): row = self._asindices(row, M) if isintlike(col): col = int(col) if col < -N or col >= N: raise IndexError('column index (%d) out of range' % col) if col < 0: col += N elif not isinstance(col, slice): col = self._asindices(col, N) return row, col def _asindices(self, idx, length): """Convert `idx` to a valid index for an axis with a given length. Subclasses that need special validation can override this method. 
""" try: x = np.asarray(idx) except (ValueError, TypeError, MemoryError) as e: raise IndexError('invalid index') from e if x.ndim not in (1, 2): raise IndexError('Index dimension must be 1 or 2') if x.size == 0: return x # Check bounds max_indx = x.max() if max_indx >= length: raise IndexError('index (%d) out of range' % max_indx) min_indx = x.min() if min_indx < 0: if min_indx < -length: raise IndexError('index (%d) out of range' % min_indx) if x is idx or not x.flags.owndata: x = x.copy() x[x < 0] += length return x def _getrow(self, i): """Return a copy of row i of the matrix, as a (1 x n) row vector. """ M, N = self.shape i = int(i) if i < -M or i >= M: raise IndexError('index (%d) out of range' % i) if i < 0: i += M return self._get_intXslice(i, slice(None)) def _getcol(self, i): """Return a copy of column i of the matrix, as a (m x 1) column vector. """ M, N = self.shape i = int(i) if i < -N or i >= N: raise IndexError('index (%d) out of range' % i) if i < 0: i += N return self._get_sliceXint(slice(None), i) def _get_intXint(self, row, col): raise NotImplementedError() def _get_intXarray(self, row, col): raise NotImplementedError() def _get_intXslice(self, row, col): raise NotImplementedError() def _get_sliceXint(self, row, col): raise NotImplementedError() def _get_sliceXslice(self, row, col): raise NotImplementedError() def _get_sliceXarray(self, row, col): raise NotImplementedError() def _get_arrayXint(self, row, col): raise NotImplementedError() def _get_arrayXslice(self, row, col): raise NotImplementedError() def _get_columnXarray(self, row, col): raise NotImplementedError() def _get_arrayXarray(self, row, col): raise NotImplementedError() def _set_intXint(self, row, col, x): raise NotImplementedError() def _set_arrayXarray(self, row, col, x): raise NotImplementedError() def _set_arrayXarray_sparse(self, row, col, x): # Fall back to densifying x x = np.asarray(x.toarray(), dtype=self.dtype) x, _ = _broadcast_arrays(x, row) self._set_arrayXarray(row, col, 
x) def _unpack_index(index): """ Parse index. Always return a tuple of the form (row, col). Valid type for row/col is integer, slice, or array of integers. """ # First, check if indexing with single boolean matrix. from ._base import _spbase, issparse if (isinstance(index, (_spbase, np.ndarray)) and index.ndim == 2 and index.dtype.kind == 'b'): return index.nonzero() # Parse any ellipses. index = _check_ellipsis(index) # Next, parse the tuple or object if isinstance(index, tuple): if len(index) == 2: row, col = index elif len(index) == 1: row, col = index[0], slice(None) else: raise IndexError('invalid number of indices') else: idx = _compatible_boolean_index(index) if idx is None: row, col = index, slice(None) elif idx.ndim < 2: return _boolean_index_to_array(idx), slice(None) elif idx.ndim == 2: return idx.nonzero() # Next, check for validity and transform the index as needed. if issparse(row) or issparse(col): # Supporting sparse boolean indexing with both row and col does # not work because spmatrix.ndim is always 2. raise IndexError( 'Indexing with sparse matrices is not supported ' 'except boolean indexing where matrix and index ' 'are equal shapes.') bool_row = _compatible_boolean_index(row) bool_col = _compatible_boolean_index(col) if bool_row is not None: row = _boolean_index_to_array(bool_row) if bool_col is not None: col = _boolean_index_to_array(bool_col) return row, col def _check_ellipsis(index): """Process indices with Ellipsis. Returns modified index.""" if index is Ellipsis: return (slice(None), slice(None)) if not isinstance(index, tuple): return index # Find any Ellipsis objects. 
ellipsis_indices = [i for i, v in enumerate(index) if v is Ellipsis] if not ellipsis_indices: return index if len(ellipsis_indices) > 1: warn('multi-Ellipsis indexing is deprecated will be removed in v1.13.', DeprecationWarning, stacklevel=2) first_ellipsis = ellipsis_indices[0] # Try to expand it using shortcuts for common cases if len(index) == 1: return (slice(None), slice(None)) if len(index) == 2: if first_ellipsis == 0: if index[1] is Ellipsis: return (slice(None), slice(None)) return (slice(None), index[1]) return (index[0], slice(None)) # Expand it using a general-purpose algorithm tail = [] for v in index[first_ellipsis+1:]: if v is not Ellipsis: tail.append(v) nd = first_ellipsis + len(tail) nslice = max(0, 2 - nd) return index[:first_ellipsis] + (slice(None),)*nslice + tuple(tail) def _maybe_bool_ndarray(idx): """Returns a compatible array if elements are boolean. """ idx = np.asanyarray(idx) if idx.dtype.kind == 'b': return idx return None def _first_element_bool(idx, max_dim=2): """Returns True if first element of the incompatible array type is boolean. """ if max_dim < 1: return None try: first = next(iter(idx), None) except TypeError: return None if isinstance(first, bool): return True return _first_element_bool(first, max_dim-1) def _compatible_boolean_index(idx): """Returns a boolean index array that can be converted to integer array. Returns None if no such array exists. """ # Presence of attribute `ndim` indicates a compatible array type. if hasattr(idx, 'ndim') or _first_element_bool(idx): return _maybe_bool_ndarray(idx) return None def _boolean_index_to_array(idx): if idx.ndim > 1: raise IndexError('invalid index shape') return np.where(idx)[0]
12,931
32.58961
79
py
scipy
scipy-main/scipy/sparse/base.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.sparse` namespace for importing the functions # included below. import warnings from . import _base __all__ = [ # noqa: F822 'MAXPRINT', 'SparseEfficiencyWarning', 'SparseFormatWarning', 'SparseWarning', 'asmatrix', 'check_reshape_kwargs', 'check_shape', 'get_sum_dtype', 'isdense', 'isintlike', 'isscalarlike', 'issparse', 'isspmatrix', 'spmatrix', 'validateaxis', ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.sparse.base is deprecated and has no attribute " f"{name}. Try looking in scipy.sparse instead.") warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, " "the `scipy.sparse.base` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_base, name)
1,016
22.651163
76
py
scipy
scipy-main/scipy/sparse/sparsetools.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.sparse` namespace for importing the functions # included below. import warnings from . import _sparsetools __all__ = [ # noqa: F822 'bsr_diagonal', 'bsr_eldiv_bsr', 'bsr_elmul_bsr', 'bsr_ge_bsr', 'bsr_gt_bsr', 'bsr_le_bsr', 'bsr_lt_bsr', 'bsr_matmat', 'bsr_matvec', 'bsr_matvecs', 'bsr_maximum_bsr', 'bsr_minimum_bsr', 'bsr_minus_bsr', 'bsr_ne_bsr', 'bsr_plus_bsr', 'bsr_scale_columns', 'bsr_scale_rows', 'bsr_sort_indices', 'bsr_tocsr', 'bsr_transpose', 'coo_matvec', 'coo_tocsr', 'coo_todense', 'cs_graph_components', 'csc_diagonal', 'csc_eldiv_csc', 'csc_elmul_csc', 'csc_ge_csc', 'csc_gt_csc', 'csc_le_csc', 'csc_lt_csc', 'csc_matmat', 'csc_matmat_maxnnz', 'csc_matvec', 'csc_matvecs', 'csc_maximum_csc', 'csc_minimum_csc', 'csc_minus_csc', 'csc_ne_csc', 'csc_plus_csc', 'csc_tocsr', 'csr_column_index1', 'csr_column_index2', 'csr_count_blocks', 'csr_diagonal', 'csr_eldiv_csr', 'csr_eliminate_zeros', 'csr_elmul_csr', 'csr_ge_csr', 'csr_gt_csr', 'csr_has_canonical_format', 'csr_has_sorted_indices', 'csr_hstack', 'csr_le_csr', 'csr_lt_csr', 'csr_matmat', 'csr_matmat_maxnnz', 'csr_matvec', 'csr_matvecs', 'csr_maximum_csr', 'csr_minimum_csr', 'csr_minus_csr', 'csr_ne_csr', 'csr_plus_csr', 'csr_row_index', 'csr_row_slice', 'csr_sample_offsets', 'csr_sample_values', 'csr_scale_columns', 'csr_scale_rows', 'csr_sort_indices', 'csr_sum_duplicates', 'csr_tobsr', 'csr_tocsc', 'csr_todense', 'dia_matvec', 'expandptr', 'get_csr_submatrix', 'test_throw_error', ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.sparse.sparsetools is deprecated and has no attribute " f"{name}. Try looking in scipy.sparse instead.") warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, " "the `scipy.sparse.sparsetools` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_sparsetools, name)
2,390
21.345794
76
py
scipy
scipy-main/scipy/sparse/_dok.py
"""Dictionary Of Keys based matrix""" __docformat__ = "restructuredtext en" __all__ = ['dok_array', 'dok_matrix', 'isspmatrix_dok'] import itertools import numpy as np from ._matrix import spmatrix, _array_doc_to_matrix from ._base import _spbase, sparray, issparse from ._index import IndexMixin from ._sputils import (isdense, getdtype, isshape, isintlike, isscalarlike, upcast, upcast_scalar, check_shape) try: from operator import isSequenceType as _is_sequence except ImportError: def _is_sequence(x): return (hasattr(x, '__len__') or hasattr(x, '__next__') or hasattr(x, 'next')) class _dok_base(_spbase, IndexMixin, dict): """ Dictionary Of Keys based sparse matrix. This is an efficient structure for constructing sparse matrices incrementally. This can be instantiated in several ways: dok_array(D) with a dense matrix, D dok_array(S) with a sparse matrix, S dok_array((M,N), [dtype]) create the matrix with initial shape (M,N) dtype is optional, defaulting to dtype='d' Attributes ---------- dtype : dtype Data type of the matrix shape : 2-tuple Shape of the matrix ndim : int Number of dimensions (this is always 2) nnz Number of nonzero elements Notes ----- Sparse matrices can be used in arithmetic operations: they support addition, subtraction, multiplication, division, and matrix power. Allows for efficient O(1) access of individual elements. Duplicates are not allowed. Can be efficiently converted to a coo_matrix once constructed. Examples -------- >>> import numpy as np >>> from scipy.sparse import dok_array >>> S = dok_array((5, 5), dtype=np.float32) >>> for i in range(5): ... for j in range(5): ... 
S[i, j] = i + j # Update element """ _format = 'dok' def __init__(self, arg1, shape=None, dtype=None, copy=False): dict.__init__(self) _spbase.__init__(self) self.dtype = getdtype(dtype, default=float) if isinstance(arg1, tuple) and isshape(arg1): # (M,N) M, N = arg1 self._shape = check_shape((M, N)) elif issparse(arg1): # Sparse ctor if arg1.format == self.format and copy: arg1 = arg1.copy() else: arg1 = arg1.todok() if dtype is not None: arg1 = arg1.astype(dtype, copy=False) dict.update(self, arg1) self._shape = check_shape(arg1.shape) self.dtype = arg1.dtype else: # Dense ctor try: arg1 = np.asarray(arg1) except Exception as e: raise TypeError('Invalid input format.') from e if len(arg1.shape) != 2: raise TypeError('Expected rank <=2 dense array or matrix.') d = self._coo_container(arg1, dtype=dtype).todok() dict.update(self, d) self._shape = check_shape(arg1.shape) self.dtype = d.dtype def update(self, val): # Prevent direct usage of update raise NotImplementedError("Direct modification to dok_array element " "is not allowed.") def _update(self, data): """An update method for dict data defined for direct access to `dok_array` data. Main purpose is to be used for effcient conversion from other _spbase classes. Has no checking if `data` is valid.""" return dict.update(self, data) def _getnnz(self, axis=None): if axis is not None: raise NotImplementedError("_getnnz over an axis is not implemented " "for DOK format.") return dict.__len__(self) def count_nonzero(self): return sum(x != 0 for x in self.values()) _getnnz.__doc__ = _spbase._getnnz.__doc__ count_nonzero.__doc__ = _spbase.count_nonzero.__doc__ def __len__(self): return dict.__len__(self) def get(self, key, default=0.): """This overrides the dict.get method, providing type checking but otherwise equivalent functionality. 
""" try: i, j = key assert isintlike(i) and isintlike(j) except (AssertionError, TypeError, ValueError) as e: raise IndexError('Index must be a pair of integers.') from e if (i < 0 or i >= self.shape[0] or j < 0 or j >= self.shape[1]): raise IndexError('Index out of bounds.') return dict.get(self, key, default) def _get_intXint(self, row, col): return dict.get(self, (row, col), self.dtype.type(0)) def _get_intXslice(self, row, col): return self._get_sliceXslice(slice(row, row+1), col) def _get_sliceXint(self, row, col): return self._get_sliceXslice(row, slice(col, col+1)) def _get_sliceXslice(self, row, col): row_start, row_stop, row_step = row.indices(self.shape[0]) col_start, col_stop, col_step = col.indices(self.shape[1]) row_range = range(row_start, row_stop, row_step) col_range = range(col_start, col_stop, col_step) shape = (len(row_range), len(col_range)) # Switch paths only when advantageous # (count the iterations in the loops, adjust for complexity) if len(self) >= 2 * shape[0] * shape[1]: # O(nr*nc) path: loop over <row x col> return self._get_columnXarray(row_range, col_range) # O(nnz) path: loop over entries of self newdok = self._dok_container(shape, dtype=self.dtype) for key in self.keys(): i, ri = divmod(int(key[0]) - row_start, row_step) if ri != 0 or i < 0 or i >= shape[0]: continue j, rj = divmod(int(key[1]) - col_start, col_step) if rj != 0 or j < 0 or j >= shape[1]: continue x = dict.__getitem__(self, key) dict.__setitem__(newdok, (i, j), x) return newdok def _get_intXarray(self, row, col): col = col.squeeze() return self._get_columnXarray([row], col) def _get_arrayXint(self, row, col): row = row.squeeze() return self._get_columnXarray(row, [col]) def _get_sliceXarray(self, row, col): row = list(range(*row.indices(self.shape[0]))) return self._get_columnXarray(row, col) def _get_arrayXslice(self, row, col): col = list(range(*col.indices(self.shape[1]))) return self._get_columnXarray(row, col) def _get_columnXarray(self, row, col): # outer 
indexing newdok = self._dok_container((len(row), len(col)), dtype=self.dtype) for i, r in enumerate(row): for j, c in enumerate(col): v = dict.get(self, (r, c), 0) if v: dict.__setitem__(newdok, (i, j), v) return newdok def _get_arrayXarray(self, row, col): # inner indexing i, j = map(np.atleast_2d, np.broadcast_arrays(row, col)) newdok = self._dok_container(i.shape, dtype=self.dtype) for key in itertools.product(range(i.shape[0]), range(i.shape[1])): v = dict.get(self, (i[key], j[key]), 0) if v: dict.__setitem__(newdok, key, v) return newdok def _set_intXint(self, row, col, x): key = (row, col) if x: dict.__setitem__(self, key, x) elif dict.__contains__(self, key): del self[key] def _set_arrayXarray(self, row, col, x): row = list(map(int, row.ravel())) col = list(map(int, col.ravel())) x = x.ravel() dict.update(self, zip(zip(row, col), x)) for i in np.nonzero(x == 0)[0]: key = (row[i], col[i]) if dict.__getitem__(self, key) == 0: # may have been superseded by later update del self[key] def __add__(self, other): if isscalarlike(other): res_dtype = upcast_scalar(self.dtype, other) new = self._dok_container(self.shape, dtype=res_dtype) # Add this scalar to every element. M, N = self.shape for key in itertools.product(range(M), range(N)): aij = dict.get(self, (key), 0) + other if aij: new[key] = aij # new.dtype.char = self.dtype.char elif issparse(other): if other.format == "dok": if other.shape != self.shape: raise ValueError("Matrix dimensions are not equal.") # We could alternatively set the dimensions to the largest of # the two matrices to be summed. Would this be a good idea? 
res_dtype = upcast(self.dtype, other.dtype) new = self._dok_container(self.shape, dtype=res_dtype) dict.update(new, self) with np.errstate(over='ignore'): dict.update(new, ((k, new[k] + other[k]) for k in other.keys())) else: csc = self.tocsc() new = csc + other elif isdense(other): new = self.todense() + other else: return NotImplemented return new def __radd__(self, other): if isscalarlike(other): new = self._dok_container(self.shape, dtype=self.dtype) M, N = self.shape for key in itertools.product(range(M), range(N)): aij = dict.get(self, (key), 0) + other if aij: new[key] = aij elif issparse(other): if other.format == "dok": if other.shape != self.shape: raise ValueError("Matrix dimensions are not equal.") new = self._dok_container(self.shape, dtype=self.dtype) dict.update(new, self) dict.update(new, ((k, self[k] + other[k]) for k in other.keys())) else: csc = self.tocsc() new = csc + other elif isdense(other): new = other + self.todense() else: return NotImplemented return new def __neg__(self): if self.dtype.kind == 'b': raise NotImplementedError('Negating a sparse boolean matrix is not' ' supported.') new = self._dok_container(self.shape, dtype=self.dtype) dict.update(new, ((k, -self[k]) for k in self.keys())) return new def _mul_scalar(self, other): res_dtype = upcast_scalar(self.dtype, other) # Multiply this scalar by every element. 
new = self._dok_container(self.shape, dtype=res_dtype) dict.update(new, ((k, v * other) for k, v in self.items())) return new def _mul_vector(self, other): # matrix * vector result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype)) for (i, j), v in self.items(): result[i] += v * other[j] return result def _mul_multivector(self, other): # matrix * multivector result_shape = (self.shape[0], other.shape[1]) result_dtype = upcast(self.dtype, other.dtype) result = np.zeros(result_shape, dtype=result_dtype) for (i, j), v in self.items(): result[i,:] += v * other[j,:] return result def __imul__(self, other): if isscalarlike(other): dict.update(self, ((k, v * other) for k, v in self.items())) return self return NotImplemented def __truediv__(self, other): if isscalarlike(other): res_dtype = upcast_scalar(self.dtype, other) new = self._dok_container(self.shape, dtype=res_dtype) dict.update(new, ((k, v / other) for k, v in self.items())) return new return self.tocsr() / other def __itruediv__(self, other): if isscalarlike(other): dict.update(self, ((k, v / other) for k, v in self.items())) return self return NotImplemented def __reduce__(self): # this approach is necessary because __setstate__ is called after # __setitem__ upon unpickling and since __init__ is not called there # is no shape attribute hence it is not possible to unpickle it. return dict.__reduce__(self) # What should len(sparse) return? For consistency with dense matrices, # perhaps it should be the number of rows? For now it returns the number # of non-zeros. 
def transpose(self, axes=None, copy=False): if axes is not None: raise ValueError("Sparse matrices do not support " "an 'axes' parameter because swapping " "dimensions is the only logical permutation.") M, N = self.shape new = self._dok_container((N, M), dtype=self.dtype, copy=copy) dict.update(new, (((right, left), val) for (left, right), val in self.items())) return new transpose.__doc__ = _spbase.transpose.__doc__ def conjtransp(self): """Return the conjugate transpose.""" M, N = self.shape new = self._dok_container((N, M), dtype=self.dtype) dict.update(new, (((right, left), np.conj(val)) for (left, right), val in self.items())) return new def copy(self): new = self._dok_container(self.shape, dtype=self.dtype) dict.update(new, self) return new copy.__doc__ = _spbase.copy.__doc__ def tocoo(self, copy=False): if self.nnz == 0: return self._coo_container(self.shape, dtype=self.dtype) idx_dtype = self._get_index_dtype(maxval=max(self.shape)) data = np.fromiter(self.values(), dtype=self.dtype, count=self.nnz) row = np.fromiter((i for i, _ in self.keys()), dtype=idx_dtype, count=self.nnz) col = np.fromiter((j for _, j in self.keys()), dtype=idx_dtype, count=self.nnz) A = self._coo_container( (data, (row, col)), shape=self.shape, dtype=self.dtype ) A.has_canonical_format = True return A tocoo.__doc__ = _spbase.tocoo.__doc__ def todok(self, copy=False): if copy: return self.copy() return self todok.__doc__ = _spbase.todok.__doc__ def tocsc(self, copy=False): return self.tocoo(copy=False).tocsc(copy=copy) tocsc.__doc__ = _spbase.tocsc.__doc__ def resize(self, *shape): shape = check_shape(shape) newM, newN = shape M, N = self.shape if newM < M or newN < N: # Remove all elements outside new dimensions for (i, j) in list(self.keys()): if i >= newM or j >= newN: del self[i, j] self._shape = shape resize.__doc__ = _spbase.resize.__doc__ def isspmatrix_dok(x): """Is `x` of dok_array type? 
Parameters ---------- x object to check for being a dok matrix Returns ------- bool True if `x` is a dok matrix, False otherwise Examples -------- >>> from scipy.sparse import dok_array, dok_matrix, coo_matrix, isspmatrix_dok >>> isspmatrix_dok(dok_matrix([[5]])) True >>> isspmatrix_dok(dok_array([[5]])) False >>> isspmatrix_dok(coo_matrix([[5]])) False """ return isinstance(x, dok_matrix) # This namespace class separates array from matrix with isinstance class dok_array(_dok_base, sparray): pass dok_array.__doc__ = _dok_base.__doc__ class dok_matrix(spmatrix, _dok_base): def set_shape(self, shape): new_matrix = self.reshape(shape, copy=False).asformat(self.format) self.__dict__ = new_matrix.__dict__ dict.clear(self) dict.update(self, new_matrix) def get_shape(self): """Get shape of a sparse array.""" return self._shape shape = property(fget=get_shape, fset=set_shape) dok_matrix.__doc__ = _array_doc_to_matrix(_dok_base.__doc__)
16,352
33.572939
87
py
scipy
scipy-main/scipy/sparse/_sputils.py
""" Utility functions for sparse matrix module """ import sys import operator import numpy as np from math import prod import scipy.sparse as sp __all__ = ['upcast', 'getdtype', 'getdata', 'isscalarlike', 'isintlike', 'isshape', 'issequence', 'isdense', 'ismatrix', 'get_sum_dtype'] supported_dtypes = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, np.int_, np.uint, np.longlong, np.ulonglong, np.single, np.double, np.longdouble, np.csingle, np.cdouble, np.clongdouble] _upcast_memo = {} def upcast(*args): """Returns the nearest supported sparse dtype for the combination of one or more types. upcast(t0, t1, ..., tn) -> T where T is a supported dtype Examples -------- >>> upcast('int32') <type 'numpy.int32'> >>> upcast('bool') <type 'numpy.bool_'> >>> upcast('int32','float32') <type 'numpy.float64'> >>> upcast('bool',complex,float) <type 'numpy.complex128'> """ t = _upcast_memo.get(hash(args)) if t is not None: return t upcast = np.result_type(*args) for t in supported_dtypes: if np.can_cast(upcast, t): _upcast_memo[hash(args)] = t return t raise TypeError(f'no supported conversion for types: {args!r}') def upcast_char(*args): """Same as `upcast` but taking dtype.char as input (faster).""" t = _upcast_memo.get(args) if t is not None: return t t = upcast(*map(np.dtype, args)) _upcast_memo[args] = t return t def upcast_scalar(dtype, scalar): """Determine data type for binary operation between an array of type `dtype` and a scalar. """ return (np.array([0], dtype=dtype) * scalar).dtype def downcast_intp_index(arr): """ Down-cast index array to np.intp dtype if it is of a larger dtype. Raise an error if the array contains a value that is too large for intp. 
""" if arr.dtype.itemsize > np.dtype(np.intp).itemsize: if arr.size == 0: return arr.astype(np.intp) maxval = arr.max() minval = arr.min() if maxval > np.iinfo(np.intp).max or minval < np.iinfo(np.intp).min: raise ValueError("Cannot deal with arrays with indices larger " "than the machine maximum address size " "(e.g. 64-bit indices on 32-bit machine).") return arr.astype(np.intp) return arr def to_native(A): """ Ensure that the data type of the NumPy array `A` has native byte order. `A` must be a NumPy array. If the data type of `A` does not have native byte order, a copy of `A` with a native byte order is returned. Otherwise `A` is returned. """ dt = A.dtype if dt.isnative: # Don't call `asarray()` if A is already native, to avoid unnecessarily # creating a view of the input array. return A return np.asarray(A, dtype=dt.newbyteorder('native')) def getdtype(dtype, a=None, default=None): """Function used to simplify argument processing. If 'dtype' is not specified (is None), returns a.dtype; otherwise returns a np.dtype object created from the specified dtype argument. If 'dtype' and 'a' are both None, construct a data type out of the 'default' parameter. Furthermore, 'dtype' must be in 'allowed' set. """ # TODO is this really what we want? if dtype is None: try: newdtype = a.dtype except AttributeError as e: if default is not None: newdtype = np.dtype(default) else: raise TypeError("could not interpret data type") from e else: newdtype = np.dtype(dtype) if newdtype == np.object_: raise ValueError( "object dtype is not supported by sparse matrices" ) return newdtype def getdata(obj, dtype=None, copy=False) -> np.ndarray: """ This is a wrapper of `np.array(obj, dtype=dtype, copy=copy)` that will generate a warning if the result is an object array. """ data = np.array(obj, dtype=dtype, copy=copy) # Defer to getdtype for checking that the dtype is OK. # This is called for the validation only; we don't need the return value. 
getdtype(data.dtype) return data def get_index_dtype(arrays=(), maxval=None, check_contents=False): """ Based on input (integer) arrays `a`, determine a suitable index data type that can hold the data in the arrays. Parameters ---------- arrays : tuple of array_like Input arrays whose types/contents to check maxval : float, optional Maximum value needed check_contents : bool, optional Whether to check the values in the arrays and not just their types. Default: False (check only the types) Returns ------- dtype : dtype Suitable index data type (int32 or int64) """ int32min = np.int32(np.iinfo(np.int32).min) int32max = np.int32(np.iinfo(np.int32).max) # not using intc directly due to misinteractions with pythran dtype = np.int32 if np.intc().itemsize == 4 else np.int64 if maxval is not None: maxval = np.int64(maxval) if maxval > int32max: dtype = np.int64 if isinstance(arrays, np.ndarray): arrays = (arrays,) for arr in arrays: arr = np.asarray(arr) if not np.can_cast(arr.dtype, np.int32): if check_contents: if arr.size == 0: # a bigger type not needed continue elif np.issubdtype(arr.dtype, np.integer): maxval = arr.max() minval = arr.min() if minval >= int32min and maxval <= int32max: # a bigger type not needed continue dtype = np.int64 break return dtype def get_sum_dtype(dtype): """Mimic numpy's casting for np.sum""" if dtype.kind == 'u' and np.can_cast(dtype, np.uint): return np.uint if np.can_cast(dtype, np.int_): return np.int_ return dtype def isscalarlike(x) -> bool: """Is x either a scalar, an array scalar, or a 0-dim array?""" return np.isscalar(x) or (isdense(x) and x.ndim == 0) def isintlike(x) -> bool: """Is x appropriate as an index into a sparse matrix? Returns True if it can be cast safely to a machine int. """ # Fast-path check to eliminate non-scalar values. operator.index would # catch this case too, but the exception catching is slow. 
    # (continuation of isintlike(x) — its `def` line precedes this chunk)
    # Only 0-d values can be integer-like indices.
    if np.ndim(x) != 0:
        return False
    try:
        operator.index(x)
    except (TypeError, ValueError):
        # Not a true integer type: accept only if it equals an int exactly,
        # but then refuse it loudly — inexact indices are a user error.
        try:
            loose_int = bool(int(x) == x)
        except (TypeError, ValueError):
            return False
        if loose_int:
            msg = "Inexact indices into sparse matrices are not allowed"
            raise ValueError(msg)
        return loose_int
    return True


def isshape(x, nonneg=False, allow_ndim=False) -> bool:
    """Is x a valid tuple of dimensions?

    If nonneg, also checks that the dimensions are non-negative.
    If allow_ndim, shapes of any dimensionality are allowed.
    """
    ndim = len(x)
    if not allow_ndim and ndim != 2:
        return False
    for d in x:
        if not isintlike(d):
            return False
        if nonneg and d < 0:
            return False
    return True


def issequence(t) -> bool:
    # A flat sequence: list/tuple of scalars (or empty), or a 1-D ndarray.
    return ((isinstance(t, (list, tuple)) and
             (len(t) == 0 or np.isscalar(t[0]))) or
            (isinstance(t, np.ndarray) and (t.ndim == 1)))


def ismatrix(t) -> bool:
    # A 2-D structure: non-empty list/tuple of sequences, or a 2-D ndarray.
    return ((isinstance(t, (list, tuple)) and
             len(t) > 0 and issequence(t[0])) or
            (isinstance(t, np.ndarray) and t.ndim == 2))


def isdense(x) -> bool:
    return isinstance(x, np.ndarray)


def validateaxis(axis) -> None:
    """Raise TypeError/ValueError if `axis` is not None or an int in [-2, 1]."""
    if axis is None:
        return
    axis_type = type(axis)

    # In NumPy, you can pass in tuples for 'axis', but they are
    # not very useful for sparse matrices given their limited
    # dimensions, so let's make it explicit that they are not
    # allowed to be passed in
    if axis_type == tuple:
        raise TypeError("Tuples are not accepted for the 'axis' parameter. "
                        "Please pass in one of the following: "
                        "{-2, -1, 0, 1, None}.")

    # If not a tuple, check that the provided axis is actually
    # an integer and raise a TypeError similar to NumPy's
    if not np.issubdtype(np.dtype(axis_type), np.integer):
        raise TypeError(f"axis must be an integer, not {axis_type.__name__}")

    if not (-2 <= axis <= 1):
        raise ValueError("axis out of range")


def check_shape(args, current_shape=None):
    """Imitate numpy.matrix handling of shape arguments"""
    if len(args) == 0:
        raise TypeError("function missing 1 required positional argument: "
                        "'shape'")
    if len(args) == 1:
        # A single argument may be either a bare integer or an iterable shape.
        try:
            shape_iter = iter(args[0])
        except TypeError:
            new_shape = (operator.index(args[0]), )
        else:
            new_shape = tuple(operator.index(arg) for arg in shape_iter)
    else:
        new_shape = tuple(operator.index(arg) for arg in args)

    if current_shape is None:
        if len(new_shape) != 2:
            raise ValueError('shape must be a 2-tuple of positive integers')
        elif any(d < 0 for d in new_shape):
            raise ValueError("'shape' elements cannot be negative")
    else:
        # Check the current size only if needed
        current_size = prod(current_shape)

        # Check for negatives
        negative_indexes = [i for i, x in enumerate(new_shape) if x < 0]
        if not negative_indexes:
            new_size = prod(new_shape)
            if new_size != current_size:
                raise ValueError('cannot reshape array of size {} into shape {}'
                                 .format(current_size, new_shape))
        elif len(negative_indexes) == 1:
            # One dimension may be -1: infer it from the total size.
            skip = negative_indexes[0]
            specified = prod(new_shape[:skip] + new_shape[skip+1:])
            unspecified, remainder = divmod(current_size, specified)
            if remainder != 0:
                err_shape = tuple('newshape' if x < 0 else x for x in new_shape)
                raise ValueError('cannot reshape array of size {} into shape {}'
                                 ''.format(current_size, err_shape))
            new_shape = new_shape[:skip] + (unspecified,) + new_shape[skip+1:]
        else:
            raise ValueError('can only specify one unknown dimension')

    if len(new_shape) != 2:
        raise ValueError('matrix shape must be two-dimensional')

    return new_shape


def check_reshape_kwargs(kwargs):
    """Unpack keyword arguments for reshape function.

    This is useful because keyword arguments after star arguments are not
    allowed in Python 2, but star keyword arguments are. This function unpacks
    'order' and 'copy' from the star keyword arguments (with defaults) and
    throws an error for any remaining.
    """
    order = kwargs.pop('order', 'C')
    copy = kwargs.pop('copy', False)
    if kwargs:  # Some unused kwargs remain
        raise TypeError('reshape() got unexpected keywords arguments: {}'
                        .format(', '.join(kwargs.keys())))
    return order, copy


def is_pydata_spmatrix(m) -> bool:
    """
    Check whether object is pydata/sparse matrix, avoiding importing the
    module.
    """
    # Look the class up only if `sparse` is already imported somewhere else.
    base_cls = getattr(sys.modules.get('sparse'), 'SparseArray', None)
    return base_cls is not None and isinstance(m, base_cls)


###############################################################################
# Wrappers for NumPy types that are deprecated

# Numpy versions of these functions raise deprecation warnings, the
# ones below do not.

def matrix(*args, **kwargs):
    return np.array(*args, **kwargs).view(np.matrix)


def asmatrix(data, dtype=None):
    if isinstance(data, np.matrix) and (dtype is None or data.dtype == dtype):
        return data
    return np.asarray(data, dtype=dtype).view(np.matrix)

###############################################################################


def _todata(s) -> np.ndarray:
    """Access nonzero values, possibly after summing duplicates.

    Parameters
    ----------
    s : sparse array
        Input sparse array.

    Returns
    -------
    data: ndarray
        Nonzero values of the array, with shape (s.nnz,)
    """
    if isinstance(s, sp._data._data_matrix):
        return s._deduped_data()

    if isinstance(s, sp.dok_array):
        return np.fromiter(s.values(), dtype=s.dtype, count=s.nnz)

    if isinstance(s, sp.lil_array):
        data = np.empty(s.nnz, dtype=s.dtype)
        sp._csparsetools.lil_flatten_to_array(s.data, data)
        return data

    return s.tocoo()._deduped_data()
13,023
30.61165
80
py
scipy
scipy-main/scipy/sparse/_data.py
"""Base class for sparse matrice with a .data attribute subclasses must provide a _with_data() method that creates a new matrix with the same sparsity pattern as self but with a different data array """ import numpy as np from ._base import _spbase, _ufuncs_with_fixed_point_at_zero from ._sputils import isscalarlike, validateaxis __all__ = [] # TODO implement all relevant operations # use .data.__methods__() instead of /=, *=, etc. class _data_matrix(_spbase): def __init__(self): _spbase.__init__(self) def _get_dtype(self): return self.data.dtype def _set_dtype(self, newtype): self.data.dtype = newtype dtype = property(fget=_get_dtype, fset=_set_dtype) def _deduped_data(self): if hasattr(self, 'sum_duplicates'): self.sum_duplicates() return self.data def __abs__(self): return self._with_data(abs(self._deduped_data())) def __round__(self, ndigits=0): return self._with_data(np.around(self._deduped_data(), decimals=ndigits)) def _real(self): return self._with_data(self.data.real) def _imag(self): return self._with_data(self.data.imag) def __neg__(self): if self.dtype.kind == 'b': raise NotImplementedError('negating a boolean sparse array is not ' 'supported') return self._with_data(-self.data) def __imul__(self, other): # self *= other if isscalarlike(other): self.data *= other return self else: return NotImplemented def __itruediv__(self, other): # self /= other if isscalarlike(other): recip = 1.0 / other self.data *= recip return self else: return NotImplemented def astype(self, dtype, casting='unsafe', copy=True): dtype = np.dtype(dtype) if self.dtype != dtype: matrix = self._with_data( self.data.astype(dtype, casting=casting, copy=True), copy=True ) return matrix._with_data(matrix._deduped_data(), copy=False) elif copy: return self.copy() else: return self astype.__doc__ = _spbase.astype.__doc__ def conjugate(self, copy=True): if np.issubdtype(self.dtype, np.complexfloating): return self._with_data(self.data.conjugate(), copy=copy) elif copy: return self.copy() 
else: return self conjugate.__doc__ = _spbase.conjugate.__doc__ def copy(self): return self._with_data(self.data.copy(), copy=True) copy.__doc__ = _spbase.copy.__doc__ def count_nonzero(self): return np.count_nonzero(self._deduped_data()) count_nonzero.__doc__ = _spbase.count_nonzero.__doc__ def power(self, n, dtype=None): """ This function performs element-wise power. Parameters ---------- n : n is a scalar dtype : If dtype is not specified, the current dtype will be preserved. """ if not isscalarlike(n): raise NotImplementedError("input is not scalar") data = self._deduped_data() if dtype is not None: data = data.astype(dtype) return self._with_data(data ** n) ########################### # Multiplication handlers # ########################### def _mul_scalar(self, other): return self._with_data(self.data * other) # Add the numpy unary ufuncs for which func(0) = 0 to _data_matrix. for npfunc in _ufuncs_with_fixed_point_at_zero: name = npfunc.__name__ def _create_method(op): def method(self): result = op(self._deduped_data()) return self._with_data(result, copy=True) method.__doc__ = ("Element-wise {}.\n\n" "See `numpy.{}` for more information.".format(name, name)) method.__name__ = name return method setattr(_data_matrix, name, _create_method(npfunc)) def _find_missing_index(ind, n): for k, a in enumerate(ind): if k != a: return k k += 1 if k < n: return k else: return -1 class _minmax_mixin: """Mixin for min and max methods. These are not implemented for dia_matrix, hence the separate class. 
""" def _min_or_max_axis(self, axis, min_or_max): N = self.shape[axis] if N == 0: raise ValueError("zero-size array to reduction operation") M = self.shape[1 - axis] idx_dtype = self._get_index_dtype(maxval=M) mat = self.tocsc() if axis == 0 else self.tocsr() mat.sum_duplicates() major_index, value = mat._minor_reduce(min_or_max) not_full = np.diff(mat.indptr)[major_index] < N value[not_full] = min_or_max(value[not_full], 0) mask = value != 0 major_index = np.compress(mask, major_index) value = np.compress(mask, value) if axis == 0: return self._coo_container( (value, (np.zeros(len(value), dtype=idx_dtype), major_index)), dtype=self.dtype, shape=(1, M) ) else: return self._coo_container( (value, (major_index, np.zeros(len(value), dtype=idx_dtype))), dtype=self.dtype, shape=(M, 1) ) def _min_or_max(self, axis, out, min_or_max): if out is not None: raise ValueError("Sparse matrices do not support " "an 'out' parameter.") validateaxis(axis) if axis is None: if 0 in self.shape: raise ValueError("zero-size array to reduction operation") zero = self.dtype.type(0) if self.nnz == 0: return zero m = min_or_max.reduce(self._deduped_data().ravel()) if self.nnz != np.prod(self.shape): m = min_or_max(zero, m) return m if axis < 0: axis += 2 if (axis == 0) or (axis == 1): return self._min_or_max_axis(axis, min_or_max) else: raise ValueError("axis out of range") def _arg_min_or_max_axis(self, axis, argmin_or_argmax, compare): if self.shape[axis] == 0: raise ValueError("Can't apply the operation along a zero-sized " "dimension.") if axis < 0: axis += 2 zero = self.dtype.type(0) mat = self.tocsc() if axis == 0 else self.tocsr() mat.sum_duplicates() ret_size, line_size = mat._swap(mat.shape) ret = np.zeros(ret_size, dtype=int) nz_lines, = np.nonzero(np.diff(mat.indptr)) for i in nz_lines: p, q = mat.indptr[i:i + 2] data = mat.data[p:q] indices = mat.indices[p:q] extreme_index = argmin_or_argmax(data) extreme_value = data[extreme_index] if compare(extreme_value, zero) or q - p == 
line_size: ret[i] = indices[extreme_index] else: zero_ind = _find_missing_index(indices, line_size) if extreme_value == zero: ret[i] = min(extreme_index, zero_ind) else: ret[i] = zero_ind if axis == 1: ret = ret.reshape(-1, 1) return self._ascontainer(ret) def _arg_min_or_max(self, axis, out, argmin_or_argmax, compare): if out is not None: raise ValueError("Sparse types do not support an 'out' parameter.") validateaxis(axis) if axis is not None: return self._arg_min_or_max_axis(axis, argmin_or_argmax, compare) if 0 in self.shape: raise ValueError("Can't apply the operation to an empty matrix.") if self.nnz == 0: return 0 zero = self.dtype.type(0) mat = self.tocoo() # Convert to canonical form: no duplicates, sorted indices. mat.sum_duplicates() extreme_index = argmin_or_argmax(mat.data) extreme_value = mat.data[extreme_index] num_row, num_col = mat.shape # If the min value is less than zero, or max is greater than zero, # then we don't need to worry about implicit zeros. if compare(extreme_value, zero): # cast to Python int to avoid overflow and RuntimeError return (int(mat.row[extreme_index]) * num_col + int(mat.col[extreme_index])) # Cheap test for the rare case where we have no implicit zeros. size = num_row * num_col if size == mat.nnz: return (int(mat.row[extreme_index]) * num_col + int(mat.col[extreme_index])) # At this stage, any implicit zero could be the min or max value. # After sum_duplicates(), the `row` and `col` arrays are guaranteed to # be sorted in C-order, which means the linearized indices are sorted. linear_indices = mat.row * num_col + mat.col first_implicit_zero_index = _find_missing_index(linear_indices, size) if extreme_value == zero: return min(first_implicit_zero_index, extreme_index) return first_implicit_zero_index def max(self, axis=None, out=None): """ Return the maximum of the matrix or maximum along an axis. This takes all elements into account, not just the non-zero ones. 
Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the sum is computed. The default is to compute the maximum over all the matrix elements, returning a scalar (i.e., `axis` = `None`). out : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value, as this argument is not used. Returns ------- amax : coo_matrix or scalar Maximum of `a`. If `axis` is None, the result is a scalar value. If `axis` is given, the result is a sparse.coo_matrix of dimension ``a.ndim - 1``. See Also -------- min : The minimum value of a sparse matrix along a given axis. numpy.matrix.max : NumPy's implementation of 'max' for matrices """ return self._min_or_max(axis, out, np.maximum) def min(self, axis=None, out=None): """ Return the minimum of the matrix or maximum along an axis. This takes all elements into account, not just the non-zero ones. Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the sum is computed. The default is to compute the minimum over all the matrix elements, returning a scalar (i.e., `axis` = `None`). out : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value, as this argument is not used. Returns ------- amin : coo_matrix or scalar Minimum of `a`. If `axis` is None, the result is a scalar value. If `axis` is given, the result is a sparse.coo_matrix of dimension ``a.ndim - 1``. See Also -------- max : The maximum value of a sparse matrix along a given axis. numpy.matrix.min : NumPy's implementation of 'min' for matrices """ return self._min_or_max(axis, out, np.minimum) def nanmax(self, axis=None, out=None): """ Return the maximum of the matrix or maximum along an axis, ignoring any NaNs. This takes all elements into account, not just the non-zero ones. .. 
versionadded:: 1.11.0 Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the maximum is computed. The default is to compute the maximum over all the matrix elements, returning a scalar (i.e., `axis` = `None`). out : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value, as this argument is not used. Returns ------- amax : coo_matrix or scalar Maximum of `a`. If `axis` is None, the result is a scalar value. If `axis` is given, the result is a sparse.coo_matrix of dimension ``a.ndim - 1``. See Also -------- nanmin : The minimum value of a sparse matrix along a given axis, ignoring NaNs. max : The maximum value of a sparse matrix along a given axis, propagating NaNs. numpy.nanmax : NumPy's implementation of 'nanmax'. """ return self._min_or_max(axis, out, np.fmax) def nanmin(self, axis=None, out=None): """ Return the minimum of the matrix or minimum along an axis, ignoring any NaNs. This takes all elements into account, not just the non-zero ones. .. versionadded:: 1.11.0 Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the minimum is computed. The default is to compute the minimum over all the matrix elements, returning a scalar (i.e., `axis` = `None`). out : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value, as this argument is not used. Returns ------- amin : coo_matrix or scalar Minimum of `a`. If `axis` is None, the result is a scalar value. If `axis` is given, the result is a sparse.coo_matrix of dimension ``a.ndim - 1``. See Also -------- nanmax : The maximum value of a sparse matrix along a given axis, ignoring NaNs. min : The minimum value of a sparse matrix along a given axis, propagating NaNs. numpy.nanmin : NumPy's implementation of 'nanmin'. 
""" return self._min_or_max(axis, out, np.fmin) def argmax(self, axis=None, out=None): """Return indices of maximum elements along an axis. Implicit zero elements are also taken into account. If there are several maximum values, the index of the first occurrence is returned. Parameters ---------- axis : {-2, -1, 0, 1, None}, optional Axis along which the argmax is computed. If None (default), index of the maximum element in the flatten data is returned. out : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value, as this argument is not used. Returns ------- ind : numpy.matrix or int Indices of maximum elements. If matrix, its size along `axis` is 1. """ return self._arg_min_or_max(axis, out, np.argmax, np.greater) def argmin(self, axis=None, out=None): """Return indices of minimum elements along an axis. Implicit zero elements are also taken into account. If there are several minimum values, the index of the first occurrence is returned. Parameters ---------- axis : {-2, -1, 0, 1, None}, optional Axis along which the argmin is computed. If None (default), index of the minimum element in the flatten data is returned. out : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value, as this argument is not used. Returns ------- ind : numpy.matrix or int Indices of minimum elements. If matrix, its size along `axis` is 1. """ return self._arg_min_or_max(axis, out, np.argmin, np.less)
16,409
32.627049
84
py
scipy
scipy-main/scipy/sparse/csr.py
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse` namespace for importing the functions
# included below.

import warnings

from . import _csr

__all__ = [  # noqa: F822
    'csr_count_blocks',
    'csr_matrix',
    'csr_tobsr',
    'csr_tocsc',
    'get_csr_submatrix',
    'get_index_dtype',
    'isspmatrix_csr',
    'spmatrix',
    'upcast',
]


def __dir__():
    # Advertise exactly the deprecated re-exports.
    return __all__


def __getattr__(name):
    # PEP 562 module __getattr__: forward known names to the private
    # implementation module, warning on each access; reject anything else.
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
                      "the `scipy.sparse.csr` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_csr, name)
    raise AttributeError(
        "scipy.sparse.csr is deprecated and has no attribute "
        f"{name}. Try looking in scipy.sparse instead.")
887
23
76
py
scipy
scipy-main/scipy/sparse/spfuncs.py
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse` namespace for importing the functions
# included below.

import warnings

from . import _spfuncs

__all__ = [  # noqa: F822
    'isspmatrix_csr', 'csr_matrix', 'isspmatrix_csc',
    'csr_count_blocks', 'estimate_blocksize', 'count_blocks'
]


def __dir__():
    # Advertise exactly the deprecated re-exports.
    return __all__


def __getattr__(name):
    # PEP 562 module __getattr__: forward known names to the private
    # implementation module, warning on each access; reject anything else.
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
                      "the `scipy.sparse.spfuncs` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_spfuncs, name)
    raise AttributeError(
        "scipy.sparse.spfuncs is deprecated and has no attribute "
        f"{name}. Try looking in scipy.sparse instead.")
842
27.1
76
py
scipy
scipy-main/scipy/sparse/_extract.py
"""Functions to extract parts of sparse matrices """ __docformat__ = "restructuredtext en" __all__ = ['find', 'tril', 'triu'] from ._coo import coo_matrix def find(A): """Return the indices and values of the nonzero elements of a matrix Parameters ---------- A : dense or sparse matrix Matrix whose nonzero elements are desired. Returns ------- (I,J,V) : tuple of arrays I,J, and V contain the row indices, column indices, and values of the nonzero matrix entries. Examples -------- >>> from scipy.sparse import csr_matrix, find >>> A = csr_matrix([[7.0, 8.0, 0],[0, 0, 9.0]]) >>> find(A) (array([0, 0, 1], dtype=int32), array([0, 1, 2], dtype=int32), array([ 7., 8., 9.])) """ A = coo_matrix(A, copy=True) A.sum_duplicates() # remove explicit zeros nz_mask = A.data != 0 return A.row[nz_mask], A.col[nz_mask], A.data[nz_mask] def tril(A, k=0, format=None): """Return the lower triangular portion of a matrix in sparse format Returns the elements on or below the k-th diagonal of the matrix A. - k = 0 corresponds to the main diagonal - k > 0 is above the main diagonal - k < 0 is below the main diagonal Parameters ---------- A : dense or sparse matrix Matrix whose lower trianglar portion is desired. k : integer : optional The top-most diagonal of the lower triangle. format : string Sparse format of the result, e.g. format="csr", etc. Returns ------- L : sparse matrix Lower triangular portion of A in sparse format. See Also -------- triu : upper triangle in sparse format Examples -------- >>> from scipy.sparse import csr_matrix, tril >>> A = csr_matrix([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]], ... 
dtype='int32') >>> A.toarray() array([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]]) >>> tril(A).toarray() array([[1, 0, 0, 0, 0], [4, 5, 0, 0, 0], [0, 0, 8, 0, 0]]) >>> tril(A).nnz 4 >>> tril(A, k=1).toarray() array([[1, 2, 0, 0, 0], [4, 5, 0, 0, 0], [0, 0, 8, 9, 0]]) >>> tril(A, k=-1).toarray() array([[0, 0, 0, 0, 0], [4, 0, 0, 0, 0], [0, 0, 0, 0, 0]]) >>> tril(A, format='csc') <3x5 sparse matrix of type '<class 'numpy.int32'>' with 4 stored elements in Compressed Sparse Column format> """ # convert to COOrdinate format where things are easy A = coo_matrix(A, copy=False) mask = A.row + k >= A.col return _masked_coo(A, mask).asformat(format) def triu(A, k=0, format=None): """Return the upper triangular portion of a matrix in sparse format Returns the elements on or above the k-th diagonal of the matrix A. - k = 0 corresponds to the main diagonal - k > 0 is above the main diagonal - k < 0 is below the main diagonal Parameters ---------- A : dense or sparse matrix Matrix whose upper trianglar portion is desired. k : integer : optional The bottom-most diagonal of the upper triangle. format : string Sparse format of the result, e.g. format="csr", etc. Returns ------- L : sparse matrix Upper triangular portion of A in sparse format. See Also -------- tril : lower triangle in sparse format Examples -------- >>> from scipy.sparse import csr_matrix, triu >>> A = csr_matrix([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]], ... 
dtype='int32') >>> A.toarray() array([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]]) >>> triu(A).toarray() array([[1, 2, 0, 0, 3], [0, 5, 0, 6, 7], [0, 0, 8, 9, 0]]) >>> triu(A).nnz 8 >>> triu(A, k=1).toarray() array([[0, 2, 0, 0, 3], [0, 0, 0, 6, 7], [0, 0, 0, 9, 0]]) >>> triu(A, k=-1).toarray() array([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]]) >>> triu(A, format='csc') <3x5 sparse matrix of type '<class 'numpy.int32'>' with 8 stored elements in Compressed Sparse Column format> """ # convert to COOrdinate format where things are easy A = coo_matrix(A, copy=False) mask = A.row + k <= A.col return _masked_coo(A, mask).asformat(format) def _masked_coo(A, mask): row = A.row[mask] col = A.col[mask] data = A.data[mask] return coo_matrix((data, (row, col)), shape=A.shape, dtype=A.dtype)
4,648
26.347059
90
py
scipy
scipy-main/scipy/sparse/compressed.py
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse` namespace for importing the functions
# included below.

import warnings

from . import _compressed

__all__ = [  # noqa: F822
    'IndexMixin',
    'SparseEfficiencyWarning',
    'check_shape',
    'csr_column_index1',
    'csr_column_index2',
    'csr_row_index',
    'csr_row_slice',
    'csr_sample_offsets',
    'csr_sample_values',
    'csr_todense',
    'downcast_intp_index',
    'get_csr_submatrix',
    'get_index_dtype',
    'get_sum_dtype',
    'getdtype',
    'is_pydata_spmatrix',
    'isdense',
    'isintlike',
    'isscalarlike',
    'isshape',
    'isspmatrix',
    'operator',
    'spmatrix',
    'to_native',
    'upcast',
    'upcast_char',
    'warn',
]


def __dir__():
    # Advertise exactly the deprecated re-exports.
    return __all__


def __getattr__(name):
    # PEP 562 module __getattr__: forward known names to the private
    # implementation module, warning on each access; reject anything else.
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
                      "the `scipy.sparse.compressed` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_compressed, name)
    raise AttributeError(
        "scipy.sparse.compressed is deprecated and has no attribute "
        f"{name}. Try looking in scipy.sparse instead.")
1,286
22.4
76
py
scipy
scipy-main/scipy/sparse/setup.py
import os
import sys
import subprocess


def configuration(parent_package='', top_path=None):
    """numpy.distutils build configuration for the ``scipy.sparse`` package.

    Parameters
    ----------
    parent_package : str, optional
        Name of the parent package (passed through by the build system).
    top_path : str, optional
        Path to the repository root (passed through by the build system).

    Returns
    -------
    config : numpy.distutils.misc_util.Configuration
        Configuration describing the sparse subpackages and C/C++ extensions.
    """
    from numpy.distutils.misc_util import Configuration
    from scipy._build_utils.compiler_helper import set_cxx_flags_hook
    from scipy._build_utils import numpy_nodepr_api

    config = Configuration('sparse', parent_package, top_path)

    config.add_data_dir('tests')

    config.add_subpackage('linalg')
    config.add_subpackage('csgraph')

    config.add_extension('_csparsetools',
                         sources=['_csparsetools.c'])

    def get_sparsetools_sources(ext, build_dir):
        # Defer generation of source files: the generator script is run at
        # build time so checked-in sources never go stale.
        subprocess.check_call([sys.executable,
                               os.path.join(os.path.dirname(__file__),
                                            '_generate_sparsetools.py'),
                               '--no-force'])
        return []

    depends = ['sparsetools_impl.h',
               'bsr_impl.h',
               'csc_impl.h',
               'csr_impl.h',
               'other_impl.h',
               'bool_ops.h',
               'bsr.h',
               'complex_ops.h',
               'coo.h',
               'csc.h',
               'csgraph.h',
               'csr.h',
               'dense.h',
               'dia.h',
               'sparsetools.h',
               'util.h']
    # BUG FIX: the previous line ended with a stray trailing comma, which
    # made `depends` a 1-tuple *containing* the list instead of the list of
    # header paths itself, so add_extension received a malformed `depends`
    # argument and dependency tracking of the sparsetools headers was broken.
    depends = [os.path.join('sparsetools', hdr) for hdr in depends]

    sparsetools = config.add_extension(
        '_sparsetools',
        define_macros=[('__STDC_FORMAT_MACROS', 1)] + numpy_nodepr_api['define_macros'],
        depends=depends,
        include_dirs=['sparsetools'],
        sources=[os.path.join('sparsetools', 'sparsetools.cxx'),
                 os.path.join('sparsetools', 'csr.cxx'),
                 os.path.join('sparsetools', 'csc.cxx'),
                 os.path.join('sparsetools', 'bsr.cxx'),
                 os.path.join('sparsetools', 'other.cxx'),
                 # Callable source entry: invoked by numpy.distutils to
                 # generate sources lazily (see get_sparsetools_sources).
                 get_sparsetools_sources]
    )
    sparsetools._pre_build_hook = set_cxx_flags_hook

    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
2,319
35.25
105
py
scipy
scipy-main/scipy/sparse/bsr.py
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse` namespace for importing the functions
# included below.

import warnings

from . import _bsr

__all__ = [  # noqa: F822
    'bsr_matmat',
    'bsr_matrix',
    'bsr_matvec',
    'bsr_matvecs',
    'bsr_sort_indices',
    'bsr_tocsr',
    'bsr_transpose',
    'check_shape',
    'csr_matmat_maxnnz',
    'get_index_dtype',
    'getdata',
    'getdtype',
    'isshape',
    'isspmatrix',
    'isspmatrix_bsr',
    'spmatrix',
    'to_native',
    'upcast',
    'warn',
]


def __dir__():
    # Advertise exactly the deprecated re-exports.
    return __all__


def __getattr__(name):
    # PEP 562 module __getattr__: forward known names to the private
    # implementation module, warning on each access; reject anything else.
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
                      "the `scipy.sparse.bsr` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_bsr, name)
    raise AttributeError(
        "scipy.sparse.bsr is deprecated and has no attribute "
        f"{name}. Try looking in scipy.sparse instead.")
1,058
21.531915
76
py
scipy
scipy-main/scipy/sparse/_matrix.py
from ._sputils import isintlike, isscalarlike


class spmatrix:
    """This class provides a base class for all sparse matrix classes.

    It cannot be instantiated.  Most of the work is provided by subclasses.
    """

    # Distinguishes the matrix API from the newer sparse-array API.
    _is_array = False

    # Lazily-imported container classes: each property defers the import so
    # the format modules are only loaded when actually needed.
    @property
    def _bsr_container(self):
        from ._bsr import bsr_matrix
        return bsr_matrix

    @property
    def _coo_container(self):
        from ._coo import coo_matrix
        return coo_matrix

    @property
    def _csc_container(self):
        from ._csc import csc_matrix
        return csc_matrix

    @property
    def _csr_container(self):
        from ._csr import csr_matrix
        return csr_matrix

    @property
    def _dia_container(self):
        from ._dia import dia_matrix
        return dia_matrix

    @property
    def _dok_container(self):
        from ._dok import dok_matrix
        return dok_matrix

    @property
    def _lil_container(self):
        from ._lil import lil_matrix
        return lil_matrix

    # Restore matrix multiplication
    def __mul__(self, other):
        return self._mul_dispatch(other)

    def __rmul__(self, other):
        return self._rmul_dispatch(other)

    # Restore matrix power
    def __pow__(self, other):
        M, N = self.shape
        if M != N:
            raise TypeError('sparse matrix is not square')

        if isintlike(other):
            other = int(other)
            if other < 0:
                raise ValueError('exponent must be >= 0')

            if other == 0:
                from ._construct import eye
                return eye(M, dtype=self.dtype)

            if other == 1:
                return self.copy()

            # Exponentiation by squaring: recurse on other // 2.
            tmp = self.__pow__(other // 2)
            if other % 2:
                return self @ tmp @ tmp
            else:
                return tmp @ tmp

        if isscalarlike(other):
            raise ValueError('exponent must be an integer')

        return NotImplemented

    ## Backward compatibility

    def set_shape(self, shape):
        """Set the shape of the matrix in-place"""
        # Make sure copy is False since this is in place
        # Make sure format is unchanged because we are doing a __dict__ swap
        new_self = self.reshape(shape, copy=False).asformat(self.format)
        self.__dict__ = new_self.__dict__

    def get_shape(self):
        """Get the shape of the matrix"""
        return self._shape

    shape = property(fget=get_shape, fset=set_shape,
                     doc="Shape of the matrix")

    def asfptype(self):
        """Upcast array to a floating point format (if necessary)"""
        return self._asfptype()

    def getmaxprint(self):
        """Maximum number of elements to display when printed."""
        return self._getmaxprint()

    def getformat(self):
        """Matrix storage format"""
        return self.format

    def getnnz(self, axis=None):
        """Number of stored values, including explicit zeros.

        Parameters
        ----------
        axis : None, 0, or 1
            Select between the number of values across the whole array, in
            each column, or in each row.
        """
        return self._getnnz(axis=axis)

    def getH(self):
        """Return the Hermitian transpose of this array.

        See Also
        --------
        numpy.matrix.getH : NumPy's implementation of `getH` for matrices
        """
        return self.conjugate().transpose()

    def getcol(self, j):
        """Returns a copy of column j of the array, as an (m x 1) sparse
        array (column vector).
        """
        return self._getcol(j)

    def getrow(self, i):
        """Returns a copy of row i of the array, as a (1 x n) sparse
        array (row vector).
        """
        return self._getrow(i)


def _array_doc_to_matrix(docstr):
    # For optimized builds with stripped docstrings
    if docstr is None:
        return None
    # Rewrites array-flavored docstrings for the matrix classes.
    return (
        docstr.replace('sparse arrays', 'sparse matrices')
              .replace('sparse array', 'sparse matrix')
    )
4,036
25.913333
76
py
scipy
scipy-main/scipy/sparse/dok.py
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse` namespace for importing the functions
# included below.

import warnings

from . import _dok

__all__ = [  # noqa: F822
    'IndexMixin',
    'check_shape',
    'dok_matrix',
    'get_index_dtype',
    'getdtype',
    'isdense',
    'isintlike',
    'isscalarlike',
    'isshape',
    'isspmatrix',
    'isspmatrix_dok',
    'itertools',
    'spmatrix',
    'upcast',
    'upcast_scalar',
]


def __dir__():
    # Advertise exactly the deprecated re-exports.
    return __all__


def __getattr__(name):
    # PEP 562 module __getattr__: forward known names to the private
    # implementation module, warning on each access; reject anything else.
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
                      "the `scipy.sparse.dok` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_dok, name)
    raise AttributeError(
        "scipy.sparse.dok is deprecated and has no attribute "
        f"{name}. Try looking in scipy.sparse instead.")
980
21.813953
76
py
scipy
scipy-main/scipy/sparse/csc.py
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse` namespace for importing the functions
# included below.

import warnings

from . import _csc

__all__ = [  # noqa: F822
    'csc_matrix',
    'csc_tocsr',
    'expandptr',
    'get_index_dtype',
    'isspmatrix_csc',
    'spmatrix',
    'upcast',
]


def __dir__():
    # Advertise exactly the deprecated re-exports.
    return __all__


def __getattr__(name):
    # PEP 562 module __getattr__: forward known names to the private
    # implementation module, warning on each access; reject anything else.
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
                      "the `scipy.sparse.csc` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_csc, name)
    raise AttributeError(
        "scipy.sparse.csc is deprecated and has no attribute "
        f"{name}. Try looking in scipy.sparse instead.")
838
22.971429
76
py
scipy
scipy-main/scipy/sparse/extract.py
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse` namespace for importing the functions
# included below.

import warnings

from . import _extract

__all__ = [  # noqa: F822
    'coo_matrix',
    'find',
    'tril',
    'triu',
]


def __dir__():
    # Advertise exactly the deprecated re-exports.
    return __all__


def __getattr__(name):
    # PEP 562 module __getattr__: forward known names to the private
    # implementation module, warning on each access; reject anything else.
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
                      "the `scipy.sparse.extract` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_extract, name)
    raise AttributeError(
        "scipy.sparse.extract is deprecated and has no attribute "
        f"{name}. Try looking in scipy.sparse instead.")
781
23.4375
76
py
scipy
scipy-main/scipy/sparse/_bsr.py
"""Compressed Block Sparse Row format""" __docformat__ = "restructuredtext en" __all__ = ['bsr_array', 'bsr_matrix', 'isspmatrix_bsr'] from warnings import warn import numpy as np from ._matrix import spmatrix, _array_doc_to_matrix from ._data import _data_matrix, _minmax_mixin from ._compressed import _cs_matrix from ._base import issparse, _formats, _spbase, sparray from ._sputils import (isshape, getdtype, getdata, to_native, upcast, check_shape) from . import _sparsetools from ._sparsetools import (bsr_matvec, bsr_matvecs, csr_matmat_maxnnz, bsr_matmat, bsr_transpose, bsr_sort_indices, bsr_tocsr) class _bsr_base(_cs_matrix, _minmax_mixin): """Block Sparse Row format sparse array. This can be instantiated in several ways: bsr_array(D, [blocksize=(R,C)]) where D is a dense matrix or 2-D ndarray. bsr_array(S, [blocksize=(R,C)]) with another sparse array S (equivalent to S.tobsr()) bsr_array((M, N), [blocksize=(R,C), dtype]) to construct an empty sparse array with shape (M, N) dtype is optional, defaulting to dtype='d'. bsr_array((data, ij), [blocksize=(R,C), shape=(M, N)]) where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]`` bsr_array((data, indices, indptr), [shape=(M, N)]) is the standard BSR representation where the block column indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their corresponding block values are stored in ``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not supplied, the array dimensions are inferred from the index arrays. 
Attributes ---------- dtype : dtype Data type of the array shape : 2-tuple Shape of the array ndim : int Number of dimensions (this is always 2) nnz Number of stored values, including explicit zeros data Data array indices BSR format index array indptr BSR format index pointer array blocksize Block size has_sorted_indices Whether indices are sorted Notes ----- Sparse arrays can be used in arithmetic operations: they support addition, subtraction, multiplication, division, and matrix power. **Summary of BSR format** The Block Compressed Row (BSR) format is very similar to the Compressed Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense sub matrices like the last example below. Block matrices often arise in vector-valued finite element discretizations. In such cases, BSR is considerably more efficient than CSR and CSC for many sparse arithmetic operations. **Blocksize** The blocksize (R,C) must evenly divide the shape of the sparse array (M,N). That is, R and C must satisfy the relationship ``M % R = 0`` and ``N % C = 0``. If no blocksize is specified, a simple heuristic is applied to determine an appropriate blocksize. **Canonical Format** In canonical format, there are no duplicate blocks and indices are sorted per row. 
Examples -------- >>> from scipy.sparse import bsr_array >>> import numpy as np >>> bsr_array((3, 4), dtype=np.int8).toarray() array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=int8) >>> row = np.array([0, 0, 1, 2, 2, 2]) >>> col = np.array([0, 2, 2, 0, 1, 2]) >>> data = np.array([1, 2, 3 ,4, 5, 6]) >>> bsr_array((data, (row, col)), shape=(3, 3)).toarray() array([[1, 0, 2], [0, 0, 3], [4, 5, 6]]) >>> indptr = np.array([0, 2, 3, 6]) >>> indices = np.array([0, 2, 2, 0, 1, 2]) >>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2) >>> bsr_array((data,indices,indptr), shape=(6, 6)).toarray() array([[1, 1, 0, 0, 2, 2], [1, 1, 0, 0, 2, 2], [0, 0, 0, 0, 3, 3], [0, 0, 0, 0, 3, 3], [4, 4, 5, 5, 6, 6], [4, 4, 5, 5, 6, 6]]) """ _format = 'bsr' def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None): _data_matrix.__init__(self) if issparse(arg1): if arg1.format == self.format and copy: arg1 = arg1.copy() else: arg1 = arg1.tobsr(blocksize=blocksize) self._set_self(arg1) elif isinstance(arg1,tuple): if isshape(arg1): # it's a tuple of matrix dimensions (M,N) self._shape = check_shape(arg1) M,N = self.shape # process blocksize if blocksize is None: blocksize = (1,1) else: if not isshape(blocksize): raise ValueError('invalid blocksize=%s' % blocksize) blocksize = tuple(blocksize) self.data = np.zeros((0,) + blocksize, getdtype(dtype, default=float)) R,C = blocksize if (M % R) != 0 or (N % C) != 0: raise ValueError('shape must be multiple of blocksize') # Select index dtype large enough to pass array and # scalar parameters to sparsetools idx_dtype = self._get_index_dtype(maxval=max(M//R, N//C, R, C)) self.indices = np.zeros(0, dtype=idx_dtype) self.indptr = np.zeros(M//R + 1, dtype=idx_dtype) elif len(arg1) == 2: # (data,(row,col)) format self._set_self( self._coo_container(arg1, dtype=dtype, shape=shape).tobsr( blocksize=blocksize ) ) elif len(arg1) == 3: # (data,indices,indptr) format (data, indices, indptr) = arg1 # Select index dtype 
large enough to pass array and # scalar parameters to sparsetools maxval = 1 if shape is not None: maxval = max(shape) if blocksize is not None: maxval = max(maxval, max(blocksize)) idx_dtype = self._get_index_dtype((indices, indptr), maxval=maxval, check_contents=True) self.indices = np.array(indices, copy=copy, dtype=idx_dtype) self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype) self.data = getdata(data, copy=copy, dtype=dtype) if self.data.ndim != 3: raise ValueError( 'BSR data must be 3-dimensional, got shape={}'.format( self.data.shape,)) if blocksize is not None: if not isshape(blocksize): raise ValueError(f'invalid blocksize={blocksize}') if tuple(blocksize) != self.data.shape[1:]: raise ValueError('mismatching blocksize={} vs {}'.format( blocksize, self.data.shape[1:])) else: raise ValueError('unrecognized bsr_array constructor usage') else: # must be dense try: arg1 = np.asarray(arg1) except Exception as e: raise ValueError("unrecognized form for" " %s_matrix constructor" % self.format) from e arg1 = self._coo_container( arg1, dtype=dtype ).tobsr(blocksize=blocksize) self._set_self(arg1) if shape is not None: self._shape = check_shape(shape) else: if self.shape is None: # shape not already set, try to infer dimensions try: M = len(self.indptr) - 1 N = self.indices.max() + 1 except Exception as e: raise ValueError('unable to infer matrix dimensions') from e else: R,C = self.blocksize self._shape = check_shape((M*R,N*C)) if self.shape is None: if shape is None: # TODO infer shape here raise ValueError('need to infer shape') else: self._shape = check_shape(shape) if dtype is not None: self.data = self.data.astype(dtype, copy=False) self.check_format(full_check=False) def check_format(self, full_check=True): """Check whether the matrix respects the BSR format. Parameters ---------- full_check : bool, optional If `True`, run rigorous check, scanning arrays for valid values. 
Note that activating those check might copy arrays for casting, modifying indices and index pointers' inplace. If `False`, run basic checks on attributes. O(1) operations. Default is `True`. """ M,N = self.shape R,C = self.blocksize # index arrays should have integer data types if self.indptr.dtype.kind != 'i': warn("indptr array has non-integer dtype (%s)" % self.indptr.dtype.name) if self.indices.dtype.kind != 'i': warn("indices array has non-integer dtype (%s)" % self.indices.dtype.name) # check array shapes if self.indices.ndim != 1 or self.indptr.ndim != 1: raise ValueError("indices, and indptr should be 1-D") if self.data.ndim != 3: raise ValueError("data should be 3-D") # check index pointer if (len(self.indptr) != M//R + 1): raise ValueError("index pointer size (%d) should be (%d)" % (len(self.indptr), M//R + 1)) if (self.indptr[0] != 0): raise ValueError("index pointer should start with 0") # check index and data arrays if (len(self.indices) != len(self.data)): raise ValueError("indices and data should have the same size") if (self.indptr[-1] > len(self.indices)): raise ValueError("Last value of index pointer should be less than " "the size of index and data arrays") self.prune() if full_check: # check format validity (more expensive) if self.nnz > 0: if self.indices.max() >= N//C: raise ValueError("column index values must be < %d (now max %d)" % (N//C, self.indices.max())) if self.indices.min() < 0: raise ValueError("column index values must be >= 0") if np.diff(self.indptr).min() < 0: raise ValueError("index pointer values must form a " "non-decreasing sequence") idx_dtype = self._get_index_dtype((self.indices, self.indptr)) self.indptr = np.asarray(self.indptr, dtype=idx_dtype) self.indices = np.asarray(self.indices, dtype=idx_dtype) self.data = to_native(self.data) # if not self.has_sorted_indices(): # warn('Indices were not in sorted order. 
Sorting indices.') # self.sort_indices(check_first=False) def _get_blocksize(self): return self.data.shape[1:] blocksize = property(fget=_get_blocksize) def _getnnz(self, axis=None): if axis is not None: raise NotImplementedError("_getnnz over an axis is not implemented " "for BSR format") R,C = self.blocksize return int(self.indptr[-1] * R * C) _getnnz.__doc__ = _spbase._getnnz.__doc__ def __repr__(self): format = _formats[self.format][1] return ("<%dx%d sparse matrix of type '%s'\n" "\twith %d stored elements (blocksize = %dx%d) in %s format>" % (self.shape + (self.dtype.type, self.nnz) + self.blocksize + (format,))) def diagonal(self, k=0): rows, cols = self.shape if k <= -rows or k >= cols: return np.empty(0, dtype=self.data.dtype) R, C = self.blocksize y = np.zeros(min(rows + min(k, 0), cols - max(k, 0)), dtype=upcast(self.dtype)) _sparsetools.bsr_diagonal(k, rows // R, cols // C, R, C, self.indptr, self.indices, np.ravel(self.data), y) return y diagonal.__doc__ = _spbase.diagonal.__doc__ ########################## # NotImplemented methods # ########################## def __getitem__(self,key): raise NotImplementedError def __setitem__(self,key,val): raise NotImplementedError ###################### # Arithmetic methods # ###################### def _add_dense(self, other): return self.tocoo(copy=False)._add_dense(other) def _mul_vector(self, other): M,N = self.shape R,C = self.blocksize result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype)) bsr_matvec(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel(), other, result) return result def _mul_multivector(self,other): R,C = self.blocksize M,N = self.shape n_vecs = other.shape[1] # number of column vectors result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype)) bsr_matvecs(M//R, N//C, n_vecs, R, C, self.indptr, self.indices, self.data.ravel(), other.ravel(), result.ravel()) return result def _mul_sparse_matrix(self, other): M, K1 = self.shape K2, N = other.shape R,n = 
self.blocksize # convert to this format if other.format == "bsr": C = other.blocksize[1] else: C = 1 if other.format == "csr" and n == 1: other = other.tobsr(blocksize=(n,C), copy=False) # lightweight conversion else: other = other.tobsr(blocksize=(n,C)) idx_dtype = self._get_index_dtype((self.indptr, self.indices, other.indptr, other.indices)) bnnz = csr_matmat_maxnnz(M//R, N//C, self.indptr.astype(idx_dtype), self.indices.astype(idx_dtype), other.indptr.astype(idx_dtype), other.indices.astype(idx_dtype)) idx_dtype = self._get_index_dtype((self.indptr, self.indices, other.indptr, other.indices), maxval=bnnz) indptr = np.empty(self.indptr.shape, dtype=idx_dtype) indices = np.empty(bnnz, dtype=idx_dtype) data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype)) bsr_matmat(bnnz, M//R, N//C, R, C, n, self.indptr.astype(idx_dtype), self.indices.astype(idx_dtype), np.ravel(self.data), other.indptr.astype(idx_dtype), other.indices.astype(idx_dtype), np.ravel(other.data), indptr, indices, data) data = data.reshape(-1,R,C) # TODO eliminate zeros return self._bsr_container( (data, indices, indptr), shape=(M, N), blocksize=(R, C) ) ###################### # Conversion methods # ###################### def tobsr(self, blocksize=None, copy=False): """Convert this matrix into Block Sparse Row Format. With copy=False, the data/indices may be shared between this matrix and the resultant bsr_array. If blocksize=(R, C) is provided, it will be used for determining block size of the bsr_array. 
""" if blocksize not in [None, self.blocksize]: return self.tocsr().tobsr(blocksize=blocksize) if copy: return self.copy() else: return self def tocsr(self, copy=False): M, N = self.shape R, C = self.blocksize nnz = self.nnz idx_dtype = self._get_index_dtype((self.indptr, self.indices), maxval=max(nnz, N)) indptr = np.empty(M + 1, dtype=idx_dtype) indices = np.empty(nnz, dtype=idx_dtype) data = np.empty(nnz, dtype=upcast(self.dtype)) bsr_tocsr(M // R, # n_brow N // C, # n_bcol R, C, self.indptr.astype(idx_dtype, copy=False), self.indices.astype(idx_dtype, copy=False), self.data, indptr, indices, data) return self._csr_container((data, indices, indptr), shape=self.shape) tocsr.__doc__ = _spbase.tocsr.__doc__ def tocsc(self, copy=False): return self.tocsr(copy=False).tocsc(copy=copy) tocsc.__doc__ = _spbase.tocsc.__doc__ def tocoo(self, copy=True): """Convert this matrix to COOrdinate format. When copy=False the data array will be shared between this matrix and the resultant coo_matrix. """ M,N = self.shape R,C = self.blocksize indptr_diff = np.diff(self.indptr) if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize: # Check for potential overflow indptr_diff_limited = indptr_diff.astype(np.intp) if np.any(indptr_diff_limited != indptr_diff): raise ValueError("Matrix too big to convert") indptr_diff = indptr_diff_limited idx_dtype = self._get_index_dtype(maxval=max(M, N)) row = (R * np.arange(M//R, dtype=idx_dtype)).repeat(indptr_diff) row = row.repeat(R*C).reshape(-1,R,C) row += np.tile(np.arange(R, dtype=idx_dtype).reshape(-1,1), (1,C)) row = row.reshape(-1) col = (C * self.indices).astype(idx_dtype, copy=False).repeat(R*C).reshape(-1,R,C) col += np.tile(np.arange(C, dtype=idx_dtype), (R,1)) col = col.reshape(-1) data = self.data.reshape(-1) if copy: data = data.copy() return self._coo_container( (data, (row, col)), shape=self.shape ) def toarray(self, order=None, out=None): return self.tocoo(copy=False).toarray(order=order, out=out) toarray.__doc__ = 
_spbase.toarray.__doc__ def transpose(self, axes=None, copy=False): if axes is not None: raise ValueError("Sparse matrices do not support " "an 'axes' parameter because swapping " "dimensions is the only logical permutation.") R, C = self.blocksize M, N = self.shape NBLK = self.nnz//(R*C) if self.nnz == 0: return self._bsr_container((N, M), blocksize=(C, R), dtype=self.dtype, copy=copy) indptr = np.empty(N//C + 1, dtype=self.indptr.dtype) indices = np.empty(NBLK, dtype=self.indices.dtype) data = np.empty((NBLK, C, R), dtype=self.data.dtype) bsr_transpose(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel(), indptr, indices, data.ravel()) return self._bsr_container((data, indices, indptr), shape=(N, M), copy=copy) transpose.__doc__ = _spbase.transpose.__doc__ ############################################################## # methods that examine or modify the internal data structure # ############################################################## def eliminate_zeros(self): """Remove zero elements in-place.""" if not self.nnz: return # nothing to do R,C = self.blocksize M,N = self.shape mask = (self.data != 0).reshape(-1,R*C).sum(axis=1) # nonzero blocks nonzero_blocks = mask.nonzero()[0] self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks] # modifies self.indptr and self.indices *in place* _sparsetools.csr_eliminate_zeros(M//R, N//C, self.indptr, self.indices, mask) self.prune() def sum_duplicates(self): """Eliminate duplicate matrix entries by adding them together The is an *in place* operation """ if self.has_canonical_format: return self.sort_indices() R, C = self.blocksize M, N = self.shape # port of _sparsetools.csr_sum_duplicates n_row = M // R nnz = 0 row_end = 0 for i in range(n_row): jj = row_end row_end = self.indptr[i+1] while jj < row_end: j = self.indices[jj] x = self.data[jj] jj += 1 while jj < row_end and self.indices[jj] == j: x += self.data[jj] jj += 1 self.indices[nnz] = j self.data[nnz] = x nnz += 1 self.indptr[i+1] = nnz 
self.prune() # nnz may have changed self.has_canonical_format = True def sort_indices(self): """Sort the indices of this matrix *in place* """ if self.has_sorted_indices: return R,C = self.blocksize M,N = self.shape bsr_sort_indices(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel()) self.has_sorted_indices = True def prune(self): """ Remove empty space after all non-zero elements. """ R,C = self.blocksize M,N = self.shape if len(self.indptr) != M//R + 1: raise ValueError("index pointer has invalid length") bnnz = self.indptr[-1] if len(self.indices) < bnnz: raise ValueError("indices array has too few elements") if len(self.data) < bnnz: raise ValueError("data array has too few elements") self.data = self.data[:bnnz] self.indices = self.indices[:bnnz] # utility functions def _binopt(self, other, op, in_shape=None, out_shape=None): """Apply the binary operation fn to two sparse matrices.""" # Ideally we'd take the GCDs of the blocksize dimensions # and explode self and other to match. other = self.__class__(other, blocksize=self.blocksize) # e.g. bsr_plus_bsr, etc. 
fn = getattr(_sparsetools, self.format + op + self.format) R,C = self.blocksize max_bnnz = len(self.data) + len(other.data) idx_dtype = self._get_index_dtype((self.indptr, self.indices, other.indptr, other.indices), maxval=max_bnnz) indptr = np.empty(self.indptr.shape, dtype=idx_dtype) indices = np.empty(max_bnnz, dtype=idx_dtype) bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_'] if op in bool_ops: data = np.empty(R*C*max_bnnz, dtype=np.bool_) else: data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype)) fn(self.shape[0]//R, self.shape[1]//C, R, C, self.indptr.astype(idx_dtype), self.indices.astype(idx_dtype), self.data, other.indptr.astype(idx_dtype), other.indices.astype(idx_dtype), np.ravel(other.data), indptr, indices, data) actual_bnnz = indptr[-1] indices = indices[:actual_bnnz] data = data[:R*C*actual_bnnz] if actual_bnnz < max_bnnz/2: indices = indices.copy() data = data.copy() data = data.reshape(-1,R,C) return self.__class__((data, indices, indptr), shape=self.shape) # needed by _data_matrix def _with_data(self,data,copy=True): """Returns a matrix with the same sparsity structure as self, but with different data. By default the structure arrays (i.e. .indptr and .indices) are copied. """ if copy: return self.__class__((data,self.indices.copy(),self.indptr.copy()), shape=self.shape,dtype=data.dtype) else: return self.__class__((data,self.indices,self.indptr), shape=self.shape,dtype=data.dtype) # # these functions are used by the parent class # # to remove redudancy between bsc_matrix and bsr_matrix # def _swap(self,x): # """swap the members of x if this is a column-oriented matrix # """ # return (x[0],x[1]) def isspmatrix_bsr(x): """Is `x` of a bsr_matrix type? 
Parameters ---------- x object to check for being a bsr matrix Returns ------- bool True if `x` is a bsr matrix, False otherwise Examples -------- >>> from scipy.sparse import bsr_array, bsr_matrix, csr_matrix, isspmatrix_bsr >>> isspmatrix_bsr(bsr_matrix([[5]])) True >>> isspmatrix_bsr(bsr_array([[5]])) False >>> isspmatrix_bsr(csr_matrix([[5]])) False """ return isinstance(x, bsr_matrix) # This namespace class separates array from matrix with isinstance class bsr_array(_bsr_base, sparray): pass bsr_array.__doc__ = _bsr_base.__doc__ class bsr_matrix(spmatrix, _bsr_base): pass bsr_matrix.__doc__ = _array_doc_to_matrix(_bsr_base.__doc__)
26,014
34.155405
114
py
scipy
scipy-main/scipy/sparse/data.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.sparse` namespace for importing the functions # included below. import warnings from . import _data __all__ = [ # noqa: F822 'isscalarlike', 'matrix', 'name', 'npfunc', 'spmatrix', 'validateaxis', ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.sparse.data is deprecated and has no attribute " f"{name}. Try looking in scipy.sparse instead.") warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, " "the `scipy.sparse.data` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_data, name)
811
22.882353
76
py
scipy
scipy-main/scipy/sparse/_dia.py
"""Sparse DIAgonal format""" __docformat__ = "restructuredtext en" __all__ = ['dia_array', 'dia_matrix', 'isspmatrix_dia'] import numpy as np from ._matrix import spmatrix, _array_doc_to_matrix from ._base import issparse, _formats, _spbase, sparray from ._data import _data_matrix from ._sputils import (isshape, upcast_char, getdtype, get_sum_dtype, validateaxis, check_shape) from ._sparsetools import dia_matvec class _dia_base(_data_matrix): """Sparse matrix with DIAgonal storage This can be instantiated in several ways: dia_array(D) with a dense matrix dia_array(S) with another sparse matrix S (equivalent to S.todia()) dia_array((M, N), [dtype]) to construct an empty matrix with shape (M, N), dtype is optional, defaulting to dtype='d'. dia_array((data, offsets), shape=(M, N)) where the ``data[k,:]`` stores the diagonal entries for diagonal ``offsets[k]`` (See example below) Attributes ---------- dtype : dtype Data type of the matrix shape : 2-tuple Shape of the matrix ndim : int Number of dimensions (this is always 2) nnz Number of stored values, including explicit zeros data DIA format data array of the matrix offsets DIA format offset array of the matrix Notes ----- Sparse matrices can be used in arithmetic operations: they support addition, subtraction, multiplication, division, and matrix power. 
Examples -------- >>> import numpy as np >>> from scipy.sparse import dia_array >>> dia_array((3, 4), dtype=np.int8).toarray() array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=int8) >>> data = np.array([[1, 2, 3, 4]]).repeat(3, axis=0) >>> offsets = np.array([0, -1, 2]) >>> dia_array((data, offsets), shape=(4, 4)).toarray() array([[1, 0, 3, 0], [1, 2, 0, 4], [0, 2, 3, 0], [0, 0, 3, 4]]) >>> from scipy.sparse import dia_array >>> n = 10 >>> ex = np.ones(n) >>> data = np.array([ex, 2 * ex, ex]) >>> offsets = np.array([-1, 0, 1]) >>> dia_array((data, offsets), shape=(n, n)).toarray() array([[2., 1., 0., ..., 0., 0., 0.], [1., 2., 1., ..., 0., 0., 0.], [0., 1., 2., ..., 0., 0., 0.], ..., [0., 0., 0., ..., 2., 1., 0.], [0., 0., 0., ..., 1., 2., 1.], [0., 0., 0., ..., 0., 1., 2.]]) """ _format = 'dia' def __init__(self, arg1, shape=None, dtype=None, copy=False): _data_matrix.__init__(self) if issparse(arg1): if arg1.format == "dia": if copy: arg1 = arg1.copy() self.data = arg1.data self.offsets = arg1.offsets self._shape = check_shape(arg1.shape) else: if arg1.format == self.format and copy: A = arg1.copy() else: A = arg1.todia() self.data = A.data self.offsets = A.offsets self._shape = check_shape(A.shape) elif isinstance(arg1, tuple): if isshape(arg1): # It's a tuple of matrix dimensions (M, N) # create empty matrix self._shape = check_shape(arg1) self.data = np.zeros((0,0), getdtype(dtype, default=float)) idx_dtype = self._get_index_dtype(maxval=max(self.shape)) self.offsets = np.zeros((0), dtype=idx_dtype) else: try: # Try interpreting it as (data, offsets) data, offsets = arg1 except Exception as e: raise ValueError('unrecognized form for dia_array constructor') from e else: if shape is None: raise ValueError('expected a shape argument') self.data = np.atleast_2d(np.array(arg1[0], dtype=dtype, copy=copy)) self.offsets = np.atleast_1d(np.array(arg1[1], dtype=self._get_index_dtype(maxval=max(shape)), copy=copy)) self._shape = check_shape(shape) else: #must be 
dense, convert to COO first, then to DIA try: arg1 = np.asarray(arg1) except Exception as e: raise ValueError("unrecognized form for" " %s_matrix constructor" % self.format) from e A = self._coo_container(arg1, dtype=dtype, shape=shape).todia() self.data = A.data self.offsets = A.offsets self._shape = check_shape(A.shape) if dtype is not None: self.data = self.data.astype(dtype) #check format if self.offsets.ndim != 1: raise ValueError('offsets array must have rank 1') if self.data.ndim != 2: raise ValueError('data array must have rank 2') if self.data.shape[0] != len(self.offsets): raise ValueError('number of diagonals (%d) ' 'does not match the number of offsets (%d)' % (self.data.shape[0], len(self.offsets))) if len(np.unique(self.offsets)) != len(self.offsets): raise ValueError('offset array contains duplicate values') def __repr__(self): format = _formats[self.getformat()][1] return "<%dx%d sparse matrix of type '%s'\n" \ "\twith %d stored elements (%d diagonals) in %s format>" % \ (self.shape + (self.dtype.type, self.nnz, self.data.shape[0], format)) def _data_mask(self): """Returns a mask of the same shape as self.data, where mask[i,j] is True when data[i,j] corresponds to a stored element.""" num_rows, num_cols = self.shape offset_inds = np.arange(self.data.shape[1]) row = offset_inds - self.offsets[:,None] mask = (row >= 0) mask &= (row < num_rows) mask &= (offset_inds < num_cols) return mask def count_nonzero(self): mask = self._data_mask() return np.count_nonzero(self.data[mask]) def _getnnz(self, axis=None): if axis is not None: raise NotImplementedError("_getnnz over an axis is not implemented " "for DIA format") M,N = self.shape nnz = 0 for k in self.offsets: if k > 0: nnz += min(M,N-k) else: nnz += min(M+k,N) return int(nnz) _getnnz.__doc__ = _spbase._getnnz.__doc__ count_nonzero.__doc__ = _spbase.count_nonzero.__doc__ def sum(self, axis=None, dtype=None, out=None): validateaxis(axis) if axis is not None and axis < 0: axis += 2 res_dtype = 
get_sum_dtype(self.dtype) num_rows, num_cols = self.shape ret = None if axis == 0: mask = self._data_mask() x = (self.data * mask).sum(axis=0) if x.shape[0] == num_cols: res = x else: res = np.zeros(num_cols, dtype=x.dtype) res[:x.shape[0]] = x ret = self._ascontainer(res, dtype=res_dtype) else: row_sums = np.zeros((num_rows, 1), dtype=res_dtype) one = np.ones(num_cols, dtype=res_dtype) dia_matvec(num_rows, num_cols, len(self.offsets), self.data.shape[1], self.offsets, self.data, one, row_sums) row_sums = self._ascontainer(row_sums) if axis is None: return row_sums.sum(dtype=dtype, out=out) ret = self._ascontainer(row_sums.sum(axis=axis)) if out is not None and out.shape != ret.shape: raise ValueError("dimensions do not match") return ret.sum(axis=(), dtype=dtype, out=out) sum.__doc__ = _spbase.sum.__doc__ def _add_sparse(self, other): # Check if other is also of type dia_array if not isinstance(other, type(self)): # If other is not of type dia_array, default to # converting to csr_matrix, as is done in the _add_sparse # method of parent class _spbase return self.tocsr()._add_sparse(other) # The task is to compute m = self + other # Start by making a copy of self, of the datatype # that should result from adding self and other dtype = np.promote_types(self.dtype, other.dtype) m = self.astype(dtype, copy=True) # Then, add all the stored diagonals of other. for d in other.offsets: # Check if the diagonal has already been added. 
if d in m.offsets: # If the diagonal is already there, we need to take # the sum of the existing and the new m.setdiag(m.diagonal(d) + other.diagonal(d), d) else: m.setdiag(other.diagonal(d), d) return m def _mul_vector(self, other): x = other y = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char, x.dtype.char)) L = self.data.shape[1] M,N = self.shape dia_matvec(M,N, len(self.offsets), L, self.offsets, self.data, x.ravel(), y.ravel()) return y def _mul_multimatrix(self, other): return np.hstack([self._mul_vector(col).reshape(-1,1) for col in other.T]) def _setdiag(self, values, k=0): M, N = self.shape if values.ndim == 0: # broadcast values_n = np.inf else: values_n = len(values) if k < 0: n = min(M + k, N, values_n) min_index = 0 max_index = n else: n = min(M, N - k, values_n) min_index = k max_index = k + n if values.ndim != 0: # allow also longer sequences values = values[:n] data_rows, data_cols = self.data.shape if k in self.offsets: if max_index > data_cols: data = np.zeros((data_rows, max_index), dtype=self.data.dtype) data[:, :data_cols] = self.data self.data = data self.data[self.offsets == k, min_index:max_index] = values else: self.offsets = np.append(self.offsets, self.offsets.dtype.type(k)) m = max(max_index, data_cols) data = np.zeros((data_rows + 1, m), dtype=self.data.dtype) data[:-1, :data_cols] = self.data data[-1, min_index:max_index] = values self.data = data def todia(self, copy=False): if copy: return self.copy() else: return self todia.__doc__ = _spbase.todia.__doc__ def transpose(self, axes=None, copy=False): if axes is not None: raise ValueError("Sparse matrices do not support " "an 'axes' parameter because swapping " "dimensions is the only logical permutation.") num_rows, num_cols = self.shape max_dim = max(self.shape) # flip diagonal offsets offsets = -self.offsets # re-align the data matrix r = np.arange(len(offsets), dtype=np.intc)[:, None] c = np.arange(num_rows, dtype=np.intc) - (offsets % max_dim)[:, None] pad_amount = 
max(0, max_dim-self.data.shape[1]) data = np.hstack((self.data, np.zeros((self.data.shape[0], pad_amount), dtype=self.data.dtype))) data = data[r, c] return self._dia_container((data, offsets), shape=( num_cols, num_rows), copy=copy) transpose.__doc__ = _spbase.transpose.__doc__ def diagonal(self, k=0): rows, cols = self.shape if k <= -rows or k >= cols: return np.empty(0, dtype=self.data.dtype) idx, = np.nonzero(self.offsets == k) first_col = max(0, k) last_col = min(rows + k, cols) result_size = last_col - first_col if idx.size == 0: return np.zeros(result_size, dtype=self.data.dtype) result = self.data[idx[0], first_col:last_col] padding = result_size - len(result) if padding > 0: result = np.pad(result, (0, padding), mode='constant') return result diagonal.__doc__ = _spbase.diagonal.__doc__ def tocsc(self, copy=False): if self.nnz == 0: return self._csc_container(self.shape, dtype=self.dtype) num_rows, num_cols = self.shape num_offsets, offset_len = self.data.shape offset_inds = np.arange(offset_len) row = offset_inds - self.offsets[:,None] mask = (row >= 0) mask &= (row < num_rows) mask &= (offset_inds < num_cols) mask &= (self.data != 0) idx_dtype = self._get_index_dtype(maxval=max(self.shape)) indptr = np.zeros(num_cols + 1, dtype=idx_dtype) indptr[1:offset_len+1] = np.cumsum(mask.sum(axis=0)[:num_cols]) if offset_len < num_cols: indptr[offset_len+1:] = indptr[offset_len] indices = row.T[mask.T].astype(idx_dtype, copy=False) data = self.data.T[mask.T] return self._csc_container((data, indices, indptr), shape=self.shape, dtype=self.dtype) tocsc.__doc__ = _spbase.tocsc.__doc__ def tocoo(self, copy=False): num_rows, num_cols = self.shape num_offsets, offset_len = self.data.shape offset_inds = np.arange(offset_len) row = offset_inds - self.offsets[:,None] mask = (row >= 0) mask &= (row < num_rows) mask &= (offset_inds < num_cols) mask &= (self.data != 0) row = row[mask] col = np.tile(offset_inds, num_offsets)[mask.ravel()] data = self.data[mask] # Note: this 
cannot set has_canonical_format=True, because despite the # lack of duplicates, we do not generate sorted indices. return self._coo_container( (data, (row, col)), shape=self.shape, dtype=self.dtype, copy=False ) tocoo.__doc__ = _spbase.tocoo.__doc__ # needed by _data_matrix def _with_data(self, data, copy=True): """Returns a matrix with the same sparsity structure as self, but with different data. By default the structure arrays are copied. """ if copy: return self._dia_container( (data, self.offsets.copy()), shape=self.shape ) else: return self._dia_container( (data, self.offsets), shape=self.shape ) def resize(self, *shape): shape = check_shape(shape) M, N = shape # we do not need to handle the case of expanding N self.data = self.data[:, :N] if (M > self.shape[0] and np.any(self.offsets + self.shape[0] < self.data.shape[1])): # explicitly clear values that were previously hidden mask = (self.offsets[:, None] + self.shape[0] <= np.arange(self.data.shape[1])) self.data[mask] = 0 self._shape = shape resize.__doc__ = _spbase.resize.__doc__ def isspmatrix_dia(x): """Is `x` of dia_matrix type? Parameters ---------- x object to check for being a dia matrix Returns ------- bool True if `x` is a dia matrix, False otherwise Examples -------- >>> from scipy.sparse import dia_array, dia_matrix, coo_matrix, isspmatrix_dia >>> isspmatrix_dia(dia_matrix([[5]])) True >>> isspmatrix_dia(dia_array([[5]])) False >>> isspmatrix_dia(coo_matrix([[5]])) False """ return isinstance(x, dia_matrix) # This namespace class separates array from matrix with isinstance class dia_array(_dia_base, sparray): pass dia_array.__doc__ = _dia_base.__doc__ class dia_matrix(spmatrix, _dia_base): pass dia_matrix.__doc__ = _array_doc_to_matrix(_dia_base.__doc__)
16,458
33.147303
105
py
scipy
scipy-main/scipy/sparse/_lil.py
"""List of Lists sparse matrix class """ __docformat__ = "restructuredtext en" __all__ = ['lil_array', 'lil_matrix', 'isspmatrix_lil'] from bisect import bisect_left import numpy as np from ._matrix import spmatrix, _array_doc_to_matrix from ._base import _spbase, sparray, issparse from ._index import IndexMixin, INT_TYPES, _broadcast_arrays from ._sputils import (getdtype, isshape, isscalarlike, upcast_scalar, check_shape, check_reshape_kwargs) from . import _csparsetools class _lil_base(_spbase, IndexMixin): """Row-based LIst of Lists sparse matrix This is a structure for constructing sparse matrices incrementally. Note that inserting a single item can take linear time in the worst case; to construct a matrix efficiently, make sure the items are pre-sorted by index, per row. This can be instantiated in several ways: lil_array(D) with a dense matrix or rank-2 ndarray D lil_array(S) with another sparse matrix S (equivalent to S.tolil()) lil_array((M, N), [dtype]) to construct an empty matrix with shape (M, N) dtype is optional, defaulting to dtype='d'. Attributes ---------- dtype : dtype Data type of the matrix shape : 2-tuple Shape of the matrix ndim : int Number of dimensions (this is always 2) nnz Number of stored values, including explicit zeros data LIL format data array of the matrix rows LIL format row index array of the matrix Notes ----- Sparse matrices can be used in arithmetic operations: they support addition, subtraction, multiplication, division, and matrix power. 
Advantages of the LIL format - supports flexible slicing - changes to the matrix sparsity structure are efficient Disadvantages of the LIL format - arithmetic operations LIL + LIL are slow (consider CSR or CSC) - slow column slicing (consider CSC) - slow matrix vector products (consider CSR or CSC) Intended Usage - LIL is a convenient format for constructing sparse matrices - once a matrix has been constructed, convert to CSR or CSC format for fast arithmetic and matrix vector operations - consider using the COO format when constructing large matrices Data Structure - An array (``self.rows``) of rows, each of which is a sorted list of column indices of non-zero elements. - The corresponding nonzero values are stored in similar fashion in ``self.data``. """ _format = 'lil' def __init__(self, arg1, shape=None, dtype=None, copy=False): _spbase.__init__(self) self.dtype = getdtype(dtype, arg1, default=float) # First get the shape if issparse(arg1): if arg1.format == "lil" and copy: A = arg1.copy() else: A = arg1.tolil() if dtype is not None: A = A.astype(dtype, copy=False) self._shape = check_shape(A.shape) self.dtype = A.dtype self.rows = A.rows self.data = A.data elif isinstance(arg1,tuple): if isshape(arg1): if shape is not None: raise ValueError('invalid use of shape parameter') M, N = arg1 self._shape = check_shape((M, N)) self.rows = np.empty((M,), dtype=object) self.data = np.empty((M,), dtype=object) for i in range(M): self.rows[i] = [] self.data[i] = [] else: raise TypeError('unrecognized lil_array constructor usage') else: # assume A is dense try: A = self._ascontainer(arg1) except TypeError as e: raise TypeError('unsupported matrix type') from e else: A = self._csr_container(A, dtype=dtype).tolil() self._shape = check_shape(A.shape) self.dtype = A.dtype self.rows = A.rows self.data = A.data def __iadd__(self,other): self[:,:] = self + other return self def __isub__(self,other): self[:,:] = self - other return self def __imul__(self,other): if 
isscalarlike(other): self[:,:] = self * other return self else: return NotImplemented def __itruediv__(self,other): if isscalarlike(other): self[:,:] = self / other return self else: return NotImplemented # Whenever the dimensions change, empty lists should be created for each # row def _getnnz(self, axis=None): if axis is None: return sum([len(rowvals) for rowvals in self.data]) if axis < 0: axis += 2 if axis == 0: out = np.zeros(self.shape[1], dtype=np.intp) for row in self.rows: out[row] += 1 return out elif axis == 1: return np.array([len(rowvals) for rowvals in self.data], dtype=np.intp) else: raise ValueError('axis out of bounds') def count_nonzero(self): return sum(np.count_nonzero(rowvals) for rowvals in self.data) _getnnz.__doc__ = _spbase._getnnz.__doc__ count_nonzero.__doc__ = _spbase.count_nonzero.__doc__ def __str__(self): val = '' for i, row in enumerate(self.rows): for pos, j in enumerate(row): val += f" {str((i, j))}\t{str(self.data[i][pos])}\n" return val[:-1] def getrowview(self, i): """Returns a view of the 'i'th row (without copying). """ new = self._lil_container((1, self.shape[1]), dtype=self.dtype) new.rows[0] = self.rows[i] new.data[0] = self.data[i] return new def getrow(self, i): """Returns a copy of the 'i'th row. """ M, N = self.shape if i < 0: i += M if i < 0 or i >= M: raise IndexError('row index out of bounds') new = self._lil_container((1, N), dtype=self.dtype) new.rows[0] = self.rows[i][:] new.data[0] = self.data[i][:] return new def __getitem__(self, key): # Fast path for simple (int, int) indexing. if (isinstance(key, tuple) and len(key) == 2 and isinstance(key[0], INT_TYPES) and isinstance(key[1], INT_TYPES)): # lil_get1 handles validation for us. return self._get_intXint(*key) # Everything else takes the normal path. return IndexMixin.__getitem__(self, key) def _asindices(self, idx, N): # LIL routines handle bounds-checking for us, so don't do it here. 
try: x = np.asarray(idx) except (ValueError, TypeError, MemoryError) as e: raise IndexError('invalid index') from e if x.ndim not in (1, 2): raise IndexError('Index dimension must be <= 2') return x def _get_intXint(self, row, col): v = _csparsetools.lil_get1(self.shape[0], self.shape[1], self.rows, self.data, row, col) return self.dtype.type(v) def _get_sliceXint(self, row, col): row = range(*row.indices(self.shape[0])) return self._get_row_ranges(row, slice(col, col+1)) def _get_arrayXint(self, row, col): row = row.squeeze() return self._get_row_ranges(row, slice(col, col+1)) def _get_intXslice(self, row, col): return self._get_row_ranges((row,), col) def _get_sliceXslice(self, row, col): row = range(*row.indices(self.shape[0])) return self._get_row_ranges(row, col) def _get_arrayXslice(self, row, col): return self._get_row_ranges(row, col) def _get_intXarray(self, row, col): row = np.array(row, dtype=col.dtype, ndmin=1) return self._get_columnXarray(row, col) def _get_sliceXarray(self, row, col): row = np.arange(*row.indices(self.shape[0])) return self._get_columnXarray(row, col) def _get_columnXarray(self, row, col): # outer indexing row, col = _broadcast_arrays(row[:,None], col) return self._get_arrayXarray(row, col) def _get_arrayXarray(self, row, col): # inner indexing i, j = map(np.atleast_2d, _prepare_index_for_memoryview(row, col)) new = self._lil_container(i.shape, dtype=self.dtype) _csparsetools.lil_fancy_get(self.shape[0], self.shape[1], self.rows, self.data, new.rows, new.data, i, j) return new def _get_row_ranges(self, rows, col_slice): """ Fast path for indexing in the case where column index is slice. This gains performance improvement over brute force by more efficient skipping of zeros, by accessing the elements column-wise in order. Parameters ---------- rows : sequence or range Rows indexed. If range, must be within valid bounds. 
col_slice : slice Columns indexed """ j_start, j_stop, j_stride = col_slice.indices(self.shape[1]) col_range = range(j_start, j_stop, j_stride) nj = len(col_range) new = self._lil_container((len(rows), nj), dtype=self.dtype) _csparsetools.lil_get_row_ranges(self.shape[0], self.shape[1], self.rows, self.data, new.rows, new.data, rows, j_start, j_stop, j_stride, nj) return new def _set_intXint(self, row, col, x): _csparsetools.lil_insert(self.shape[0], self.shape[1], self.rows, self.data, row, col, x) def _set_arrayXarray(self, row, col, x): i, j, x = map(np.atleast_2d, _prepare_index_for_memoryview(row, col, x)) _csparsetools.lil_fancy_set(self.shape[0], self.shape[1], self.rows, self.data, i, j, x) def _set_arrayXarray_sparse(self, row, col, x): # Fall back to densifying x x = np.asarray(x.toarray(), dtype=self.dtype) x, _ = _broadcast_arrays(x, row) self._set_arrayXarray(row, col, x) def __setitem__(self, key, x): if isinstance(key, tuple) and len(key) == 2: row, col = key # Fast path for simple (int, int) indexing. if isinstance(row, INT_TYPES) and isinstance(col, INT_TYPES): x = self.dtype.type(x) if x.size > 1: raise ValueError("Trying to assign a sequence to an item") return self._set_intXint(row, col, x) # Fast path for full-matrix sparse assignment. if (isinstance(row, slice) and isinstance(col, slice) and row == slice(None) and col == slice(None) and issparse(x) and x.shape == self.shape): x = self._lil_container(x, dtype=self.dtype) self.rows = x.rows self.data = x.data return # Everything else takes the normal path. IndexMixin.__setitem__(self, key, x) def _mul_scalar(self, other): if other == 0: # Multiply by zero: return the zero matrix new = self._lil_container(self.shape, dtype=self.dtype) else: res_dtype = upcast_scalar(self.dtype, other) new = self.copy() new = new.astype(res_dtype) # Multiply this scalar by every element. 
for j, rowvals in enumerate(new.data): new.data[j] = [val*other for val in rowvals] return new def __truediv__(self, other): # self / other if isscalarlike(other): new = self.copy() # Divide every element by this scalar for j, rowvals in enumerate(new.data): new.data[j] = [val/other for val in rowvals] return new else: return self.tocsr() / other def copy(self): M, N = self.shape new = self._lil_container(self.shape, dtype=self.dtype) # This is ~14x faster than calling deepcopy() on rows and data. _csparsetools.lil_get_row_ranges(M, N, self.rows, self.data, new.rows, new.data, range(M), 0, N, 1, N) return new copy.__doc__ = _spbase.copy.__doc__ def reshape(self, *args, **kwargs): shape = check_shape(args, self.shape) order, copy = check_reshape_kwargs(kwargs) # Return early if reshape is not required if shape == self.shape: if copy: return self.copy() else: return self new = self._lil_container(shape, dtype=self.dtype) if order == 'C': ncols = self.shape[1] for i, row in enumerate(self.rows): for col, j in enumerate(row): new_r, new_c = np.unravel_index(i * ncols + j, shape) new[new_r, new_c] = self[i, j] elif order == 'F': nrows = self.shape[0] for i, row in enumerate(self.rows): for col, j in enumerate(row): new_r, new_c = np.unravel_index(i + j * nrows, shape, order) new[new_r, new_c] = self[i, j] else: raise ValueError("'order' must be 'C' or 'F'") return new reshape.__doc__ = _spbase.reshape.__doc__ def resize(self, *shape): shape = check_shape(shape) new_M, new_N = shape M, N = self.shape if new_M < M: self.rows = self.rows[:new_M] self.data = self.data[:new_M] elif new_M > M: self.rows = np.resize(self.rows, new_M) self.data = np.resize(self.data, new_M) for i in range(M, new_M): self.rows[i] = [] self.data[i] = [] if new_N < N: for row, data in zip(self.rows, self.data): trunc = bisect_left(row, new_N) del row[trunc:] del data[trunc:] self._shape = shape resize.__doc__ = _spbase.resize.__doc__ def toarray(self, order=None, out=None): d = 
self._process_toarray_args(order, out) for i, row in enumerate(self.rows): for pos, j in enumerate(row): d[i, j] = self.data[i][pos] return d toarray.__doc__ = _spbase.toarray.__doc__ def transpose(self, axes=None, copy=False): return self.tocsr(copy=copy).transpose(axes=axes, copy=False).tolil(copy=False) transpose.__doc__ = _spbase.transpose.__doc__ def tolil(self, copy=False): if copy: return self.copy() else: return self tolil.__doc__ = _spbase.tolil.__doc__ def tocsr(self, copy=False): M, N = self.shape if M == 0 or N == 0: return self._csr_container((M, N), dtype=self.dtype) # construct indptr array if M*N <= np.iinfo(np.int32).max: # fast path: it is known that 64-bit indexing will not be needed. idx_dtype = np.int32 indptr = np.empty(M + 1, dtype=idx_dtype) indptr[0] = 0 _csparsetools.lil_get_lengths(self.rows, indptr[1:]) np.cumsum(indptr, out=indptr) nnz = indptr[-1] else: idx_dtype = self._get_index_dtype(maxval=N) lengths = np.empty(M, dtype=idx_dtype) _csparsetools.lil_get_lengths(self.rows, lengths) nnz = lengths.sum(dtype=np.int64) idx_dtype = self._get_index_dtype(maxval=max(N, nnz)) indptr = np.empty(M + 1, dtype=idx_dtype) indptr[0] = 0 np.cumsum(lengths, dtype=idx_dtype, out=indptr[1:]) indices = np.empty(nnz, dtype=idx_dtype) data = np.empty(nnz, dtype=self.dtype) _csparsetools.lil_flatten_to_array(self.rows, indices) _csparsetools.lil_flatten_to_array(self.data, data) # init csr matrix return self._csr_container((data, indices, indptr), shape=self.shape) tocsr.__doc__ = _spbase.tocsr.__doc__ def _prepare_index_for_memoryview(i, j, x=None): """ Convert index and data arrays to form suitable for passing to the Cython fancy getset routines. The conversions are necessary since to (i) ensure the integer index arrays are in one of the accepted types, and (ii) to ensure the arrays are writable so that Cython memoryview support doesn't choke on them. 
Parameters ---------- i, j Index arrays x : optional Data arrays Returns ------- i, j, x Re-formatted arrays (x is omitted, if input was None) """ if i.dtype > j.dtype: j = j.astype(i.dtype) elif i.dtype < j.dtype: i = i.astype(j.dtype) if not i.flags.writeable or i.dtype not in (np.int32, np.int64): i = i.astype(np.intp) if not j.flags.writeable or j.dtype not in (np.int32, np.int64): j = j.astype(np.intp) if x is not None: if not x.flags.writeable: x = x.copy() return i, j, x else: return i, j def isspmatrix_lil(x): """Is `x` of lil_matrix type? Parameters ---------- x object to check for being a lil matrix Returns ------- bool True if `x` is a lil matrix, False otherwise Examples -------- >>> from scipy.sparse import lil_array, lil_matrix, coo_matrix, isspmatrix_lil >>> isspmatrix_lil(lil_matrix([[5]])) True >>> isspmatrix_lil(lil_array([[5]])) False >>> isspmatrix_lil(coo_matrix([[5]])) False """ return isinstance(x, lil_matrix) # This namespace class separates array from matrix with isinstance class lil_array(_lil_base, sparray): pass lil_array.__doc__ = _lil_base.__doc__ class lil_matrix(spmatrix, _lil_base): pass lil_matrix.__doc__ = _array_doc_to_matrix(_lil_base.__doc__)
18,618
32.248214
87
py
scipy
scipy-main/scipy/sparse/construct.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.sparse` namespace for importing the functions # included below. import warnings from . import _construct __all__ = [ # noqa: F822 'block_diag', 'bmat', 'bsr_matrix', 'check_random_state', 'coo_matrix', 'csc_matrix', 'csr_hstack', 'csr_matrix', 'dia_matrix', 'diags', 'eye', 'get_index_dtype', 'hstack', 'identity', 'isscalarlike', 'issparse', 'kron', 'kronsum', 'numbers', 'partial', 'rand', 'random', 'rng_integers', 'spdiags', 'upcast', 'vstack', ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.sparse.construct is deprecated and has no attribute " f"{name}. Try looking in scipy.sparse instead.") warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, " "the `scipy.sparse.construct` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_construct, name)
1,158
20.462963
76
py
scipy
scipy-main/scipy/sparse/_generate_sparsetools.py
""" python generate_sparsetools.py Generate manual wrappers for C++ sparsetools code. Type codes used: 'i': integer scalar 'I': integer array 'T': data array 'B': boolean array 'V': std::vector<integer>* 'W': std::vector<data>* '*': indicates that the next argument is an output argument 'v': void 'l': 64-bit integer scalar See sparsetools.cxx for more details. """ import optparse import os from stat import ST_MTIME # # List of all routines and their argument types. # # The first code indicates the return value, the rest the arguments. # # bsr.h BSR_ROUTINES = """ bsr_diagonal v iiiiiIIT*T bsr_tocsr v iiiiIIT*I*I*T bsr_scale_rows v iiiiII*TT bsr_scale_columns v iiiiII*TT bsr_sort_indices v iiii*I*I*T bsr_transpose v iiiiIIT*I*I*T bsr_matmat v iiiiiiIITIIT*I*I*T bsr_matvec v iiiiIITT*T bsr_matvecs v iiiiiIITT*T bsr_elmul_bsr v iiiiIITIIT*I*I*T bsr_eldiv_bsr v iiiiIITIIT*I*I*T bsr_plus_bsr v iiiiIITIIT*I*I*T bsr_minus_bsr v iiiiIITIIT*I*I*T bsr_maximum_bsr v iiiiIITIIT*I*I*T bsr_minimum_bsr v iiiiIITIIT*I*I*T bsr_ne_bsr v iiiiIITIIT*I*I*B bsr_lt_bsr v iiiiIITIIT*I*I*B bsr_gt_bsr v iiiiIITIIT*I*I*B bsr_le_bsr v iiiiIITIIT*I*I*B bsr_ge_bsr v iiiiIITIIT*I*I*B """ # csc.h CSC_ROUTINES = """ csc_diagonal v iiiIIT*T csc_tocsr v iiIIT*I*I*T csc_matmat_maxnnz l iiIIII csc_matmat v iiIITIIT*I*I*T csc_matvec v iiIITT*T csc_matvecs v iiiIITT*T csc_elmul_csc v iiIITIIT*I*I*T csc_eldiv_csc v iiIITIIT*I*I*T csc_plus_csc v iiIITIIT*I*I*T csc_minus_csc v iiIITIIT*I*I*T csc_maximum_csc v iiIITIIT*I*I*T csc_minimum_csc v iiIITIIT*I*I*T csc_ne_csc v iiIITIIT*I*I*B csc_lt_csc v iiIITIIT*I*I*B csc_gt_csc v iiIITIIT*I*I*B csc_le_csc v iiIITIIT*I*I*B csc_ge_csc v iiIITIIT*I*I*B """ # csr.h CSR_ROUTINES = """ csr_matmat_maxnnz l iiIIII csr_matmat v iiIITIIT*I*I*T csr_diagonal v iiiIIT*T csr_tocsc v iiIIT*I*I*T csr_tobsr v iiiiIIT*I*I*T csr_todense v iiIIT*T csr_matvec v iiIITT*T csr_matvecs v iiiIITT*T csr_elmul_csr v iiIITIIT*I*I*T csr_eldiv_csr v iiIITIIT*I*I*T csr_plus_csr v 
iiIITIIT*I*I*T csr_minus_csr v iiIITIIT*I*I*T csr_maximum_csr v iiIITIIT*I*I*T csr_minimum_csr v iiIITIIT*I*I*T csr_ne_csr v iiIITIIT*I*I*B csr_lt_csr v iiIITIIT*I*I*B csr_gt_csr v iiIITIIT*I*I*B csr_le_csr v iiIITIIT*I*I*B csr_ge_csr v iiIITIIT*I*I*B csr_scale_rows v iiII*TT csr_scale_columns v iiII*TT csr_sort_indices v iI*I*T csr_eliminate_zeros v ii*I*I*T csr_sum_duplicates v ii*I*I*T get_csr_submatrix v iiIITiiii*V*V*W csr_row_index v iIIIT*I*T csr_row_slice v iiiIIT*I*T csr_column_index1 v iIiiII*I*I csr_column_index2 v IIiIT*I*T csr_sample_values v iiIITiII*T csr_count_blocks i iiiiII csr_sample_offsets i iiIIiII*I csr_hstack v iiIIIT*I*I*T expandptr v iI*I test_throw_error i csr_has_sorted_indices i iII csr_has_canonical_format i iII """ # coo.h, dia.h, csgraph.h OTHER_ROUTINES = """ coo_tocsr v iiiIIT*I*I*T coo_todense v iilIIT*Ti coo_matvec v lIITT*T dia_matvec v iiiiITT*T cs_graph_components i iII*I """ # List of compilation units COMPILATION_UNITS = [ ('bsr', BSR_ROUTINES), ('csr', CSR_ROUTINES), ('csc', CSC_ROUTINES), ('other', OTHER_ROUTINES), ] # # List of the supported index typenums and the corresponding C++ types # I_TYPES = [ ('NPY_INT32', 'npy_int32'), ('NPY_INT64', 'npy_int64'), ] # # List of the supported data typenums and the corresponding C++ types # T_TYPES = [ ('NPY_BOOL', 'npy_bool_wrapper'), ('NPY_BYTE', 'npy_byte'), ('NPY_UBYTE', 'npy_ubyte'), ('NPY_SHORT', 'npy_short'), ('NPY_USHORT', 'npy_ushort'), ('NPY_INT', 'npy_int'), ('NPY_UINT', 'npy_uint'), ('NPY_LONG', 'npy_long'), ('NPY_ULONG', 'npy_ulong'), ('NPY_LONGLONG', 'npy_longlong'), ('NPY_ULONGLONG', 'npy_ulonglong'), ('NPY_FLOAT', 'npy_float'), ('NPY_DOUBLE', 'npy_double'), ('NPY_LONGDOUBLE', 'npy_longdouble'), ('NPY_CFLOAT', 'npy_cfloat_wrapper'), ('NPY_CDOUBLE', 'npy_cdouble_wrapper'), ('NPY_CLONGDOUBLE', 'npy_clongdouble_wrapper'), ] # # Code templates # THUNK_TEMPLATE = """ static PY_LONG_LONG %(name)s_thunk(int I_typenum, int T_typenum, void **a) { %(thunk_content)s } """ 
METHOD_TEMPLATE = """ NPY_VISIBILITY_HIDDEN PyObject * %(name)s_method(PyObject *self, PyObject *args) { return call_thunk('%(ret_spec)s', "%(arg_spec)s", %(name)s_thunk, args); } """ GET_THUNK_CASE_TEMPLATE = """ static int get_thunk_case(int I_typenum, int T_typenum) { %(content)s; return -1; } """ # # Code generation # def newer(source, target): """ Return true if 'source' exists and is more recently modified than 'target', or if 'source' exists and 'target' doesn't. Return false if both exist and 'target' is the same age or younger than 'source'. """ if not os.path.exists(source): raise ValueError("file '%s' does not exist" % os.path.abspath(source)) if not os.path.exists(target): return 1 mtime1 = os.stat(source)[ST_MTIME] mtime2 = os.stat(target)[ST_MTIME] return mtime1 > mtime2 def get_thunk_type_set(): """ Get a list containing cartesian product of data types, plus a getter routine. Returns ------- i_types : list [(j, I_typenum, None, I_type, None), ...] Pairing of index type numbers and the corresponding C++ types, and an unique index `j`. This is for routines that are parameterized only by I but not by T. it_types : list [(j, I_typenum, T_typenum, I_type, T_type), ...] Same as `i_types`, but for routines parameterized both by T and I. getter_code : str C++ code for a function that takes I_typenum, T_typenum and returns the unique index corresponding to the lists, or -1 if no match was found. 
""" it_types = [] i_types = [] j = 0 getter_code = " if (0) {}" for I_typenum, I_type in I_TYPES: piece = """ else if (I_typenum == %(I_typenum)s) { if (T_typenum == -1) { return %(j)s; }""" getter_code += piece % dict(I_typenum=I_typenum, j=j) i_types.append((j, I_typenum, None, I_type, None)) j += 1 for T_typenum, T_type in T_TYPES: piece = """ else if (T_typenum == %(T_typenum)s) { return %(j)s; }""" getter_code += piece % dict(T_typenum=T_typenum, j=j) it_types.append((j, I_typenum, T_typenum, I_type, T_type)) j += 1 getter_code += """ }""" return i_types, it_types, GET_THUNK_CASE_TEMPLATE % dict(content=getter_code) def parse_routine(name, args, types): """ Generate thunk and method code for a given routine. Parameters ---------- name : str Name of the C++ routine args : str Argument list specification (in format explained above) types : list List of types to instantiate, as returned `get_thunk_type_set` """ ret_spec = args[0] arg_spec = args[1:] def get_arglist(I_type, T_type): """ Generate argument list for calling the C++ function """ args = [] next_is_writeable = False j = 0 for t in arg_spec: const = '' if next_is_writeable else 'const ' next_is_writeable = False if t == '*': next_is_writeable = True continue elif t == 'i': args.append("*(%s*)a[%d]" % (const + I_type, j)) elif t == 'I': args.append("(%s*)a[%d]" % (const + I_type, j)) elif t == 'T': args.append("(%s*)a[%d]" % (const + T_type, j)) elif t == 'B': args.append("(npy_bool_wrapper*)a[%d]" % (j,)) elif t == 'V': if const: raise ValueError("'V' argument must be an output arg") args.append("(std::vector<%s>*)a[%d]" % (I_type, j,)) elif t == 'W': if const: raise ValueError("'W' argument must be an output arg") args.append("(std::vector<%s>*)a[%d]" % (T_type, j,)) elif t == 'l': args.append("*(%snpy_int64*)a[%d]" % (const, j)) else: raise ValueError(f"Invalid spec character {t!r}") j += 1 return ", ".join(args) # Generate thunk code: a giant switch statement with different # type combinations inside. 
thunk_content = """int j = get_thunk_case(I_typenum, T_typenum); switch (j) {""" for j, I_typenum, T_typenum, I_type, T_type in types: arglist = get_arglist(I_type, T_type) piece = """ case %(j)s:""" if ret_spec == 'v': piece += """ (void)%(name)s(%(arglist)s); return 0;""" else: piece += """ return %(name)s(%(arglist)s);""" thunk_content += piece % dict(j=j, I_type=I_type, T_type=T_type, I_typenum=I_typenum, T_typenum=T_typenum, arglist=arglist, name=name) thunk_content += """ default: throw std::runtime_error("internal error: invalid argument typenums"); }""" thunk_code = THUNK_TEMPLATE % dict(name=name, thunk_content=thunk_content) # Generate method code method_code = METHOD_TEMPLATE % dict(name=name, ret_spec=ret_spec, arg_spec=arg_spec) return thunk_code, method_code def main(): p = optparse.OptionParser(usage=(__doc__ or '').strip()) p.add_option("--no-force", action="store_false", dest="force", default=True) p.add_option("-o", "--outdir", type=str, help="Relative path to the output directory") options, args = p.parse_args() names = [] i_types, it_types, getter_code = get_thunk_type_set() # Generate *_impl.h for each compilation unit for unit_name, routines in COMPILATION_UNITS: thunks = [] methods = [] # Generate thunks and methods for all routines for line in routines.splitlines(): line = line.strip() if not line or line.startswith('#'): continue try: name, args = line.split(None, 1) except ValueError as e: raise ValueError(f"Malformed line: {line!r}") from e args = "".join(args.split()) if 't' in args or 'T' in args: thunk, method = parse_routine(name, args, it_types) else: thunk, method = parse_routine(name, args, i_types) if name in names: raise ValueError(f"Duplicate routine {name!r}") names.append(name) thunks.append(thunk) methods.append(method) # Produce output if options.outdir: # Used by Meson (options.outdir == scipy/sparse/sparsetools) outdir = os.path.join(os.getcwd(), options.outdir) else: # Used by setup.py outdir = 
os.path.join(os.path.dirname(__file__), 'sparsetools') dst = os.path.join(outdir, unit_name + '_impl.h') if newer(__file__, dst) or options.force: if not options.outdir: # Be silent if we're using Meson. TODO: add --verbose option print(f"[generate_sparsetools] generating {dst!r}") with open(dst, 'w') as f: write_autogen_blurb(f) f.write(getter_code) for thunk in thunks: f.write(thunk) for method in methods: f.write(method) else: if not options.outdir: # Be silent if we're using Meson print(f"[generate_sparsetools] {dst!r} already up-to-date") # Generate code for method struct method_defs = "" for name in names: method_defs += f"NPY_VISIBILITY_HIDDEN PyObject *{name}_method(PyObject *, PyObject *);\n" method_struct = """\nstatic struct PyMethodDef sparsetools_methods[] = {""" for name in names: method_struct += """ {"%(name)s", (PyCFunction)%(name)s_method, METH_VARARGS, NULL},""" % dict(name=name) method_struct += """ {NULL, NULL, 0, NULL} };""" # Produce sparsetools_impl.h dst = os.path.join(outdir, 'sparsetools_impl.h') if newer(__file__, dst) or options.force: if not options.outdir: # Be silent if we're using Meson. print(f"[generate_sparsetools] generating {dst!r}") with open(dst, 'w') as f: write_autogen_blurb(f) f.write(method_defs) f.write(method_struct) else: if not options.outdir: # Be silent if we're using Meson print(f"[generate_sparsetools] {dst!r} already up-to-date") def write_autogen_blurb(stream): stream.write("""\ /* This file is autogenerated by generate_sparsetools.py * Do not edit manually or check into VCS. */ """) if __name__ == "__main__": main()
13,723
28.834783
98
py
scipy
scipy-main/scipy/sparse/_spfuncs.py
""" Functions that operate on sparse matrices """ __all__ = ['count_blocks','estimate_blocksize'] from ._base import issparse from ._csr import csr_array from ._sparsetools import csr_count_blocks def estimate_blocksize(A,efficiency=0.7): """Attempt to determine the blocksize of a sparse matrix Returns a blocksize=(r,c) such that - A.nnz / A.tobsr( (r,c) ).nnz > efficiency """ if not (issparse(A) and A.format in ("csc", "csr")): A = csr_array(A) if A.nnz == 0: return (1,1) if not 0 < efficiency < 1.0: raise ValueError('efficiency must satisfy 0.0 < efficiency < 1.0') high_efficiency = (1.0 + efficiency) / 2.0 nnz = float(A.nnz) M,N = A.shape if M % 2 == 0 and N % 2 == 0: e22 = nnz / (4 * count_blocks(A,(2,2))) else: e22 = 0.0 if M % 3 == 0 and N % 3 == 0: e33 = nnz / (9 * count_blocks(A,(3,3))) else: e33 = 0.0 if e22 > high_efficiency and e33 > high_efficiency: e66 = nnz / (36 * count_blocks(A,(6,6))) if e66 > efficiency: return (6,6) else: return (3,3) else: if M % 4 == 0 and N % 4 == 0: e44 = nnz / (16 * count_blocks(A,(4,4))) else: e44 = 0.0 if e44 > efficiency: return (4,4) elif e33 > efficiency: return (3,3) elif e22 > efficiency: return (2,2) else: return (1,1) def count_blocks(A,blocksize): """For a given blocksize=(r,c) count the number of occupied blocks in a sparse matrix A """ r,c = blocksize if r < 1 or c < 1: raise ValueError('r and c must be positive') if issparse(A): if A.format == "csr": M,N = A.shape return csr_count_blocks(M,N,r,c,A.indptr,A.indices) elif A.format == "csc": return count_blocks(A.T,(c,r)) return count_blocks(csr_array(A),blocksize)
1,987
24.818182
74
py
scipy
scipy-main/scipy/sparse/_compressed.py
"""Base class for sparse matrix formats using compressed storage.""" __all__ = [] from warnings import warn import operator import numpy as np from scipy._lib._util import _prune_array from ._base import _spbase, issparse, SparseEfficiencyWarning from ._data import _data_matrix, _minmax_mixin from . import _sparsetools from ._sparsetools import (get_csr_submatrix, csr_sample_offsets, csr_todense, csr_sample_values, csr_row_index, csr_row_slice, csr_column_index1, csr_column_index2) from ._index import IndexMixin from ._sputils import (upcast, upcast_char, to_native, isdense, isshape, getdtype, isscalarlike, isintlike, downcast_intp_index, get_sum_dtype, check_shape, is_pydata_spmatrix) class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin): """base matrix class for compressed row- and column-oriented matrices""" def __init__(self, arg1, shape=None, dtype=None, copy=False): _data_matrix.__init__(self) if issparse(arg1): if arg1.format == self.format and copy: arg1 = arg1.copy() else: arg1 = arg1.asformat(self.format) self._set_self(arg1) elif isinstance(arg1, tuple): if isshape(arg1): # It's a tuple of matrix dimensions (M, N) # create empty matrix self._shape = check_shape(arg1) M, N = self.shape # Select index dtype large enough to pass array and # scalar parameters to sparsetools idx_dtype = self._get_index_dtype(maxval=max(M, N)) self.data = np.zeros(0, getdtype(dtype, default=float)) self.indices = np.zeros(0, idx_dtype) self.indptr = np.zeros(self._swap((M, N))[0] + 1, dtype=idx_dtype) else: if len(arg1) == 2: # (data, ij) format other = self.__class__( self._coo_container(arg1, shape=shape, dtype=dtype) ) self._set_self(other) elif len(arg1) == 3: # (data, indices, indptr) format (data, indices, indptr) = arg1 # Select index dtype large enough to pass array and # scalar parameters to sparsetools maxval = None if shape is not None: maxval = max(shape) idx_dtype = self._get_index_dtype((indices, indptr), maxval=maxval, check_contents=True) self.indices = 
np.array(indices, copy=copy, dtype=idx_dtype) self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype) self.data = np.array(data, copy=copy, dtype=dtype) else: raise ValueError("unrecognized {}_matrix " "constructor usage".format(self.format)) else: # must be dense try: arg1 = np.asarray(arg1) except Exception as e: raise ValueError("unrecognized {}_matrix constructor usage" "".format(self.format)) from e self._set_self(self.__class__( self._coo_container(arg1, dtype=dtype) )) # Read matrix dimensions given, if any if shape is not None: self._shape = check_shape(shape) else: if self.shape is None: # shape not already set, try to infer dimensions try: major_dim = len(self.indptr) - 1 minor_dim = self.indices.max() + 1 except Exception as e: raise ValueError('unable to infer matrix dimensions') from e else: self._shape = check_shape(self._swap((major_dim, minor_dim))) if dtype is not None: self.data = self.data.astype(dtype, copy=False) self.check_format(full_check=False) def _getnnz(self, axis=None): if axis is None: return int(self.indptr[-1]) else: if axis < 0: axis += 2 axis, _ = self._swap((axis, 1 - axis)) _, N = self._swap(self.shape) if axis == 0: return np.bincount(downcast_intp_index(self.indices), minlength=N) elif axis == 1: return np.diff(self.indptr) raise ValueError('axis out of bounds') _getnnz.__doc__ = _spbase._getnnz.__doc__ def _set_self(self, other, copy=False): """take the member variables of other and assign them to self""" if copy: other = other.copy() self.data = other.data self.indices = other.indices self.indptr = other.indptr self._shape = check_shape(other.shape) def check_format(self, full_check=True): """Check whether the matrix respects the CSR or CSC format. Parameters ---------- full_check : bool, optional If `True`, run rigorous check, scanning arrays for valid values. Note that activating those check might copy arrays for casting, modifying indices and index pointers' inplace. If `False`, run basic checks on attributes. 
O(1) operations. Default is `True`. """ # use _swap to determine proper bounds major_name, minor_name = self._swap(('row', 'column')) major_dim, minor_dim = self._swap(self.shape) # index arrays should have integer data types if self.indptr.dtype.kind != 'i': warn("indptr array has non-integer dtype ({})" "".format(self.indptr.dtype.name), stacklevel=3) if self.indices.dtype.kind != 'i': warn("indices array has non-integer dtype ({})" "".format(self.indices.dtype.name), stacklevel=3) # check array shapes for x in [self.data.ndim, self.indices.ndim, self.indptr.ndim]: if x != 1: raise ValueError('data, indices, and indptr should be 1-D') # check index pointer if (len(self.indptr) != major_dim + 1): raise ValueError("index pointer size ({}) should be ({})" "".format(len(self.indptr), major_dim + 1)) if (self.indptr[0] != 0): raise ValueError("index pointer should start with 0") # check index and data arrays if (len(self.indices) != len(self.data)): raise ValueError("indices and data should have the same size") if (self.indptr[-1] > len(self.indices)): raise ValueError("Last value of index pointer should be less than " "the size of index and data arrays") self.prune() if full_check: # check format validity (more expensive) if self.nnz > 0: if self.indices.max() >= minor_dim: raise ValueError("{} index values must be < {}" "".format(minor_name, minor_dim)) if self.indices.min() < 0: raise ValueError("{} index values must be >= 0" "".format(minor_name)) if np.diff(self.indptr).min() < 0: raise ValueError("index pointer values must form a " "non-decreasing sequence") idx_dtype = self._get_index_dtype((self.indptr, self.indices)) self.indptr = np.asarray(self.indptr, dtype=idx_dtype) self.indices = np.asarray(self.indices, dtype=idx_dtype) self.data = to_native(self.data) # if not self.has_sorted_indices(): # warn('Indices were not in sorted order. Sorting indices.') # self.sort_indices() # assert(self.has_sorted_indices()) # TODO check for duplicates? 
####################### # Boolean comparisons # ####################### def _scalar_binopt(self, other, op): """Scalar version of self._binopt, for cases in which no new nonzeros are added. Produces a new sparse array in canonical form. """ self.sum_duplicates() res = self._with_data(op(self.data, other), copy=True) res.eliminate_zeros() return res def __eq__(self, other): # Scalar other. if isscalarlike(other): if np.isnan(other): return self.__class__(self.shape, dtype=np.bool_) if other == 0: warn("Comparing a sparse matrix with 0 using == is inefficient" ", try using != instead.", SparseEfficiencyWarning, stacklevel=3) all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) inv = self._scalar_binopt(other, operator.ne) return all_true - inv else: return self._scalar_binopt(other, operator.eq) # Dense other. elif isdense(other): return self.todense() == other # Pydata sparse other. elif is_pydata_spmatrix(other): return NotImplemented # Sparse other. elif issparse(other): warn("Comparing sparse matrices using == is inefficient, try using" " != instead.", SparseEfficiencyWarning, stacklevel=3) # TODO sparse broadcasting if self.shape != other.shape: return False elif self.format != other.format: other = other.asformat(self.format) res = self._binopt(other, '_ne_') all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) return all_true - res else: return False def __ne__(self, other): # Scalar other. 
if isscalarlike(other): if np.isnan(other): warn("Comparing a sparse matrix with nan using != is" " inefficient", SparseEfficiencyWarning, stacklevel=3) all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) return all_true elif other != 0: warn("Comparing a sparse matrix with a nonzero scalar using !=" " is inefficient, try using == instead.", SparseEfficiencyWarning, stacklevel=3) all_true = self.__class__(np.ones(self.shape), dtype=np.bool_) inv = self._scalar_binopt(other, operator.eq) return all_true - inv else: return self._scalar_binopt(other, operator.ne) # Dense other. elif isdense(other): return self.todense() != other # Pydata sparse other. elif is_pydata_spmatrix(other): return NotImplemented # Sparse other. elif issparse(other): # TODO sparse broadcasting if self.shape != other.shape: return True elif self.format != other.format: other = other.asformat(self.format) return self._binopt(other, '_ne_') else: return True def _inequality(self, other, op, op_name, bad_scalar_msg): # Scalar other. if isscalarlike(other): if 0 == other and op_name in ('_le_', '_ge_'): raise NotImplementedError(" >= and <= don't work with 0.") elif op(0, other): warn(bad_scalar_msg, SparseEfficiencyWarning) other_arr = np.empty(self.shape, dtype=np.result_type(other)) other_arr.fill(other) other_arr = self.__class__(other_arr) return self._binopt(other_arr, op_name) else: return self._scalar_binopt(other, op) # Dense other. elif isdense(other): return op(self.todense(), other) # Sparse other. 
elif issparse(other): # TODO sparse broadcasting if self.shape != other.shape: raise ValueError("inconsistent shapes") elif self.format != other.format: other = other.asformat(self.format) if op_name not in ('_ge_', '_le_'): return self._binopt(other, op_name) warn("Comparing sparse matrices using >= and <= is inefficient, " "using <, >, or !=, instead.", SparseEfficiencyWarning) all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_') return all_true - res else: raise ValueError("Operands could not be compared.") def __lt__(self, other): return self._inequality(other, operator.lt, '_lt_', "Comparing a sparse matrix with a scalar " "greater than zero using < is inefficient, " "try using >= instead.") def __gt__(self, other): return self._inequality(other, operator.gt, '_gt_', "Comparing a sparse matrix with a scalar " "less than zero using > is inefficient, " "try using <= instead.") def __le__(self, other): return self._inequality(other, operator.le, '_le_', "Comparing a sparse matrix with a scalar " "greater than zero using <= is inefficient, " "try using > instead.") def __ge__(self, other): return self._inequality(other, operator.ge, '_ge_', "Comparing a sparse matrix with a scalar " "less than zero using >= is inefficient, " "try using < instead.") ################################# # Arithmetic operator overrides # ################################# def _add_dense(self, other): if other.shape != self.shape: raise ValueError('Incompatible shapes ({} and {})' .format(self.shape, other.shape)) dtype = upcast_char(self.dtype.char, other.dtype.char) order = self._swap('CF')[0] result = np.array(other, dtype=dtype, order=order, copy=True) M, N = self._swap(self.shape) y = result if result.flags.c_contiguous else result.T csr_todense(M, N, self.indptr, self.indices, self.data, y) return self._container(result, copy=False) def _add_sparse(self, other): return self._binopt(other, '_plus_') def 
_sub_sparse(self, other): return self._binopt(other, '_minus_') def multiply(self, other): """Point-wise multiplication by another matrix, vector, or scalar. """ # Scalar multiplication. if isscalarlike(other): return self._mul_scalar(other) # Sparse matrix or vector. if issparse(other): if self.shape == other.shape: other = self.__class__(other) return self._binopt(other, '_elmul_') # Single element. elif other.shape == (1, 1): return self._mul_scalar(other.toarray()[0, 0]) elif self.shape == (1, 1): return other._mul_scalar(self.toarray()[0, 0]) # A row times a column. elif self.shape[1] == 1 and other.shape[0] == 1: return self._mul_sparse_matrix(other.tocsc()) elif self.shape[0] == 1 and other.shape[1] == 1: return other._mul_sparse_matrix(self.tocsc()) # Row vector times matrix. other is a row. elif other.shape[0] == 1 and self.shape[1] == other.shape[1]: other = self._dia_container( (other.toarray().ravel(), [0]), shape=(other.shape[1], other.shape[1]) ) return self._mul_sparse_matrix(other) # self is a row. elif self.shape[0] == 1 and self.shape[1] == other.shape[1]: copy = self._dia_container( (self.toarray().ravel(), [0]), shape=(self.shape[1], self.shape[1]) ) return other._mul_sparse_matrix(copy) # Column vector times matrix. other is a column. elif other.shape[1] == 1 and self.shape[0] == other.shape[0]: other = self._dia_container( (other.toarray().ravel(), [0]), shape=(other.shape[0], other.shape[0]) ) return other._mul_sparse_matrix(self) # self is a column. elif self.shape[1] == 1 and self.shape[0] == other.shape[0]: copy = self._dia_container( (self.toarray().ravel(), [0]), shape=(self.shape[0], self.shape[0]) ) return copy._mul_sparse_matrix(other) else: raise ValueError("inconsistent shapes") # Assume other is a dense matrix/array, which produces a single-item # object array if other isn't convertible to ndarray. other = np.atleast_2d(other) if other.ndim != 2: return np.multiply(self.toarray(), other) # Single element / wrapped object. 
if other.size == 1: return self._mul_scalar(other.flat[0]) # Fast case for trivial sparse matrix. elif self.shape == (1, 1): return np.multiply(self.toarray()[0, 0], other) ret = self.tocoo() # Matching shapes. if self.shape == other.shape: data = np.multiply(ret.data, other[ret.row, ret.col]) # Sparse row vector times... elif self.shape[0] == 1: if other.shape[1] == 1: # Dense column vector. data = np.multiply(ret.data, other) elif other.shape[1] == self.shape[1]: # Dense matrix. data = np.multiply(ret.data, other[:, ret.col]) else: raise ValueError("inconsistent shapes") row = np.repeat(np.arange(other.shape[0]), len(ret.row)) col = np.tile(ret.col, other.shape[0]) return self._coo_container( (data.view(np.ndarray).ravel(), (row, col)), shape=(other.shape[0], self.shape[1]), copy=False ) # Sparse column vector times... elif self.shape[1] == 1: if other.shape[0] == 1: # Dense row vector. data = np.multiply(ret.data[:, None], other) elif other.shape[0] == self.shape[0]: # Dense matrix. data = np.multiply(ret.data[:, None], other[ret.row]) else: raise ValueError("inconsistent shapes") row = np.repeat(ret.row, other.shape[1]) col = np.tile(np.arange(other.shape[1]), len(ret.col)) return self._coo_container( (data.view(np.ndarray).ravel(), (row, col)), shape=(self.shape[0], other.shape[1]), copy=False ) # Sparse matrix times dense row vector. elif other.shape[0] == 1 and self.shape[1] == other.shape[1]: data = np.multiply(ret.data, other[:, ret.col].ravel()) # Sparse matrix times dense column vector. 
elif other.shape[1] == 1 and self.shape[0] == other.shape[0]: data = np.multiply(ret.data, other[ret.row].ravel()) else: raise ValueError("inconsistent shapes") ret.data = data.view(np.ndarray).ravel() return ret ########################### # Multiplication handlers # ########################### def _mul_vector(self, other): M, N = self.shape # output array result = np.zeros(M, dtype=upcast_char(self.dtype.char, other.dtype.char)) # csr_matvec or csc_matvec fn = getattr(_sparsetools, self.format + '_matvec') fn(M, N, self.indptr, self.indices, self.data, other, result) return result def _mul_multivector(self, other): M, N = self.shape n_vecs = other.shape[1] # number of column vectors result = np.zeros((M, n_vecs), dtype=upcast_char(self.dtype.char, other.dtype.char)) # csr_matvecs or csc_matvecs fn = getattr(_sparsetools, self.format + '_matvecs') fn(M, N, n_vecs, self.indptr, self.indices, self.data, other.ravel(), result.ravel()) return result def _mul_sparse_matrix(self, other): M, K1 = self.shape K2, N = other.shape major_axis = self._swap((M, N))[0] other = self.__class__(other) # convert to this format idx_dtype = self._get_index_dtype((self.indptr, self.indices, other.indptr, other.indices)) fn = getattr(_sparsetools, self.format + '_matmat_maxnnz') nnz = fn(M, N, np.asarray(self.indptr, dtype=idx_dtype), np.asarray(self.indices, dtype=idx_dtype), np.asarray(other.indptr, dtype=idx_dtype), np.asarray(other.indices, dtype=idx_dtype)) idx_dtype = self._get_index_dtype((self.indptr, self.indices, other.indptr, other.indices), maxval=nnz) indptr = np.empty(major_axis + 1, dtype=idx_dtype) indices = np.empty(nnz, dtype=idx_dtype) data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype)) fn = getattr(_sparsetools, self.format + '_matmat') fn(M, N, np.asarray(self.indptr, dtype=idx_dtype), np.asarray(self.indices, dtype=idx_dtype), self.data, np.asarray(other.indptr, dtype=idx_dtype), np.asarray(other.indices, dtype=idx_dtype), other.data, indptr, indices, data) 
return self.__class__((data, indices, indptr), shape=(M, N)) def diagonal(self, k=0): rows, cols = self.shape if k <= -rows or k >= cols: return np.empty(0, dtype=self.data.dtype) fn = getattr(_sparsetools, self.format + "_diagonal") y = np.empty(min(rows + min(k, 0), cols - max(k, 0)), dtype=upcast(self.dtype)) fn(k, self.shape[0], self.shape[1], self.indptr, self.indices, self.data, y) return y diagonal.__doc__ = _spbase.diagonal.__doc__ ##################### # Other binary ops # ##################### def _maximum_minimum(self, other, npop, op_name, dense_check): if isscalarlike(other): if dense_check(other): warn("Taking maximum (minimum) with > 0 (< 0) number results" " to a dense matrix.", SparseEfficiencyWarning, stacklevel=3) other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype) other_arr.fill(other) other_arr = self.__class__(other_arr) return self._binopt(other_arr, op_name) else: self.sum_duplicates() new_data = npop(self.data, np.asarray(other)) mat = self.__class__((new_data, self.indices, self.indptr), dtype=new_data.dtype, shape=self.shape) return mat elif isdense(other): return npop(self.todense(), other) elif issparse(other): return self._binopt(other, op_name) else: raise ValueError("Operands not compatible.") def maximum(self, other): return self._maximum_minimum(other, np.maximum, '_maximum_', lambda x: np.asarray(x) > 0) maximum.__doc__ = _spbase.maximum.__doc__ def minimum(self, other): return self._maximum_minimum(other, np.minimum, '_minimum_', lambda x: np.asarray(x) < 0) minimum.__doc__ = _spbase.minimum.__doc__ ##################### # Reduce operations # ##################### def sum(self, axis=None, dtype=None, out=None): """Sum the matrix over the given axis. If the axis is None, sum over both rows and columns, returning a scalar. 
""" # The _spbase base class already does axis=0 and axis=1 efficiently # so we only do the case axis=None here if (not hasattr(self, 'blocksize') and axis in self._swap(((1, -1), (0, 2)))[0]): # faster than multiplication for large minor axis in CSC/CSR res_dtype = get_sum_dtype(self.dtype) ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype) major_index, value = self._minor_reduce(np.add) ret[major_index] = value ret = self._ascontainer(ret) if axis % 2 == 1: ret = ret.T if out is not None and out.shape != ret.shape: raise ValueError('dimensions do not match') return ret.sum(axis=(), dtype=dtype, out=out) # _spbase will handle the remaining situations when axis # is in {None, -1, 0, 1} else: return _spbase.sum(self, axis=axis, dtype=dtype, out=out) sum.__doc__ = _spbase.sum.__doc__ def _minor_reduce(self, ufunc, data=None): """Reduce nonzeros with a ufunc over the minor axis when non-empty Can be applied to a function of self.data by supplying data parameter. Warning: this does not call sum_duplicates() Returns ------- major_index : array of ints Major indices where nonzero value : array of self.dtype Reduce result for nonzeros in each major_index """ if data is None: data = self.data major_index = np.flatnonzero(np.diff(self.indptr)) value = ufunc.reduceat(data, downcast_intp_index(self.indptr[major_index])) return major_index, value ####################### # Getting and Setting # ####################### def _get_intXint(self, row, col): M, N = self._swap(self.shape) major, minor = self._swap((row, col)) indptr, indices, data = get_csr_submatrix( M, N, self.indptr, self.indices, self.data, major, major + 1, minor, minor + 1) return data.sum(dtype=self.dtype) def _get_sliceXslice(self, row, col): major, minor = self._swap((row, col)) if major.step in (1, None) and minor.step in (1, None): return self._get_submatrix(major, minor, copy=True) return self._major_slice(major)._minor_slice(minor) def _get_arrayXarray(self, row, col): # inner indexing idx_dtype = 
self.indices.dtype M, N = self._swap(self.shape) major, minor = self._swap((row, col)) major = np.asarray(major, dtype=idx_dtype) minor = np.asarray(minor, dtype=idx_dtype) val = np.empty(major.size, dtype=self.dtype) csr_sample_values(M, N, self.indptr, self.indices, self.data, major.size, major.ravel(), minor.ravel(), val) if major.ndim == 1: return self._ascontainer(val) return self.__class__(val.reshape(major.shape)) def _get_columnXarray(self, row, col): # outer indexing major, minor = self._swap((row, col)) return self._major_index_fancy(major)._minor_index_fancy(minor) def _major_index_fancy(self, idx): """Index along the major axis where idx is an array of ints. """ idx_dtype = self.indices.dtype indices = np.asarray(idx, dtype=idx_dtype).ravel() _, N = self._swap(self.shape) M = len(indices) new_shape = self._swap((M, N)) if M == 0: return self.__class__(new_shape, dtype=self.dtype) row_nnz = self.indptr[indices + 1] - self.indptr[indices] idx_dtype = self.indices.dtype res_indptr = np.zeros(M+1, dtype=idx_dtype) np.cumsum(row_nnz, out=res_indptr[1:]) nnz = res_indptr[-1] res_indices = np.empty(nnz, dtype=idx_dtype) res_data = np.empty(nnz, dtype=self.dtype) csr_row_index(M, indices, self.indptr, self.indices, self.data, res_indices, res_data) return self.__class__((res_data, res_indices, res_indptr), shape=new_shape, copy=False) def _major_slice(self, idx, copy=False): """Index along the major axis where idx is a slice object. 
""" if idx == slice(None): return self.copy() if copy else self M, N = self._swap(self.shape) start, stop, step = idx.indices(M) M = len(range(start, stop, step)) new_shape = self._swap((M, N)) if M == 0: return self.__class__(new_shape, dtype=self.dtype) # Work out what slices are needed for `row_nnz` # start,stop can be -1, only if step is negative start0, stop0 = start, stop if stop == -1 and start >= 0: stop0 = None start1, stop1 = start + 1, stop + 1 row_nnz = self.indptr[start1:stop1:step] - \ self.indptr[start0:stop0:step] idx_dtype = self.indices.dtype res_indptr = np.zeros(M+1, dtype=idx_dtype) np.cumsum(row_nnz, out=res_indptr[1:]) if step == 1: all_idx = slice(self.indptr[start], self.indptr[stop]) res_indices = np.array(self.indices[all_idx], copy=copy) res_data = np.array(self.data[all_idx], copy=copy) else: nnz = res_indptr[-1] res_indices = np.empty(nnz, dtype=idx_dtype) res_data = np.empty(nnz, dtype=self.dtype) csr_row_slice(start, stop, step, self.indptr, self.indices, self.data, res_indices, res_data) return self.__class__((res_data, res_indices, res_indptr), shape=new_shape, copy=False) def _minor_index_fancy(self, idx): """Index along the minor axis where idx is an array of ints. 
""" idx_dtype = self.indices.dtype idx = np.asarray(idx, dtype=idx_dtype).ravel() M, N = self._swap(self.shape) k = len(idx) new_shape = self._swap((M, k)) if k == 0: return self.__class__(new_shape, dtype=self.dtype) # pass 1: count idx entries and compute new indptr col_offsets = np.zeros(N, dtype=idx_dtype) res_indptr = np.empty_like(self.indptr) csr_column_index1(k, idx, M, N, self.indptr, self.indices, col_offsets, res_indptr) # pass 2: copy indices/data for selected idxs col_order = np.argsort(idx).astype(idx_dtype, copy=False) nnz = res_indptr[-1] res_indices = np.empty(nnz, dtype=idx_dtype) res_data = np.empty(nnz, dtype=self.dtype) csr_column_index2(col_order, col_offsets, len(self.indices), self.indices, self.data, res_indices, res_data) return self.__class__((res_data, res_indices, res_indptr), shape=new_shape, copy=False) def _minor_slice(self, idx, copy=False): """Index along the minor axis where idx is a slice object. """ if idx == slice(None): return self.copy() if copy else self M, N = self._swap(self.shape) start, stop, step = idx.indices(N) N = len(range(start, stop, step)) if N == 0: return self.__class__(self._swap((M, N)), dtype=self.dtype) if step == 1: return self._get_submatrix(minor=idx, copy=copy) # TODO: don't fall back to fancy indexing here return self._minor_index_fancy(np.arange(start, stop, step)) def _get_submatrix(self, major=None, minor=None, copy=False): """Return a submatrix of this matrix. 
major, minor: None, int, or slice with step 1 """ M, N = self._swap(self.shape) i0, i1 = _process_slice(major, M) j0, j1 = _process_slice(minor, N) if i0 == 0 and j0 == 0 and i1 == M and j1 == N: return self.copy() if copy else self indptr, indices, data = get_csr_submatrix( M, N, self.indptr, self.indices, self.data, i0, i1, j0, j1) shape = self._swap((i1 - i0, j1 - j0)) return self.__class__((data, indices, indptr), shape=shape, dtype=self.dtype, copy=False) def _set_intXint(self, row, col, x): i, j = self._swap((row, col)) self._set_many(i, j, x) def _set_arrayXarray(self, row, col, x): i, j = self._swap((row, col)) self._set_many(i, j, x) def _set_arrayXarray_sparse(self, row, col, x): # clear entries that will be overwritten self._zero_many(*self._swap((row, col))) M, N = row.shape # matches col.shape broadcast_row = M != 1 and x.shape[0] == 1 broadcast_col = N != 1 and x.shape[1] == 1 r, c = x.row, x.col x = np.asarray(x.data, dtype=self.dtype) if x.size == 0: return if broadcast_row: r = np.repeat(np.arange(M), len(r)) c = np.tile(c, M) x = np.tile(x, M) if broadcast_col: r = np.repeat(r, N) c = np.tile(np.arange(N), len(c)) x = np.repeat(x, N) # only assign entries in the new sparsity structure i, j = self._swap((row[r, c], col[r, c])) self._set_many(i, j, x) def _setdiag(self, values, k): if 0 in self.shape: return M, N = self.shape broadcast = (values.ndim == 0) if k < 0: if broadcast: max_index = min(M + k, N) else: max_index = min(M + k, N, len(values)) i = np.arange(max_index, dtype=self.indices.dtype) j = np.arange(max_index, dtype=self.indices.dtype) i -= k else: if broadcast: max_index = min(M, N - k) else: max_index = min(M, N - k, len(values)) i = np.arange(max_index, dtype=self.indices.dtype) j = np.arange(max_index, dtype=self.indices.dtype) j += k if not broadcast: values = values[:len(i)] self[i, j] = values def _prepare_indices(self, i, j): M, N = self._swap(self.shape) def check_bounds(indices, bound): idx = indices.max() if idx >= bound: 
raise IndexError('index (%d) out of range (>= %d)' % (idx, bound)) idx = indices.min() if idx < -bound: raise IndexError('index (%d) out of range (< -%d)' % (idx, bound)) i = np.array(i, dtype=self.indices.dtype, copy=False, ndmin=1).ravel() j = np.array(j, dtype=self.indices.dtype, copy=False, ndmin=1).ravel() check_bounds(i, M) check_bounds(j, N) return i, j, M, N def _set_many(self, i, j, x): """Sets value at each (i, j) to x Here (i,j) index major and minor respectively, and must not contain duplicate entries. """ i, j, M, N = self._prepare_indices(i, j) x = np.array(x, dtype=self.dtype, copy=False, ndmin=1).ravel() n_samples = x.size offsets = np.empty(n_samples, dtype=self.indices.dtype) ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, i, j, offsets) if ret == 1: # rinse and repeat self.sum_duplicates() csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, i, j, offsets) if -1 not in offsets: # only affects existing non-zero cells self.data[offsets] = x return else: warn("Changing the sparsity structure of a {}_matrix is expensive." " lil_matrix is more efficient.".format(self.format), SparseEfficiencyWarning, stacklevel=3) # replace where possible mask = offsets > -1 self.data[offsets[mask]] = x[mask] # only insertions remain mask = ~mask i = i[mask] i[i < 0] += M j = j[mask] j[j < 0] += N self._insert_many(i, j, x[mask]) def _zero_many(self, i, j): """Sets value at each (i, j) to zero, preserving sparsity structure. Here (i,j) index major and minor respectively. 
""" i, j, M, N = self._prepare_indices(i, j) n_samples = len(i) offsets = np.empty(n_samples, dtype=self.indices.dtype) ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, i, j, offsets) if ret == 1: # rinse and repeat self.sum_duplicates() csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, i, j, offsets) # only assign zeros to the existing sparsity structure self.data[offsets[offsets > -1]] = 0 def _insert_many(self, i, j, x): """Inserts new nonzero at each (i, j) with value x Here (i,j) index major and minor respectively. i, j and x must be non-empty, 1d arrays. Inserts each major group (e.g. all entries per row) at a time. Maintains has_sorted_indices property. Modifies i, j, x in place. """ order = np.argsort(i, kind='mergesort') # stable for duplicates i = i.take(order, mode='clip') j = j.take(order, mode='clip') x = x.take(order, mode='clip') do_sort = self.has_sorted_indices # Update index data type idx_dtype = self._get_index_dtype((self.indices, self.indptr), maxval=(self.indptr[-1] + x.size)) self.indptr = np.asarray(self.indptr, dtype=idx_dtype) self.indices = np.asarray(self.indices, dtype=idx_dtype) i = np.asarray(i, dtype=idx_dtype) j = np.asarray(j, dtype=idx_dtype) # Collate old and new in chunks by major index indices_parts = [] data_parts = [] ui, ui_indptr = np.unique(i, return_index=True) ui_indptr = np.append(ui_indptr, len(j)) new_nnzs = np.diff(ui_indptr) prev = 0 for c, (ii, js, je) in enumerate(zip(ui, ui_indptr, ui_indptr[1:])): # old entries start = self.indptr[prev] stop = self.indptr[ii] indices_parts.append(self.indices[start:stop]) data_parts.append(self.data[start:stop]) # handle duplicate j: keep last setting uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True) if len(uj) == je - js: indices_parts.append(j[js:je]) data_parts.append(x[js:je]) else: indices_parts.append(j[js:je][::-1][uj_indptr]) data_parts.append(x[js:je][::-1][uj_indptr]) new_nnzs[c] = len(uj) prev = ii # remaining old entries 
start = self.indptr[ii] indices_parts.append(self.indices[start:]) data_parts.append(self.data[start:]) # update attributes self.indices = np.concatenate(indices_parts) self.data = np.concatenate(data_parts) nnzs = np.empty(self.indptr.shape, dtype=idx_dtype) nnzs[0] = idx_dtype(0) indptr_diff = np.diff(self.indptr) indptr_diff[ui] += new_nnzs nnzs[1:] = indptr_diff self.indptr = np.cumsum(nnzs, out=nnzs) if do_sort: # TODO: only sort where necessary self.has_sorted_indices = False self.sort_indices() self.check_format(full_check=False) ###################### # Conversion methods # ###################### def tocoo(self, copy=True): major_dim, minor_dim = self._swap(self.shape) minor_indices = self.indices major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype) _sparsetools.expandptr(major_dim, self.indptr, major_indices) row, col = self._swap((major_indices, minor_indices)) return self._coo_container( (self.data, (row, col)), self.shape, copy=copy, dtype=self.dtype ) tocoo.__doc__ = _spbase.tocoo.__doc__ def toarray(self, order=None, out=None): if out is None and order is None: order = self._swap('cf')[0] out = self._process_toarray_args(order, out) if not (out.flags.c_contiguous or out.flags.f_contiguous): raise ValueError('Output array must be C or F contiguous') # align ideal order with output array order if out.flags.c_contiguous: x = self.tocsr() y = out else: x = self.tocsc() y = out.T M, N = x._swap(x.shape) csr_todense(M, N, x.indptr, x.indices, x.data, y) return out toarray.__doc__ = _spbase.toarray.__doc__ ############################################################## # methods that examine or modify the internal data structure # ############################################################## def eliminate_zeros(self): """Remove zero entries from the matrix This is an *in place* operation. 
""" M, N = self._swap(self.shape) _sparsetools.csr_eliminate_zeros(M, N, self.indptr, self.indices, self.data) self.prune() # nnz may have changed def __get_has_canonical_format(self): """Determine whether the matrix has sorted indices and no duplicates Returns - True: if the above applies - False: otherwise has_canonical_format implies has_sorted_indices, so if the latter flag is False, so will the former be; if the former is found True, the latter flag is also set. """ # first check to see if result was cached if not getattr(self, '_has_sorted_indices', True): # not sorted => not canonical self._has_canonical_format = False elif not hasattr(self, '_has_canonical_format'): self.has_canonical_format = bool( _sparsetools.csr_has_canonical_format( len(self.indptr) - 1, self.indptr, self.indices)) return self._has_canonical_format def __set_has_canonical_format(self, val): self._has_canonical_format = bool(val) if val: self.has_sorted_indices = True has_canonical_format = property(fget=__get_has_canonical_format, fset=__set_has_canonical_format) def sum_duplicates(self): """Eliminate duplicate matrix entries by adding them together This is an *in place* operation. 
""" if self.has_canonical_format: return self.sort_indices() M, N = self._swap(self.shape) _sparsetools.csr_sum_duplicates(M, N, self.indptr, self.indices, self.data) self.prune() # nnz may have changed self.has_canonical_format = True def __get_sorted(self): """Determine whether the matrix has sorted indices Returns - True: if the indices of the matrix are in sorted order - False: otherwise """ # first check to see if result was cached if not hasattr(self, '_has_sorted_indices'): self._has_sorted_indices = bool( _sparsetools.csr_has_sorted_indices( len(self.indptr) - 1, self.indptr, self.indices)) return self._has_sorted_indices def __set_sorted(self, val): self._has_sorted_indices = bool(val) has_sorted_indices = property(fget=__get_sorted, fset=__set_sorted) def sorted_indices(self): """Return a copy of this matrix with sorted indices """ A = self.copy() A.sort_indices() return A # an alternative that has linear complexity is the following # although the previous option is typically faster # return self.toother().toother() def sort_indices(self): """Sort the indices of this matrix *in place* """ if not self.has_sorted_indices: _sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr, self.indices, self.data) self.has_sorted_indices = True def prune(self): """Remove empty space after all non-zero elements. """ major_dim = self._swap(self.shape)[0] if len(self.indptr) != major_dim + 1: raise ValueError('index pointer has invalid length') if len(self.indices) < self.nnz: raise ValueError('indices array has fewer than nnz elements') if len(self.data) < self.nnz: raise ValueError('data array has fewer than nnz elements') self.indices = _prune_array(self.indices[:self.nnz]) self.data = _prune_array(self.data[:self.nnz]) def resize(self, *shape): shape = check_shape(shape) if hasattr(self, 'blocksize'): bm, bn = self.blocksize new_M, rm = divmod(shape[0], bm) new_N, rn = divmod(shape[1], bn) if rm or rn: raise ValueError("shape must be divisible into {} blocks. 
" "Got {}".format(self.blocksize, shape)) M, N = self.shape[0] // bm, self.shape[1] // bn else: new_M, new_N = self._swap(shape) M, N = self._swap(self.shape) if new_M < M: self.indices = self.indices[:self.indptr[new_M]] self.data = self.data[:self.indptr[new_M]] self.indptr = self.indptr[:new_M + 1] elif new_M > M: self.indptr = np.resize(self.indptr, new_M + 1) self.indptr[M + 1:].fill(self.indptr[M]) if new_N < N: mask = self.indices < new_N if not np.all(mask): self.indices = self.indices[mask] self.data = self.data[mask] major_index, val = self._minor_reduce(np.add, mask) self.indptr.fill(0) self.indptr[1:][major_index] = val np.cumsum(self.indptr, out=self.indptr) self._shape = shape resize.__doc__ = _spbase.resize.__doc__ ################### # utility methods # ################### # needed by _data_matrix def _with_data(self, data, copy=True): """Returns a matrix with the same sparsity structure as self, but with different data. By default the structure arrays (i.e. .indptr and .indices) are copied. """ if copy: return self.__class__((data, self.indices.copy(), self.indptr.copy()), shape=self.shape, dtype=data.dtype) else: return self.__class__((data, self.indices, self.indptr), shape=self.shape, dtype=data.dtype) def _binopt(self, other, op): """apply the binary operation fn to two sparse matrices.""" other = self.__class__(other) # e.g. csr_plus_csr, csr_minus_csr, etc. 
fn = getattr(_sparsetools, self.format + op + self.format) maxnnz = self.nnz + other.nnz idx_dtype = self._get_index_dtype((self.indptr, self.indices, other.indptr, other.indices), maxval=maxnnz) indptr = np.empty(self.indptr.shape, dtype=idx_dtype) indices = np.empty(maxnnz, dtype=idx_dtype) bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_'] if op in bool_ops: data = np.empty(maxnnz, dtype=np.bool_) else: data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype)) fn(self.shape[0], self.shape[1], np.asarray(self.indptr, dtype=idx_dtype), np.asarray(self.indices, dtype=idx_dtype), self.data, np.asarray(other.indptr, dtype=idx_dtype), np.asarray(other.indices, dtype=idx_dtype), other.data, indptr, indices, data) A = self.__class__((data, indices, indptr), shape=self.shape) A.prune() return A def _divide_sparse(self, other): """ Divide this matrix by a second sparse matrix. """ if other.shape != self.shape: raise ValueError('inconsistent shapes') r = self._binopt(other, '_eldiv_') if np.issubdtype(r.dtype, np.inexact): # Eldiv leaves entries outside the combined sparsity # pattern empty, so they must be filled manually. # Everything outside of other's sparsity is NaN, and everything # inside it is either zero or defined by eldiv. out = np.empty(self.shape, dtype=self.dtype) out.fill(np.nan) row, col = other.nonzero() out[row, col] = 0 r = r.tocoo() out[r.row, r.col] = r.data out = self._container(out) else: # integers types go with nan <-> 0 out = r return out def _process_slice(sl, num): if sl is None: i0, i1 = 0, num elif isinstance(sl, slice): i0, i1, stride = sl.indices(num) if stride != 1: raise ValueError('slicing with step != 1 not supported') i0 = min(i0, i1) # give an empty slice when i0 > i1 elif isintlike(sl): if sl < 0: sl += num i0, i1 = sl, sl + 1 if i0 < 0 or i1 > num: raise IndexError('index out of bounds: 0 <= %d < %d <= %d' % (i0, i1, num)) else: raise TypeError('expected slice or scalar') return i0, i1
51,332
37.859198
106
py
scipy
scipy-main/scipy/sparse/_base.py
"""Base class for sparse matrices""" from warnings import warn import numpy as np from ._sputils import (asmatrix, check_reshape_kwargs, check_shape, get_sum_dtype, isdense, isscalarlike, matrix, validateaxis,) from ._matrix import spmatrix __all__ = ['isspmatrix', 'issparse', 'sparray', 'SparseWarning', 'SparseEfficiencyWarning'] class SparseWarning(Warning): pass class SparseFormatWarning(SparseWarning): pass class SparseEfficiencyWarning(SparseWarning): pass # The formats that we might potentially understand. _formats = {'csc': [0, "Compressed Sparse Column"], 'csr': [1, "Compressed Sparse Row"], 'dok': [2, "Dictionary Of Keys"], 'lil': [3, "List of Lists"], 'dod': [4, "Dictionary of Dictionaries"], 'sss': [5, "Symmetric Sparse Skyline"], 'coo': [6, "COOrdinate"], 'lba': [7, "Linpack BAnded"], 'egd': [8, "Ellpack-itpack Generalized Diagonal"], 'dia': [9, "DIAgonal"], 'bsr': [10, "Block Sparse Row"], 'msr': [11, "Modified compressed Sparse Row"], 'bsc': [12, "Block Sparse Column"], 'msc': [13, "Modified compressed Sparse Column"], 'ssk': [14, "Symmetric SKyline"], 'nsk': [15, "Nonsymmetric SKyline"], 'jad': [16, "JAgged Diagonal"], 'uss': [17, "Unsymmetric Sparse Skyline"], 'vbr': [18, "Variable Block Row"], 'und': [19, "Undefined"] } # These univariate ufuncs preserve zeros. _ufuncs_with_fixed_point_at_zero = frozenset([ np.sin, np.tan, np.arcsin, np.arctan, np.sinh, np.tanh, np.arcsinh, np.arctanh, np.rint, np.sign, np.expm1, np.log1p, np.deg2rad, np.rad2deg, np.floor, np.ceil, np.trunc, np.sqrt]) MAXPRINT = 50 class _spbase: """ This class provides a base class for all sparse arrays. It cannot be instantiated. Most of the work is provided by subclasses. 
""" __array_priority__ = 10.1 _format = 'und' # undefined ndim = 2 @property def _bsr_container(self): from ._bsr import bsr_array return bsr_array @property def _coo_container(self): from ._coo import coo_array return coo_array @property def _csc_container(self): from ._csc import csc_array return csc_array @property def _csr_container(self): from ._csr import csr_array return csr_array @property def _dia_container(self): from ._dia import dia_array return dia_array @property def _dok_container(self): from ._dok import dok_array return dok_array @property def _lil_container(self): from ._lil import lil_array return lil_array _is_array = True def __init__(self, maxprint=MAXPRINT): self._shape = None if self.__class__.__name__ == '_spbase': raise ValueError("This class is not intended" " to be instantiated directly.") self.maxprint = maxprint # Use this in 1.13.0 and later: # # @property # def shape(self): # return self._shape def reshape(self, *args, **kwargs): """reshape(self, shape, order='C', copy=False) Gives a new shape to a sparse array without changing its data. Parameters ---------- shape : length-2 tuple of ints The new shape should be compatible with the original shape. order : {'C', 'F'}, optional Read the elements using this index order. 'C' means to read and write the elements using C-like index order; e.g., read entire first row, then second row, etc. 'F' means to read and write the elements using Fortran-like index order; e.g., read entire first column, then second column, etc. copy : bool, optional Indicates whether or not attributes of self should be copied whenever possible. The degree to which attributes are copied varies depending on the type of sparse array being used. Returns ------- reshaped : sparse array A sparse array with the given `shape`, not necessarily of the same format as the current object. 
See Also -------- numpy.reshape : NumPy's implementation of 'reshape' for ndarrays """ # If the shape already matches, don't bother doing an actual reshape # Otherwise, the default is to convert to COO and use its reshape shape = check_shape(args, self.shape) order, copy = check_reshape_kwargs(kwargs) if shape == self.shape: if copy: return self.copy() else: return self return self.tocoo(copy=copy).reshape(shape, order=order, copy=False) def resize(self, shape): """Resize the array in-place to dimensions given by ``shape`` Any elements that lie within the new shape will remain at the same indices, while non-zero elements lying outside the new shape are removed. Parameters ---------- shape : (int, int) number of rows and columns in the new array Notes ----- The semantics are not identical to `numpy.ndarray.resize` or `numpy.resize`. Here, the same data will be maintained at each index before and after reshape, if that index is within the new bounds. In numpy, resizing maintains contiguity of the array, moving elements around in the logical array but not within a flattened representation. We give no guarantees about whether the underlying data attributes (arrays, etc.) will be modified in place or replaced with new objects. """ # As an inplace operation, this requires implementation in each format. raise NotImplementedError( f'{type(self).__name__}.resize is not implemented') def astype(self, dtype, casting='unsafe', copy=True): """Cast the array elements to a specified type. Parameters ---------- dtype : string or numpy dtype Typecode or data-type to which to cast the data. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. Defaults to 'unsafe' for backwards compatibility. 'no' means the data types should not be cast at all. 'equiv' means only byte-order changes are allowed. 'safe' means only casts which can preserve values are allowed. 
'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. 'unsafe' means any data conversions may be done. copy : bool, optional If `copy` is `False`, the result might share some memory with this array. If `copy` is `True`, it is guaranteed that the result and this array do not share any memory. """ dtype = np.dtype(dtype) if self.dtype != dtype: return self.tocsr().astype( dtype, casting=casting, copy=copy).asformat(self.format) elif copy: return self.copy() else: return self @classmethod def _ascontainer(cls, X, **kwargs): if cls._is_array: return np.asarray(X, **kwargs) else: return asmatrix(X, **kwargs) @classmethod def _container(cls, X, **kwargs): if cls._is_array: return np.array(X, **kwargs) else: return matrix(X, **kwargs) def _asfptype(self): """Upcast array to a floating point format (if necessary)""" fp_types = ['f', 'd', 'F', 'D'] if self.dtype.char in fp_types: return self else: for fp_type in fp_types: if self.dtype <= np.dtype(fp_type): return self.astype(fp_type) raise TypeError('cannot upcast [%s] to a floating ' 'point format' % self.dtype.name) def __iter__(self): for r in range(self.shape[0]): yield self[r, :] def _getmaxprint(self): """Maximum number of elements to display when printed.""" return self.maxprint def count_nonzero(self): """Number of non-zero entries, equivalent to np.count_nonzero(a.toarray()) Unlike the nnz property, which return the number of stored entries (the length of the data attribute), this method counts the actual number of non-zero entries in data. """ raise NotImplementedError("count_nonzero not implemented for %s." % self.__class__.__name__) def _getnnz(self, axis=None): """Number of stored values, including explicit zeros. Parameters ---------- axis : None, 0, or 1 Select between the number of values across the whole array, in each column, or in each row. See also -------- count_nonzero : Number of non-zero entries """ raise NotImplementedError("getnnz not implemented for %s." 
                                  % self.__class__.__name__)

    @property
    def nnz(self):
        """Number of stored values, including explicit zeros.

        See also
        --------
        count_nonzero : Number of non-zero entries
        """
        return self._getnnz()

    @property
    def format(self):
        # Storage-format code ('csr', 'coo', ...); set via each
        # subclass's ``_format`` class attribute.
        return self._format

    def __repr__(self):
        # e.g. <3x4 sparse array of type '<class 'numpy.float64'>'
        #       with 5 stored elements in Compressed Sparse Row format>
        _, format_name = _formats[self.format]
        sparse_cls = 'array' if self._is_array else 'matrix'
        return f"<%dx%d sparse {sparse_cls} of type '%s'\n" \
               "\twith %d stored elements in %s format>" % \
               (self.shape + (self.dtype.type, self.nnz, format_name))

    def __str__(self):
        # Triplet listing "(i, j)  value"; elided around the middle when
        # there are more than maxprint stored entries.
        maxprint = self._getmaxprint()

        A = self.tocoo()

        # helper function, outputs "(i,j)  v"
        def tostr(row, col, data):
            triples = zip(list(zip(row, col)), data)
            return '\n'.join([('  %s\t%s' % t) for t in triples])

        if self.nnz > maxprint:
            half = maxprint // 2
            out = tostr(A.row[:half], A.col[:half], A.data[:half])
            out += "\n  :\t:\n"
            # Show the remaining maxprint - maxprint//2 entries from the end.
            half = maxprint - maxprint//2
            out += tostr(A.row[-half:], A.col[-half:], A.data[-half:])
        else:
            out = tostr(A.row, A.col, A.data)

        return out

    def __bool__(self):  # Simple -- other ideas?
        # Only a 1x1 array has an unambiguous truth value (its single entry).
        if self.shape == (1, 1):
            return self.nnz != 0
        else:
            raise ValueError("The truth value of an array with more than one "
                             "element is ambiguous. Use a.any() or a.all().")
    __nonzero__ = __bool__

    # What should len(sparse) return? For consistency with dense matrices,
    # perhaps it should be the number of rows?  But for some uses the number of
    # non-zeros is more important.  For now, raise an exception!
    def __len__(self):
        raise TypeError("sparse array length is ambiguous; use getnnz()"
                        " or shape[0]")

    def asformat(self, format, copy=False):
        """Return this array in the passed format.

        Parameters
        ----------
        format : {str, None}
            The desired sparse format ("csr", "csc", "lil", "dok", "array", ...)
            or None for no conversion.
        copy : bool, optional
            If True, the result is guaranteed to not share data with self.

        Returns
        -------
        A : This array in the passed format.
""" if format is None or format == self.format: if copy: return self.copy() else: return self else: try: convert_method = getattr(self, 'to' + format) except AttributeError as e: raise ValueError(f'Format {format} is unknown.') from e # Forward the copy kwarg, if it's accepted. try: return convert_method(copy=copy) except TypeError: return convert_method() ################################################################### # NOTE: All arithmetic operations use csr_matrix by default. # Therefore a new sparse array format just needs to define a # .tocsr() method to provide arithmetic support. Any of these # methods can be overridden for efficiency. #################################################################### def multiply(self, other): """Point-wise multiplication by another array """ return self.tocsr().multiply(other) def maximum(self, other): """Element-wise maximum between this and another array.""" return self.tocsr().maximum(other) def minimum(self, other): """Element-wise minimum between this and another array.""" return self.tocsr().minimum(other) def dot(self, other): """Ordinary dot product Examples -------- >>> import numpy as np >>> from scipy.sparse import csr_array >>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]]) >>> v = np.array([1, 0, -1]) >>> A.dot(v) array([ 1, -3, -1], dtype=int64) """ if np.isscalar(other): return self * other else: return self @ other def power(self, n, dtype=None): """Element-wise power.""" return self.tocsr().power(n, dtype=dtype) def __eq__(self, other): return self.tocsr().__eq__(other) def __ne__(self, other): return self.tocsr().__ne__(other) def __lt__(self, other): return self.tocsr().__lt__(other) def __gt__(self, other): return self.tocsr().__gt__(other) def __le__(self, other): return self.tocsr().__le__(other) def __ge__(self, other): return self.tocsr().__ge__(other) def __abs__(self): return abs(self.tocsr()) def __round__(self, ndigits=0): return round(self.tocsr(), ndigits=ndigits) def _add_sparse(self, 
other): return self.tocsr()._add_sparse(other) def _add_dense(self, other): return self.tocoo()._add_dense(other) def _sub_sparse(self, other): return self.tocsr()._sub_sparse(other) def _sub_dense(self, other): return self.todense() - other def _rsub_dense(self, other): # note: this can't be replaced by other + (-self) for unsigned types return other - self.todense() def __add__(self, other): # self + other if isscalarlike(other): if other == 0: return self.copy() # Now we would add this scalar to every element. raise NotImplementedError('adding a nonzero scalar to a ' 'sparse array is not supported') elif issparse(other): if other.shape != self.shape: raise ValueError("inconsistent shapes") return self._add_sparse(other) elif isdense(other): other = np.broadcast_to(other, self.shape) return self._add_dense(other) else: return NotImplemented def __radd__(self,other): # other + self return self.__add__(other) def __sub__(self, other): # self - other if isscalarlike(other): if other == 0: return self.copy() raise NotImplementedError('subtracting a nonzero scalar from a ' 'sparse array is not supported') elif issparse(other): if other.shape != self.shape: raise ValueError("inconsistent shapes") return self._sub_sparse(other) elif isdense(other): other = np.broadcast_to(other, self.shape) return self._sub_dense(other) else: return NotImplemented def __rsub__(self,other): # other - self if isscalarlike(other): if other == 0: return -self.copy() raise NotImplementedError('subtracting a sparse array from a ' 'nonzero scalar is not supported') elif isdense(other): other = np.broadcast_to(other, self.shape) return self._rsub_dense(other) else: return NotImplemented def _mul_dispatch(self, other): """`np.matrix`-compatible mul, i.e. 
`dot` or `NotImplemented` interpret other and call one of the following self._mul_scalar() self._mul_vector() self._mul_multivector() self._mul_sparse_matrix() """ # This method has to be different from `__matmul__` because it is also # called by sparse matrix classes. M, N = self.shape if other.__class__ is np.ndarray: # Fast path for the most common case if other.shape == (N,): return self._mul_vector(other) elif other.shape == (N, 1): return self._mul_vector(other.ravel()).reshape(M, 1) elif other.ndim == 2 and other.shape[0] == N: return self._mul_multivector(other) if isscalarlike(other): # scalar value return self._mul_scalar(other) if issparse(other): if self.shape[1] != other.shape[0]: raise ValueError('dimension mismatch') return self._mul_sparse_matrix(other) # If it's a list or whatever, treat it like an array other_a = np.asanyarray(other) if other_a.ndim == 0 and other_a.dtype == np.object_: # Not interpretable as an array; return NotImplemented so that # other's __rmul__ can kick in if that's implemented. 
            return NotImplemented

        try:
            other.shape
        except AttributeError:
            # No shape attribute at all -- use the ndarray view built above
            # so the dense-vector/multivector paths below apply.
            other = other_a

        if other.ndim == 1 or other.ndim == 2 and other.shape[1] == 1:
            # dense row or column vector
            if other.shape != (N,) and other.shape != (N, 1):
                raise ValueError('dimension mismatch')

            result = self._mul_vector(np.ravel(other))

            if isinstance(other, np.matrix):
                # np.matrix in, matrix-like container out (spmatrix API).
                result = self._ascontainer(result)

            if other.ndim == 2 and other.shape[1] == 1:
                # If 'other' was an (nx1) column vector, reshape the result
                result = result.reshape(-1, 1)

            return result

        elif other.ndim == 2:
            ##
            # dense 2D array or matrix ("multivector")

            if other.shape[0] != self.shape[1]:
                raise ValueError('dimension mismatch')

            result = self._mul_multivector(np.asarray(other))
            if isinstance(other, np.matrix):
                result = self._ascontainer(result)
            return result

        else:
            raise ValueError('could not interpret dimensions')

    def __mul__(self, *args, **kwargs):
        # For sparse arrays, `*` is elementwise: delegate to multiply().
        return self.multiply(*args, **kwargs)

    # by default, use CSR for __mul__ handlers
    def _mul_scalar(self, other):
        # scalar times every stored entry
        return self.tocsr()._mul_scalar(other)

    def _mul_vector(self, other):
        # sparse @ 1-D dense vector
        return self.tocsr()._mul_vector(other)

    def _mul_multivector(self, other):
        # sparse @ 2-D dense array ("multivector")
        return self.tocsr()._mul_multivector(other)

    def _mul_sparse_matrix(self, other):
        # sparse @ sparse
        return self.tocsr()._mul_sparse_matrix(other)

    def _rmul_dispatch(self, other):
        # Dispatch for the reflected product ``other @ self``.
        if isscalarlike(other):
            return self._mul_scalar(other)
        else:
            # Don't use asarray unless we have to
            try:
                tr = other.transpose()
            except AttributeError:
                tr = np.asarray(other).transpose()
            # Compute other @ self as (self.T @ other.T).T.
            ret = self.transpose()._mul_dispatch(tr)
            if ret is NotImplemented:
                return NotImplemented
            return ret.transpose()

    def __rmul__(self, *args, **kwargs):  # other * self
        # Elementwise multiplication is commutative, so reuse multiply().
        return self.multiply(*args, **kwargs)

    #######################
    # matmul (@) operator #
    #######################

    def __matmul__(self, other):
        if isscalarlike(other):
            raise ValueError("Scalar operands are not allowed, "
                             "use '*' instead")
        return self._mul_dispatch(other)

    def __rmatmul__(self, other):
        if isscalarlike(other):
            raise ValueError("Scalar operands are not allowed, "
                             "use '*' instead")
        return self._rmul_dispatch(other)

    ####################
    # Other Arithmetic #
    ####################

    def _divide(self, other, true_divide=False, rdivide=False):
        # Shared implementation behind __truediv__/__div__; with
        # rdivide=True it computes the reflected form ``other / self``.
        if isscalarlike(other):
            if rdivide:
                if true_divide:
                    return np.true_divide(other, self.todense())
                else:
                    return np.divide(other, self.todense())

            # NOTE(review): np.float_ was removed in NumPy 2.0;
            # np.float64 is the forward-compatible spelling.
            if true_divide and np.can_cast(self.dtype, np.float_):
                return self.astype(np.float_)._mul_scalar(1./other)
            else:
                r = self._mul_scalar(1./other)

                scalar_dtype = np.asarray(other).dtype
                if (np.issubdtype(self.dtype, np.integer) and
                        np.issubdtype(scalar_dtype, np.integer)):
                    # int / int without true division: keep integer dtype.
                    return r.astype(self.dtype)
                else:
                    return r

        elif isdense(other):
            if not rdivide:
                if true_divide:
                    recip = np.true_divide(1., other)
                else:
                    recip = np.divide(1., other)
                # Multiply by the reciprocal so the result stays sparse.
                return self.multiply(recip)
            else:
                # other / self divides by the implicit zeros, so densify.
                if true_divide:
                    return np.true_divide(other, self.todense())
                else:
                    return np.divide(other, self.todense())
        elif issparse(other):
            if rdivide:
                return other._divide(self, true_divide, rdivide=False)

            self_csr = self.tocsr()
            if true_divide and np.can_cast(self.dtype, np.float_):
                return self_csr.astype(np.float_)._divide_sparse(other)
            else:
                return self_csr._divide_sparse(other)
        else:
            return NotImplemented

    def __truediv__(self, other):
        return self._divide(other, true_divide=True)

    def __div__(self, other):
        # Always do true division
        return self._divide(other, true_divide=True)

    def __rtruediv__(self, other):
        # Implementing this as the inverse would be too magical -- bail out
        return NotImplemented

    def __rdiv__(self, other):
        # Implementing this as the inverse would be too magical -- bail out
        return NotImplemented

    def __neg__(self):
        return -self.tocsr()

    # In-place arithmetic is unsupported here; returning NotImplemented
    # makes e.g. ``a += b`` fall back to ``a = a + b``.
    def __iadd__(self, other):
        return NotImplemented

    def __isub__(self, other):
        return NotImplemented

    def __imul__(self, other):
        return NotImplemented

    def __idiv__(self, other):
        return self.__itruediv__(other)

    def __itruediv__(self, other):
        return NotImplemented

    def __pow__(self, *args, **kwargs):
        return self.power(*args, **kwargs)
@property def A(self) -> np.ndarray: if self._is_array: warn(np.VisibleDeprecationWarning( "`.A` is deprecated and will be removed in v1.13.0. " "Use `.toarray()` instead." )) return self.toarray() @property def T(self): return self.transpose() @property def H(self): if self._is_array: warn(np.VisibleDeprecationWarning( "`.H` is deprecated and will be removed in v1.13.0. " "Please use `.T.conjugate()` instead." )) return self.T.conjugate() @property def real(self): return self._real() @property def imag(self): return self._imag() @property def size(self): return self._getnnz() def transpose(self, axes=None, copy=False): """ Reverses the dimensions of the sparse array. Parameters ---------- axes : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value. copy : bool, optional Indicates whether or not attributes of `self` should be copied whenever possible. The degree to which attributes are copied varies depending on the type of sparse array being used. Returns ------- p : `self` with the dimensions reversed. See Also -------- numpy.transpose : NumPy's implementation of 'transpose' for ndarrays """ return self.tocsr(copy=copy).transpose(axes=axes, copy=False) def conjugate(self, copy=True): """Element-wise complex conjugation. If the array is of non-complex data type and `copy` is False, this method does nothing and the data is not copied. Parameters ---------- copy : bool, optional If True, the result is guaranteed to not share data with self. Returns ------- A : The element-wise complex conjugate. 
""" if np.issubdtype(self.dtype, np.complexfloating): return self.tocsr(copy=copy).conjugate(copy=False) elif copy: return self.copy() else: return self def conj(self, copy=True): return self.conjugate(copy=copy) conj.__doc__ = conjugate.__doc__ def _real(self): return self.tocsr()._real() def _imag(self): return self.tocsr()._imag() def nonzero(self): """nonzero indices Returns a tuple of arrays (row,col) containing the indices of the non-zero elements of the array. Examples -------- >>> from scipy.sparse import csr_array >>> A = csr_array([[1,2,0],[0,0,3],[4,0,5]]) >>> A.nonzero() (array([0, 0, 1, 2, 2]), array([0, 1, 2, 0, 2])) """ # convert to COOrdinate format A = self.tocoo() nz_mask = A.data != 0 return (A.row[nz_mask], A.col[nz_mask]) def _getcol(self, j): """Returns a copy of column j of the array, as an (m x 1) sparse array (column vector). """ # Subclasses should override this method for efficiency. # Post-multiply by a (n x 1) column vector 'a' containing all zeros # except for a_j = 1 n = self.shape[1] if j < 0: j += n if j < 0 or j >= n: raise IndexError("index out of bounds") col_selector = self._csc_container(([1], [[j], [0]]), shape=(n, 1), dtype=self.dtype) return self @ col_selector def _getrow(self, i): """Returns a copy of row i of the array, as a (1 x n) sparse array (row vector). """ # Subclasses should override this method for efficiency. # Pre-multiply by a (1 x m) row vector 'a' containing all zeros # except for a_i = 1 m = self.shape[0] if i < 0: i += m if i < 0 or i >= m: raise IndexError("index out of bounds") row_selector = self._csr_container(([1], [[0], [i]]), shape=(1, m), dtype=self.dtype) return row_selector @ self # The following dunder methods cannot be implemented. # # def __array__(self): # # Sparse matrices rely on NumPy wrapping them in object arrays under # # the hood to make unary ufuncs work on them. 
So we cannot raise # # TypeError here - which would be handy to not give users object # # arrays they probably don't want (they're looking for `.toarray()`). # # # # Conversion with `toarray()` would also break things because of the # # behavior discussed above, plus we want to avoid densification by # # accident because that can too easily blow up memory. # # def __array_ufunc__(self): # # We cannot implement __array_ufunc__ due to mismatching semantics. # # See gh-7707 and gh-7349 for details. # # def __array_function__(self): # # We cannot implement __array_function__ due to mismatching semantics. # # See gh-10362 for details. def todense(self, order=None, out=None): """ Return a dense matrix representation of this sparse array. Parameters ---------- order : {'C', 'F'}, optional Whether to store multi-dimensional data in C (row-major) or Fortran (column-major) order in memory. The default is 'None', which provides no ordering guarantees. Cannot be specified in conjunction with the `out` argument. out : ndarray, 2-D, optional If specified, uses this array (or `numpy.matrix`) as the output buffer instead of allocating a new array to return. The provided array must have the same shape and dtype as the sparse array on which you are calling the method. Returns ------- arr : numpy.matrix, 2-D A NumPy matrix object with the same shape and containing the same data represented by the sparse array, with the requested memory order. If `out` was passed and was an array (rather than a `numpy.matrix`), it will be filled with the appropriate values and returned wrapped in a `numpy.matrix` object that shares the same memory. """ return self._ascontainer(self.toarray(order=order, out=out)) def toarray(self, order=None, out=None): """ Return a dense ndarray representation of this sparse array. Parameters ---------- order : {'C', 'F'}, optional Whether to store multidimensional data in C (row-major) or Fortran (column-major) order in memory. 
The default is 'None', which provides no ordering guarantees. Cannot be specified in conjunction with the `out` argument. out : ndarray, 2-D, optional If specified, uses this array as the output buffer instead of allocating a new array to return. The provided array must have the same shape and dtype as the sparse array on which you are calling the method. For most sparse types, `out` is required to be memory contiguous (either C or Fortran ordered). Returns ------- arr : ndarray, 2-D An array with the same shape and containing the same data represented by the sparse array, with the requested memory order. If `out` was passed, the same object is returned after being modified in-place to contain the appropriate values. """ return self.tocoo(copy=False).toarray(order=order, out=out) # Any sparse array format deriving from _spbase must define one of # tocsr or tocoo. The other conversion methods may be implemented for # efficiency, but are not required. def tocsr(self, copy=False): """Convert this array to Compressed Sparse Row format. With copy=False, the data/indices may be shared between this array and the resultant csr_array. """ return self.tocoo(copy=copy).tocsr(copy=False) def todok(self, copy=False): """Convert this array to Dictionary Of Keys format. With copy=False, the data/indices may be shared between this array and the resultant dok_array. """ return self.tocoo(copy=copy).todok(copy=False) def tocoo(self, copy=False): """Convert this array to COOrdinate format. With copy=False, the data/indices may be shared between this array and the resultant coo_array. """ return self.tocsr(copy=False).tocoo(copy=copy) def tolil(self, copy=False): """Convert this array to List of Lists format. With copy=False, the data/indices may be shared between this array and the resultant lil_array. """ return self.tocsr(copy=False).tolil(copy=copy) def todia(self, copy=False): """Convert this array to sparse DIAgonal format. 
With copy=False, the data/indices may be shared between this array and the resultant dia_array. """ return self.tocoo(copy=copy).todia(copy=False) def tobsr(self, blocksize=None, copy=False): """Convert this array to Block Sparse Row format. With copy=False, the data/indices may be shared between this array and the resultant bsr_array. When blocksize=(R, C) is provided, it will be used for construction of the bsr_array. """ return self.tocsr(copy=False).tobsr(blocksize=blocksize, copy=copy) def tocsc(self, copy=False): """Convert this array to Compressed Sparse Column format. With copy=False, the data/indices may be shared between this array and the resultant csc_array. """ return self.tocsr(copy=copy).tocsc(copy=False) def copy(self): """Returns a copy of this array. No data/indices will be shared between the returned value and current array. """ return self.__class__(self, copy=True) def sum(self, axis=None, dtype=None, out=None): """ Sum the array elements over a given axis. Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the sum is computed. The default is to compute the sum of all the array elements, returning a scalar (i.e., `axis` = `None`). dtype : dtype, optional The type of the returned array and of the accumulator in which the elements are summed. The dtype of `a` is used by default unless `a` has an integer dtype of less precision than the default platform integer. In that case, if `a` is signed then the platform integer is used while if `a` is unsigned then an unsigned integer of the same precision as the platform integer is used. .. versionadded:: 0.18.0 out : np.matrix, optional Alternative output matrix in which to place the result. It must have the same shape as the expected output, but the type of the output values will be cast if necessary. .. versionadded:: 0.18.0 Returns ------- sum_along_axis : np.matrix A matrix with the same shape as `self`, with the specified axis removed. 
See Also -------- numpy.matrix.sum : NumPy's implementation of 'sum' for matrices """ validateaxis(axis) # We use multiplication by a matrix of ones to achieve this. # For some sparse array formats more efficient methods are # possible -- these should override this function. m, n = self.shape # Mimic numpy's casting. res_dtype = get_sum_dtype(self.dtype) if axis is None: # sum over rows and columns return ( self @ self._ascontainer(np.ones((n, 1), dtype=res_dtype)) ).sum(dtype=dtype, out=out) if axis < 0: axis += 2 # axis = 0 or 1 now if axis == 0: # sum over columns ret = self._ascontainer( np.ones((1, m), dtype=res_dtype) ) @ self else: # sum over rows ret = self @ self._ascontainer( np.ones((n, 1), dtype=res_dtype) ) if out is not None and out.shape != ret.shape: raise ValueError("dimensions do not match") return ret.sum(axis=axis, dtype=dtype, out=out) def mean(self, axis=None, dtype=None, out=None): """ Compute the arithmetic mean along the specified axis. Returns the average of the array elements. The average is taken over all elements in the array by default, otherwise over the specified axis. `float64` intermediate and return values are used for integer inputs. Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the mean is computed. The default is to compute the mean of all elements in the array (i.e., `axis` = `None`). dtype : data-type, optional Type to use in computing the mean. For integer inputs, the default is `float64`; for floating point inputs, it is the same as the input dtype. .. versionadded:: 0.18.0 out : np.matrix, optional Alternative output matrix in which to place the result. It must have the same shape as the expected output, but the type of the output values will be cast if necessary. .. 
versionadded:: 0.18.0 Returns ------- m : np.matrix See Also -------- numpy.matrix.mean : NumPy's implementation of 'mean' for matrices """ def _is_integral(dtype): return (np.issubdtype(dtype, np.integer) or np.issubdtype(dtype, np.bool_)) validateaxis(axis) res_dtype = self.dtype.type integral = _is_integral(self.dtype) # output dtype if dtype is None: if integral: res_dtype = np.float64 else: res_dtype = np.dtype(dtype).type # intermediate dtype for summation inter_dtype = np.float64 if integral else res_dtype inter_self = self.astype(inter_dtype) if axis is None: return (inter_self / np.array( self.shape[0] * self.shape[1]))\ .sum(dtype=res_dtype, out=out) if axis < 0: axis += 2 # axis = 0 or 1 now if axis == 0: return (inter_self * (1.0 / self.shape[0])).sum( axis=0, dtype=res_dtype, out=out) else: return (inter_self * (1.0 / self.shape[1])).sum( axis=1, dtype=res_dtype, out=out) def diagonal(self, k=0): """Returns the kth diagonal of the array. Parameters ---------- k : int, optional Which diagonal to get, corresponding to elements a[i, i+k]. Default: 0 (the main diagonal). .. versionadded:: 1.0 See also -------- numpy.diagonal : Equivalent numpy function. Examples -------- >>> from scipy.sparse import csr_array >>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]]) >>> A.diagonal() array([1, 0, 5]) >>> A.diagonal(k=1) array([2, 3]) """ return self.tocsr().diagonal(k=k) def trace(self, offset=0): """Returns the sum along diagonals of the sparse array. Parameters ---------- offset : int, optional Which diagonal to get, corresponding to elements a[i, i+offset]. Default: 0 (the main diagonal). """ return self.diagonal(k=offset).sum() def setdiag(self, values, k=0): """ Set diagonal or off-diagonal elements of the array. Parameters ---------- values : array_like New values of the diagonal elements. Values may have any length. If the diagonal is longer than values, then the remaining diagonal entries will not be set. 
If values are longer than the diagonal, then the remaining values are ignored. If a scalar value is given, all of the diagonal is set to it. k : int, optional Which off-diagonal to set, corresponding to elements a[i,i+k]. Default: 0 (the main diagonal). """ M, N = self.shape if (k > 0 and k >= N) or (k < 0 and -k >= M): raise ValueError("k exceeds array dimensions") self._setdiag(np.asarray(values), k) def _setdiag(self, values, k): """This part of the implementation gets overridden by the different formats. """ M, N = self.shape if k < 0: if values.ndim == 0: # broadcast max_index = min(M+k, N) for i in range(max_index): self[i - k, i] = values else: max_index = min(M+k, N, len(values)) if max_index <= 0: return for i, v in enumerate(values[:max_index]): self[i - k, i] = v else: if values.ndim == 0: # broadcast max_index = min(M, N-k) for i in range(max_index): self[i, i + k] = values else: max_index = min(M, N-k, len(values)) if max_index <= 0: return for i, v in enumerate(values[:max_index]): self[i, i + k] = v def _process_toarray_args(self, order, out): if out is not None: if order is not None: raise ValueError('order cannot be specified if out ' 'is not None') if out.shape != self.shape or out.dtype != self.dtype: raise ValueError('out array must be same dtype and shape as ' 'sparse array') out[...] = 0. return out else: return np.zeros(self.shape, dtype=self.dtype, order=order) def _get_index_dtype(self, arrays=(), maxval=None, check_contents=False): """ Determine index dtype for array. This wraps _sputils.get_index_dtype, providing compatibility for both array and matrix API sparse matrices. Matrix API sparse matrices would attempt to downcast the indices - which can be computationally expensive and undesirable for users. The array API changes this behaviour. See discussion: https://github.com/scipy/scipy/issues/16774 The get_index_dtype import is due to implementation details of the test suite. 
It allows the decorator ``with_64bit_maxval_limit`` to mock a lower int32 max value for checks on the matrix API's downcasting behaviour. """ from ._sputils import get_index_dtype # Don't check contents for array API return get_index_dtype(arrays, maxval, (check_contents and not self._is_array)) ## All methods below are deprecated and should be removed in ## scipy 1.13.0 ## ## Also uncomment the definition of shape above. def get_shape(self): """Get shape of a sparse array. .. deprecated:: 1.11.0 This method will be removed in SciPy 1.13.0. Use `X.shape` instead. """ msg = ( "`get_shape` is deprecated and will be removed in v1.13.0; " "use `X.shape` instead." ) warn(msg, DeprecationWarning, stacklevel=2) return self._shape def set_shape(self, shape): """See `reshape`. .. deprecated:: 1.11.0 This method will be removed in SciPy 1.13.0. Use `X.reshape` instead. """ msg = ( "Shape assignment is deprecated and will be removed in v1.13.0; " "use `reshape` instead." ) warn(msg, DeprecationWarning, stacklevel=2) # Make sure copy is False since this is in place # Make sure format is unchanged because we are doing a __dict__ swap new_self = self.reshape(shape, copy=False).asformat(self.format) self.__dict__ = new_self.__dict__ shape = property( fget=lambda self: self._shape, fset=set_shape, doc="""The shape of the array. Note that, starting in SciPy 1.13.0, this property will no longer be settable. To change the array shape, use `X.reshape` instead. """ ) # noqa: F811 def asfptype(self): """Upcast array to a floating point format (if necessary) .. deprecated:: 1.11.0 This method is for internal use only, and will be removed from the public API in SciPy 1.13.0. """ msg = ( "`asfptype` is an internal function, and is deprecated " "as part of the public API. It will be removed in v1.13.0." ) warn(msg, DeprecationWarning, stacklevel=2) return self._asfptype() def getmaxprint(self): """Maximum number of elements to display when printed. .. 
deprecated:: 1.11.0 This method is for internal use only, and will be removed from the public API in SciPy 1.13.0. """ msg = ( "`getmaxprint` is an internal function, and is deprecated " "as part of the public API. It will be removed in v1.13.0." ) warn(msg, DeprecationWarning, stacklevel=2) return self._getmaxprint() def getformat(self): """Matrix storage format. .. deprecated:: 1.11.0 This method will be removed in SciPy 1.13.0. Use `X.format` instead. """ msg = ( "`getformat` is deprecated and will be removed in v1.13.0; " "use `X.format` instead." ) warn(msg, DeprecationWarning, stacklevel=2) return self.format def getnnz(self, axis=None): """Number of stored values, including explicit zeros. .. deprecated:: 1.11.0 This method will be removed in SciPy 1.13.0. Use `X.nnz` instead. The `axis` argument will no longer be supported; please let us know if you still need this functionality. Parameters ---------- axis : None, 0, or 1 Select between the number of values across the whole array, in each column, or in each row. See also -------- count_nonzero : Number of non-zero entries """ msg = ( "`getnnz` is deprecated and will be removed in v1.13.0; " "use `X.nnz` instead." ) warn(msg, DeprecationWarning, stacklevel=2) return self._getnnz(axis=axis) def getH(self): """Return the Hermitian transpose of this array. .. deprecated:: 1.11.0 This method will be removed in SciPy 1.13.0. Use `X.conj().T` instead. """ msg = ( "`getH` is deprecated and will be removed in v1.13.0; " "use `X.conj().T` instead." ) warn(msg, DeprecationWarning, stacklevel=2) return self.conjugate().transpose() def getcol(self, j): """Returns a copy of column j of the array, as an (m x 1) sparse array (column vector). .. deprecated:: 1.11.0 This method will be removed in SciPy 1.13.0. Use array indexing instead. """ msg = ( "`getcol` is deprecated and will be removed in v1.13.0; " f"use `X[:, [{j}]]` instead." 
) warn(msg, DeprecationWarning, stacklevel=2) return self._getcol(j) def getrow(self, i): """Returns a copy of row i of the array, as a (1 x n) sparse array (row vector). .. deprecated:: 1.11.0 This method will be removed in SciPy 1.13.0. Use array indexing instead. """ msg = ( "`getrow` is deprecated and will be removed in v1.13.0; " f"use `X[[{i}]]` instead." ) warn(msg, DeprecationWarning, stacklevel=2) return self._getrow(i) ## End 1.13.0 deprecated methods class sparray: """A namespace class to separate sparray from spmatrix""" pass sparray.__doc__ = _spbase.__doc__ def issparse(x): """Is `x` of a sparse array type? Parameters ---------- x object to check for being a sparse array Returns ------- bool True if `x` is a sparse array or a sparse matrix, False otherwise Examples -------- >>> import numpy as np >>> from scipy.sparse import csr_array, csr_matrix, issparse >>> issparse(csr_matrix([[5]])) True >>> issparse(csr_array([[5]])) True >>> issparse(np.array([[5]])) False >>> issparse(5) False """ return isinstance(x, _spbase) def isspmatrix(x): """Is `x` of a sparse matrix type? Parameters ---------- x object to check for being a sparse matrix Returns ------- bool True if `x` is a sparse matrix, False otherwise Examples -------- >>> import numpy as np >>> from scipy.sparse import csr_array, csr_matrix, isspmatrix >>> isspmatrix(csr_matrix([[5]])) True >>> isspmatrix(csr_array([[5]])) False >>> isspmatrix(np.array([[5]])) False >>> isspmatrix(5) False """ return isinstance(x, spmatrix)
50,003
31.962426
87
py
scipy
scipy-main/scipy/sparse/_matrix_io.py
import numpy as np import scipy.sparse __all__ = ['save_npz', 'load_npz'] # Make loading safe vs. malicious input PICKLE_KWARGS = dict(allow_pickle=False) def save_npz(file, matrix, compressed=True): """ Save a sparse matrix to a file using ``.npz`` format. Parameters ---------- file : str or file-like object Either the file name (string) or an open file (file-like object) where the data will be saved. If file is a string, the ``.npz`` extension will be appended to the file name if it is not already there. matrix: spmatrix (format: ``csc``, ``csr``, ``bsr``, ``dia`` or coo``) The sparse matrix to save. compressed : bool, optional Allow compressing the file. Default: True See Also -------- scipy.sparse.load_npz: Load a sparse matrix from a file using ``.npz`` format. numpy.savez: Save several arrays into a ``.npz`` archive. numpy.savez_compressed : Save several arrays into a compressed ``.npz`` archive. Examples -------- Store sparse matrix to disk, and load it again: >>> import numpy as np >>> import scipy.sparse >>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]])) >>> sparse_matrix <2x3 sparse matrix of type '<class 'numpy.int64'>' with 2 stored elements in Compressed Sparse Column format> >>> sparse_matrix.toarray() array([[0, 0, 3], [4, 0, 0]], dtype=int64) >>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix) >>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz') >>> sparse_matrix <2x3 sparse matrix of type '<class 'numpy.int64'>' with 2 stored elements in Compressed Sparse Column format> >>> sparse_matrix.toarray() array([[0, 0, 3], [4, 0, 0]], dtype=int64) """ arrays_dict = {} if matrix.format in ('csc', 'csr', 'bsr'): arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr) elif matrix.format == 'dia': arrays_dict.update(offsets=matrix.offsets) elif matrix.format == 'coo': arrays_dict.update(row=matrix.row, col=matrix.col) else: raise NotImplementedError(f'Save is not implemented for sparse matrix of 
format {matrix.format}.') arrays_dict.update( format=matrix.format.encode('ascii'), shape=matrix.shape, data=matrix.data ) if compressed: np.savez_compressed(file, **arrays_dict) else: np.savez(file, **arrays_dict) def load_npz(file): """ Load a sparse matrix from a file using ``.npz`` format. Parameters ---------- file : str or file-like object Either the file name (string) or an open file (file-like object) where the data will be loaded. Returns ------- result : csc_matrix, csr_matrix, bsr_matrix, dia_matrix or coo_matrix A sparse matrix containing the loaded data. Raises ------ OSError If the input file does not exist or cannot be read. See Also -------- scipy.sparse.save_npz: Save a sparse matrix to a file using ``.npz`` format. numpy.load: Load several arrays from a ``.npz`` archive. Examples -------- Store sparse matrix to disk, and load it again: >>> import numpy as np >>> import scipy.sparse >>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]])) >>> sparse_matrix <2x3 sparse matrix of type '<class 'numpy.int64'>' with 2 stored elements in Compressed Sparse Column format> >>> sparse_matrix.toarray() array([[0, 0, 3], [4, 0, 0]], dtype=int64) >>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix) >>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz') >>> sparse_matrix <2x3 sparse matrix of type '<class 'numpy.int64'>' with 2 stored elements in Compressed Sparse Column format> >>> sparse_matrix.toarray() array([[0, 0, 3], [4, 0, 0]], dtype=int64) """ with np.load(file, **PICKLE_KWARGS) as loaded: try: matrix_format = loaded['format'] except KeyError as e: raise ValueError(f'The file {file} does not contain a sparse matrix.') from e matrix_format = matrix_format.item() if not isinstance(matrix_format, str): # Play safe with Python 2 vs 3 backward compatibility; # files saved with SciPy < 1.0.0 may contain unicode or bytes. 
matrix_format = matrix_format.decode('ascii') try: cls = getattr(scipy.sparse, f'{matrix_format}_matrix') except AttributeError as e: raise ValueError(f'Unknown matrix format "{matrix_format}"') from e if matrix_format in ('csc', 'csr', 'bsr'): return cls((loaded['data'], loaded['indices'], loaded['indptr']), shape=loaded['shape']) elif matrix_format == 'dia': return cls((loaded['data'], loaded['offsets']), shape=loaded['shape']) elif matrix_format == 'coo': return cls((loaded['data'], (loaded['row'], loaded['col'])), shape=loaded['shape']) else: raise NotImplementedError('Load is not implemented for ' 'sparse matrix of format {}.'.format(matrix_format))
5,347
34.184211
106
py
scipy
scipy-main/scipy/sparse/_csr.py
"""Compressed Sparse Row matrix format""" __docformat__ = "restructuredtext en" __all__ = ['csr_array', 'csr_matrix', 'isspmatrix_csr'] import numpy as np from ._matrix import spmatrix, _array_doc_to_matrix from ._base import _spbase, sparray from ._sparsetools import (csr_tocsc, csr_tobsr, csr_count_blocks, get_csr_submatrix) from ._sputils import upcast from ._compressed import _cs_matrix class _csr_base(_cs_matrix): """ Compressed Sparse Row matrix This can be instantiated in several ways: csr_array(D) with a dense matrix or rank-2 ndarray D csr_array(S) with another sparse matrix S (equivalent to S.tocsr()) csr_array((M, N), [dtype]) to construct an empty matrix with shape (M, N) dtype is optional, defaulting to dtype='d'. csr_array((data, (row_ind, col_ind)), [shape=(M, N)]) where ``data``, ``row_ind`` and ``col_ind`` satisfy the relationship ``a[row_ind[k], col_ind[k]] = data[k]``. csr_array((data, indices, indptr), [shape=(M, N)]) is the standard CSR representation where the column indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``. If the shape parameter is not supplied, the matrix dimensions are inferred from the index arrays. Attributes ---------- dtype : dtype Data type of the matrix shape : 2-tuple Shape of the matrix ndim : int Number of dimensions (this is always 2) nnz Number of stored values, including explicit zeros data CSR format data array of the matrix indices CSR format index array of the matrix indptr CSR format index pointer array of the matrix has_sorted_indices Whether indices are sorted Notes ----- Sparse matrices can be used in arithmetic operations: they support addition, subtraction, multiplication, division, and matrix power. Advantages of the CSR format - efficient arithmetic operations CSR + CSR, CSR * CSR, etc. 
- efficient row slicing - fast matrix vector products Disadvantages of the CSR format - slow column slicing operations (consider CSC) - changes to the sparsity structure are expensive (consider LIL or DOK) Canonical Format - Within each row, indices are sorted by column. - There are no duplicate entries. Examples -------- >>> import numpy as np >>> from scipy.sparse import csr_array >>> csr_array((3, 4), dtype=np.int8).toarray() array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=int8) >>> row = np.array([0, 0, 1, 2, 2, 2]) >>> col = np.array([0, 2, 2, 0, 1, 2]) >>> data = np.array([1, 2, 3, 4, 5, 6]) >>> csr_array((data, (row, col)), shape=(3, 3)).toarray() array([[1, 0, 2], [0, 0, 3], [4, 5, 6]]) >>> indptr = np.array([0, 2, 3, 6]) >>> indices = np.array([0, 2, 2, 0, 1, 2]) >>> data = np.array([1, 2, 3, 4, 5, 6]) >>> csr_array((data, indices, indptr), shape=(3, 3)).toarray() array([[1, 0, 2], [0, 0, 3], [4, 5, 6]]) Duplicate entries are summed together: >>> row = np.array([0, 1, 2, 0]) >>> col = np.array([0, 1, 1, 0]) >>> data = np.array([1, 2, 4, 8]) >>> csr_array((data, (row, col)), shape=(3, 3)).toarray() array([[9, 0, 0], [0, 2, 0], [0, 4, 0]]) As an example of how to construct a CSR matrix incrementally, the following snippet builds a term-document matrix from texts: >>> docs = [["hello", "world", "hello"], ["goodbye", "cruel", "world"]] >>> indptr = [0] >>> indices = [] >>> data = [] >>> vocabulary = {} >>> for d in docs: ... for term in d: ... index = vocabulary.setdefault(term, len(vocabulary)) ... indices.append(index) ... data.append(1) ... indptr.append(len(indices)) ... 
>>> csr_array((data, indices, indptr), dtype=int).toarray() array([[2, 1, 0, 0], [0, 1, 1, 1]]) """ _format = 'csr' def transpose(self, axes=None, copy=False): if axes is not None: raise ValueError("Sparse matrices do not support " "an 'axes' parameter because swapping " "dimensions is the only logical permutation.") M, N = self.shape return self._csc_container((self.data, self.indices, self.indptr), shape=(N, M), copy=copy) transpose.__doc__ = _spbase.transpose.__doc__ def tolil(self, copy=False): lil = self._lil_container(self.shape, dtype=self.dtype) self.sum_duplicates() ptr,ind,dat = self.indptr,self.indices,self.data rows, data = lil.rows, lil.data for n in range(self.shape[0]): start = ptr[n] end = ptr[n+1] rows[n] = ind[start:end].tolist() data[n] = dat[start:end].tolist() return lil tolil.__doc__ = _spbase.tolil.__doc__ def tocsr(self, copy=False): if copy: return self.copy() else: return self tocsr.__doc__ = _spbase.tocsr.__doc__ def tocsc(self, copy=False): idx_dtype = self._get_index_dtype((self.indptr, self.indices), maxval=max(self.nnz, self.shape[0])) indptr = np.empty(self.shape[1] + 1, dtype=idx_dtype) indices = np.empty(self.nnz, dtype=idx_dtype) data = np.empty(self.nnz, dtype=upcast(self.dtype)) csr_tocsc(self.shape[0], self.shape[1], self.indptr.astype(idx_dtype), self.indices.astype(idx_dtype), self.data, indptr, indices, data) A = self._csc_container((data, indices, indptr), shape=self.shape) A.has_sorted_indices = True return A tocsc.__doc__ = _spbase.tocsc.__doc__ def tobsr(self, blocksize=None, copy=True): if blocksize is None: from ._spfuncs import estimate_blocksize return self.tobsr(blocksize=estimate_blocksize(self)) elif blocksize == (1,1): arg1 = (self.data.reshape(-1,1,1),self.indices,self.indptr) return self._bsr_container(arg1, shape=self.shape, copy=copy) else: R,C = blocksize M,N = self.shape if R < 1 or C < 1 or M % R != 0 or N % C != 0: raise ValueError('invalid blocksize %s' % blocksize) blks = 
csr_count_blocks(M,N,R,C,self.indptr,self.indices) idx_dtype = self._get_index_dtype((self.indptr, self.indices), maxval=max(N//C, blks)) indptr = np.empty(M//R+1, dtype=idx_dtype) indices = np.empty(blks, dtype=idx_dtype) data = np.zeros((blks,R,C), dtype=self.dtype) csr_tobsr(M, N, R, C, self.indptr.astype(idx_dtype), self.indices.astype(idx_dtype), self.data, indptr, indices, data.ravel()) return self._bsr_container( (data, indices, indptr), shape=self.shape ) tobsr.__doc__ = _spbase.tobsr.__doc__ # these functions are used by the parent class (_cs_matrix) # to remove redundancy between csc_matrix and csr_array def _swap(self, x): """swap the members of x if this is a column-oriented matrix """ return x def __iter__(self): indptr = np.zeros(2, dtype=self.indptr.dtype) shape = (1, self.shape[1]) i0 = 0 for i1 in self.indptr[1:]: indptr[1] = i1 - i0 indices = self.indices[i0:i1] data = self.data[i0:i1] yield self.__class__( (data, indices, indptr), shape=shape, copy=True ) i0 = i1 def _getrow(self, i): """Returns a copy of row i of the matrix, as a (1 x n) CSR matrix (row vector). """ M, N = self.shape i = int(i) if i < 0: i += M if i < 0 or i >= M: raise IndexError('index (%d) out of range' % i) indptr, indices, data = get_csr_submatrix( M, N, self.indptr, self.indices, self.data, i, i + 1, 0, N) return self.__class__((data, indices, indptr), shape=(1, N), dtype=self.dtype, copy=False) def _getcol(self, i): """Returns a copy of column i of the matrix, as a (m x 1) CSR matrix (column vector). 
""" M, N = self.shape i = int(i) if i < 0: i += N if i < 0 or i >= N: raise IndexError('index (%d) out of range' % i) indptr, indices, data = get_csr_submatrix( M, N, self.indptr, self.indices, self.data, 0, M, i, i + 1) return self.__class__((data, indices, indptr), shape=(M, 1), dtype=self.dtype, copy=False) def _get_intXarray(self, row, col): return self._getrow(row)._minor_index_fancy(col) def _get_intXslice(self, row, col): if col.step in (1, None): return self._get_submatrix(row, col, copy=True) # TODO: uncomment this once it's faster: # return self._getrow(row)._minor_slice(col) M, N = self.shape start, stop, stride = col.indices(N) ii, jj = self.indptr[row:row+2] row_indices = self.indices[ii:jj] row_data = self.data[ii:jj] if stride > 0: ind = (row_indices >= start) & (row_indices < stop) else: ind = (row_indices <= start) & (row_indices > stop) if abs(stride) > 1: ind &= (row_indices - start) % stride == 0 row_indices = (row_indices[ind] - start) // stride row_data = row_data[ind] row_indptr = np.array([0, len(row_indices)]) if stride < 0: row_data = row_data[::-1] row_indices = abs(row_indices[::-1]) shape = (1, max(0, int(np.ceil(float(stop - start) / stride)))) return self.__class__((row_data, row_indices, row_indptr), shape=shape, dtype=self.dtype, copy=False) def _get_sliceXint(self, row, col): if row.step in (1, None): return self._get_submatrix(row, col, copy=True) return self._major_slice(row)._get_submatrix(minor=col) def _get_sliceXarray(self, row, col): return self._major_slice(row)._minor_index_fancy(col) def _get_arrayXint(self, row, col): return self._major_index_fancy(row)._get_submatrix(minor=col) def _get_arrayXslice(self, row, col): if col.step not in (1, None): col = np.arange(*col.indices(self.shape[1])) return self._get_arrayXarray(row, col) return self._major_index_fancy(row)._get_submatrix(minor=col) def isspmatrix_csr(x): """Is `x` of csr_matrix type? 
Parameters ---------- x object to check for being a csr matrix Returns ------- bool True if `x` is a csr matrix, False otherwise Examples -------- >>> from scipy.sparse import csr_array, csr_matrix, coo_matrix, isspmatrix_csr >>> isspmatrix_csr(csr_matrix([[5]])) True >>> isspmatrix_csr(csr_array([[5]])) False >>> isspmatrix_csr(coo_matrix([[5]])) False """ return isinstance(x, csr_matrix) # This namespace class separates array from matrix with isinstance class csr_array(_csr_base, sparray): pass csr_array.__doc__ = _csr_base.__doc__ class csr_matrix(spmatrix, _csr_base): pass csr_matrix.__doc__ = _array_doc_to_matrix(_csr_base.__doc__)
12,064
31.259358
82
py
scipy
scipy-main/scipy/sparse/sputils.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.sparse` namespace for importing the functions # included below. import warnings from . import _sputils __all__ = [ # noqa: F822 'asmatrix', 'check_reshape_kwargs', 'check_shape', 'downcast_intp_index', 'get_index_dtype', 'get_sum_dtype', 'getdata', 'getdtype', 'is_pydata_spmatrix', 'isdense', 'isintlike', 'ismatrix', 'isscalarlike', 'issequence', 'isshape', 'matrix', 'operator', 'prod', 'supported_dtypes', 'sys', 'to_native', 'upcast', 'upcast_char', 'upcast_scalar', 'validateaxis', ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.sparse.sputils is deprecated and has no attribute " f"{name}. Try looking in scipy.sparse instead.") warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, " "the `scipy.sparse.sputils` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_sputils, name)
1,187
21.415094
76
py
scipy
scipy-main/scipy/sparse/__init__.py
""" ===================================== Sparse matrices (:mod:`scipy.sparse`) ===================================== .. currentmodule:: scipy.sparse .. toctree:: :hidden: sparse.csgraph sparse.linalg SciPy 2-D sparse array package for numeric data. .. note:: This package is switching to an array interface, compatible with NumPy arrays, from the older matrix interface. We recommend that you use the array objects (`bsr_array`, `coo_array`, etc.) for all new work. When using the array interface, please note that: - ``x * y`` no longer performs matrix multiplication, but element-wise multiplication (just like with NumPy arrays). To make code work with both arrays and matrices, use ``x @ y`` for matrix multiplication. - Operations such as `sum`, that used to produce dense matrices, now produce arrays, whose multiplication behavior differs similarly. - Sparse arrays currently must be two-dimensional. This also means that all *slicing* operations on these objects must produce two-dimensional results, or they will result in an error. This will be addressed in a future version. The construction utilities (`eye`, `kron`, `random`, `diags`, etc.) have not yet been ported, but their results can be wrapped into arrays:: A = csr_array(eye(3)) Contents ======== Sparse array classes -------------------- .. autosummary:: :toctree: generated/ bsr_array - Block Sparse Row array coo_array - A sparse array in COOrdinate format csc_array - Compressed Sparse Column array csr_array - Compressed Sparse Row array dia_array - Sparse array with DIAgonal storage dok_array - Dictionary Of Keys based sparse array lil_array - Row-based list of lists sparse array sparray - Sparse array base class Sparse matrix classes --------------------- .. 
autosummary:: :toctree: generated/ bsr_matrix - Block Sparse Row matrix coo_matrix - A sparse matrix in COOrdinate format csc_matrix - Compressed Sparse Column matrix csr_matrix - Compressed Sparse Row matrix dia_matrix - Sparse matrix with DIAgonal storage dok_matrix - Dictionary Of Keys based sparse matrix lil_matrix - Row-based list of lists sparse matrix spmatrix - Sparse matrix base class Functions --------- Building sparse arrays: .. autosummary:: :toctree: generated/ diags_array - Return a sparse array from diagonals Building sparse matrices: .. autosummary:: :toctree: generated/ eye - Sparse MxN matrix whose k-th diagonal is all ones identity - Identity matrix in sparse format kron - kronecker product of two sparse matrices kronsum - kronecker sum of sparse matrices diags - Return a sparse matrix from diagonals spdiags - Return a sparse matrix from diagonals block_diag - Build a block diagonal sparse matrix tril - Lower triangular portion of a matrix in sparse format triu - Upper triangular portion of a matrix in sparse format bmat - Build a sparse matrix from sparse sub-blocks hstack - Stack sparse matrices horizontally (column wise) vstack - Stack sparse matrices vertically (row wise) rand - Random values in a given shape random - Random values in a given shape Save and load sparse matrices: .. autosummary:: :toctree: generated/ save_npz - Save a sparse matrix to a file using ``.npz`` format. load_npz - Load a sparse matrix from a file using ``.npz`` format. Sparse matrix tools: .. autosummary:: :toctree: generated/ find Identifying sparse matrices: .. autosummary:: :toctree: generated/ issparse isspmatrix isspmatrix_csc isspmatrix_csr isspmatrix_bsr isspmatrix_lil isspmatrix_dok isspmatrix_coo isspmatrix_dia Submodules ---------- .. autosummary:: csgraph - Compressed sparse graph routines linalg - sparse linear algebra routines Exceptions ---------- .. 
autosummary:: :toctree: generated/ SparseEfficiencyWarning SparseWarning Usage information ================= There are seven available sparse matrix types: 1. csc_matrix: Compressed Sparse Column format 2. csr_matrix: Compressed Sparse Row format 3. bsr_matrix: Block Sparse Row format 4. lil_matrix: List of Lists format 5. dok_matrix: Dictionary of Keys format 6. coo_matrix: COOrdinate format (aka IJV, triplet format) 7. dia_matrix: DIAgonal format To construct a matrix efficiently, use either dok_matrix or lil_matrix. The lil_matrix class supports basic slicing and fancy indexing with a similar syntax to NumPy arrays. As illustrated below, the COO format may also be used to efficiently construct matrices. Despite their similarity to NumPy arrays, it is **strongly discouraged** to use NumPy functions directly on these matrices because NumPy may not properly convert them for computations, leading to unexpected (and incorrect) results. If you do want to apply a NumPy function to these matrices, first check if SciPy has its own implementation for the given sparse matrix class, or **convert the sparse matrix to a NumPy array** (e.g., using the `toarray()` method of the class) first before applying the method. To perform manipulations such as multiplication or inversion, first convert the matrix to either CSC or CSR format. The lil_matrix format is row-based, so conversion to CSR is efficient, whereas conversion to CSC is less so. All conversions among the CSR, CSC, and COO formats are efficient, linear-time operations. Matrix vector product --------------------- To do a vector product between a sparse matrix and a vector simply use the matrix `dot` method, as described in its docstring: >>> import numpy as np >>> from scipy.sparse import csr_matrix >>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]]) >>> v = np.array([1, 0, -1]) >>> A.dot(v) array([ 1, -3, -1], dtype=int64) .. 
warning:: As of NumPy 1.7, `np.dot` is not aware of sparse matrices, therefore using it will result on unexpected results or errors. The corresponding dense array should be obtained first instead: >>> np.dot(A.toarray(), v) array([ 1, -3, -1], dtype=int64) but then all the performance advantages would be lost. The CSR format is specially suitable for fast matrix vector products. Example 1 --------- Construct a 1000x1000 lil_matrix and add some values to it: >>> from scipy.sparse import lil_matrix >>> from scipy.sparse.linalg import spsolve >>> from numpy.linalg import solve, norm >>> from numpy.random import rand >>> A = lil_matrix((1000, 1000)) >>> A[0, :100] = rand(100) >>> A[1, 100:200] = A[0, :100] >>> A.setdiag(rand(1000)) Now convert it to CSR format and solve A x = b for x: >>> A = A.tocsr() >>> b = rand(1000) >>> x = spsolve(A, b) Convert it to a dense matrix and solve, and check that the result is the same: >>> x_ = solve(A.toarray(), b) Now we can compute norm of the error with: >>> err = norm(x-x_) >>> err < 1e-10 True It should be small :) Example 2 --------- Construct a matrix in COO format: >>> from scipy import sparse >>> from numpy import array >>> I = array([0,3,1,0]) >>> J = array([0,3,1,2]) >>> V = array([4,5,7,9]) >>> A = sparse.coo_matrix((V,(I,J)),shape=(4,4)) Notice that the indices do not need to be sorted. Duplicate (i,j) entries are summed when converting to CSR or CSC. >>> I = array([0,0,1,3,1,0,0]) >>> J = array([0,2,1,3,1,0,0]) >>> V = array([1,1,1,1,1,1,1]) >>> B = sparse.coo_matrix((V,(I,J)),shape=(4,4)).tocsr() This is useful for constructing finite-element stiffness and mass matrices. Further details --------------- CSR column indices are not necessarily sorted. Likewise for CSC row indices. Use the .sorted_indices() and .sort_indices() methods when sorted indices are required (e.g., when passing data to other libraries). """ # Original code by Travis Oliphant. 
# Modified and extended by Ed Schofield, Robert Cimrman, # Nathan Bell, and Jake Vanderplas. import warnings as _warnings from ._base import * from ._csr import * from ._csc import * from ._lil import * from ._dok import * from ._coo import * from ._dia import * from ._bsr import * from ._construct import * from ._extract import * from ._matrix import spmatrix from ._matrix_io import * # For backward compatibility with v0.19. from . import csgraph # Deprecated namespaces, to be removed in v2.0.0 from . import ( base, bsr, compressed, construct, coo, csc, csr, data, dia, dok, extract, lil, sparsetools, sputils ) __all__ = [s for s in dir() if not s.startswith('_')] # Filter PendingDeprecationWarning for np.matrix introduced with numpy 1.15 _warnings.filterwarnings('ignore', message='the matrix subclass is not the recommended way') from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
8,781
27.329032
92
py
scipy
scipy-main/scipy/sparse/coo.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.sparse` namespace for importing the functions # included below. import warnings from . import _coo __all__ = [ # noqa: F822 'SparseEfficiencyWarning', 'check_reshape_kwargs', 'check_shape', 'coo_matrix', 'coo_matvec', 'coo_tocsr', 'coo_todense', 'downcast_intp_index', 'get_index_dtype', 'getdata', 'getdtype', 'isshape', 'isspmatrix', 'isspmatrix_coo', 'operator', 'spmatrix', 'to_native', 'upcast', 'upcast_char', 'warn', ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.sparse.coo is deprecated and has no attribute " f"{name}. Try looking in scipy.sparse instead.") warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, " "the `scipy.sparse.coo` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_coo, name)
1,091
21.75
76
py
scipy
scipy-main/scipy/sparse/_coo.py
""" A sparse matrix in COOrdinate or 'triplet' format""" __docformat__ = "restructuredtext en" __all__ = ['coo_array', 'coo_matrix', 'isspmatrix_coo'] from warnings import warn import numpy as np from ._matrix import spmatrix, _array_doc_to_matrix from ._sparsetools import coo_tocsr, coo_todense, coo_matvec from ._base import issparse, SparseEfficiencyWarning, _spbase, sparray from ._data import _data_matrix, _minmax_mixin from ._sputils import (upcast, upcast_char, to_native, isshape, getdtype, getdata, downcast_intp_index, check_shape, check_reshape_kwargs) import operator class _coo_base(_data_matrix, _minmax_mixin): """ A sparse matrix in COOrdinate format. Also known as the 'ijv' or 'triplet' format. This can be instantiated in several ways: coo_array(D) with a dense matrix D coo_array(S) with another sparse matrix S (equivalent to S.tocoo()) coo_array((M, N), [dtype]) to construct an empty matrix with shape (M, N) dtype is optional, defaulting to dtype='d'. coo_array((data, (i, j)), [shape=(M, N)]) to construct from three arrays: 1. data[:] the entries of the matrix, in any order 2. i[:] the row indices of the matrix entries 3. j[:] the column indices of the matrix entries Where ``A[i[k], j[k]] = data[k]``. When shape is not specified, it is inferred from the index arrays Attributes ---------- dtype : dtype Data type of the matrix shape : 2-tuple Shape of the matrix ndim : int Number of dimensions (this is always 2) nnz Number of stored values, including explicit zeros data COO format data array of the matrix row COO format row index array of the matrix col COO format column index array of the matrix Notes ----- Sparse matrices can be used in arithmetic operations: they support addition, subtraction, multiplication, division, and matrix power. 
Advantages of the COO format - facilitates fast conversion among sparse formats - permits duplicate entries (see example) - very fast conversion to and from CSR/CSC formats Disadvantages of the COO format - does not directly support: + arithmetic operations + slicing Intended Usage - COO is a fast format for constructing sparse matrices - Once a matrix has been constructed, convert to CSR or CSC format for fast arithmetic and matrix vector operations - By default when converting to CSR or CSC format, duplicate (i,j) entries will be summed together. This facilitates efficient construction of finite element matrices and the like. (see example) Canonical format - Entries and indices sorted by row, then column. - There are no duplicate entries (i.e. duplicate (i,j) locations) - Arrays MAY have explicit zeros. Examples -------- >>> # Constructing an empty matrix >>> import numpy as np >>> from scipy.sparse import coo_array >>> coo_array((3, 4), dtype=np.int8).toarray() array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=int8) >>> # Constructing a matrix using ijv format >>> row = np.array([0, 3, 1, 0]) >>> col = np.array([0, 3, 1, 2]) >>> data = np.array([4, 5, 7, 9]) >>> coo_array((data, (row, col)), shape=(4, 4)).toarray() array([[4, 0, 9, 0], [0, 7, 0, 0], [0, 0, 0, 0], [0, 0, 0, 5]]) >>> # Constructing a matrix with duplicate indices >>> row = np.array([0, 0, 1, 3, 1, 0, 0]) >>> col = np.array([0, 2, 1, 3, 1, 0, 0]) >>> data = np.array([1, 1, 1, 1, 1, 1, 1]) >>> coo = coo_array((data, (row, col)), shape=(4, 4)) >>> # Duplicate indices are maintained until implicitly or explicitly summed >>> np.max(coo.data) 1 >>> coo.toarray() array([[3, 0, 1, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]]) """ _format = 'coo' def __init__(self, arg1, shape=None, dtype=None, copy=False): _data_matrix.__init__(self) if isinstance(arg1, tuple): if isshape(arg1): M, N = arg1 self._shape = check_shape((M, N)) idx_dtype = self._get_index_dtype(maxval=max(M, N)) data_dtype = 
getdtype(dtype, default=float) self.row = np.array([], dtype=idx_dtype) self.col = np.array([], dtype=idx_dtype) self.data = np.array([], dtype=data_dtype) self.has_canonical_format = True else: try: obj, (row, col) = arg1 except (TypeError, ValueError) as e: raise TypeError('invalid input format') from e if shape is None: if len(row) == 0 or len(col) == 0: raise ValueError('cannot infer dimensions from zero ' 'sized index arrays') M = operator.index(np.max(row)) + 1 N = operator.index(np.max(col)) + 1 self._shape = check_shape((M, N)) else: # Use 2 steps to ensure shape has length 2. M, N = shape self._shape = check_shape((M, N)) idx_dtype = self._get_index_dtype((row, col), maxval=max(self.shape), check_contents=True) self.row = np.array(row, copy=copy, dtype=idx_dtype) self.col = np.array(col, copy=copy, dtype=idx_dtype) self.data = getdata(obj, copy=copy, dtype=dtype) self.has_canonical_format = False else: if issparse(arg1): if arg1.format == self.format and copy: self.row = arg1.row.copy() self.col = arg1.col.copy() self.data = arg1.data.copy() self._shape = check_shape(arg1.shape) else: coo = arg1.tocoo() self.row = coo.row self.col = coo.col self.data = coo.data self._shape = check_shape(coo.shape) self.has_canonical_format = False else: #dense argument M = np.atleast_2d(np.asarray(arg1)) if M.ndim != 2: raise TypeError('expected dimension <= 2 array or matrix') self._shape = check_shape(M.shape) if shape is not None: if check_shape(shape) != self._shape: raise ValueError('inconsistent shapes: %s != %s' % (shape, self._shape)) index_dtype = self._get_index_dtype(maxval=max(self._shape)) row, col = M.nonzero() self.row = row.astype(index_dtype, copy=False) self.col = col.astype(index_dtype, copy=False) self.data = M[self.row, self.col] self.has_canonical_format = True if dtype is not None: self.data = self.data.astype(dtype, copy=False) self._check() def reshape(self, *args, **kwargs): shape = check_shape(args, self.shape) order, copy = 
check_reshape_kwargs(kwargs) # Return early if reshape is not required if shape == self.shape: if copy: return self.copy() else: return self nrows, ncols = self.shape if order == 'C': # Upcast to avoid overflows: the coo_array constructor # below will downcast the results to a smaller dtype, if # possible. dtype = self._get_index_dtype(maxval=(ncols * max(0, nrows - 1) + max(0, ncols - 1))) flat_indices = np.multiply(ncols, self.row, dtype=dtype) + self.col new_row, new_col = divmod(flat_indices, shape[1]) elif order == 'F': dtype = self._get_index_dtype(maxval=(nrows * max(0, ncols - 1) + max(0, nrows - 1))) flat_indices = np.multiply(nrows, self.col, dtype=dtype) + self.row new_col, new_row = divmod(flat_indices, shape[0]) else: raise ValueError("'order' must be 'C' or 'F'") # Handle copy here rather than passing on to the constructor so that no # copy will be made of new_row and new_col regardless if copy: new_data = self.data.copy() else: new_data = self.data return self.__class__((new_data, (new_row, new_col)), shape=shape, copy=False) reshape.__doc__ = _spbase.reshape.__doc__ def _getnnz(self, axis=None): if axis is None: nnz = len(self.data) if nnz != len(self.row) or nnz != len(self.col): raise ValueError('row, column, and data array must all be the ' 'same length') if self.data.ndim != 1 or self.row.ndim != 1 or \ self.col.ndim != 1: raise ValueError('row, column, and data arrays must be 1-D') return int(nnz) if axis < 0: axis += 2 if axis == 0: return np.bincount(downcast_intp_index(self.col), minlength=self.shape[1]) elif axis == 1: return np.bincount(downcast_intp_index(self.row), minlength=self.shape[0]) else: raise ValueError('axis out of bounds') _getnnz.__doc__ = _spbase._getnnz.__doc__ def _check(self): """ Checks data structure for consistency """ # index arrays should have integer data types if self.row.dtype.kind != 'i': warn("row index array has non-integer dtype (%s) " % self.row.dtype.name) if self.col.dtype.kind != 'i': warn("col index array 
has non-integer dtype (%s) " % self.col.dtype.name) idx_dtype = self._get_index_dtype((self.row, self.col), maxval=max(self.shape)) self.row = np.asarray(self.row, dtype=idx_dtype) self.col = np.asarray(self.col, dtype=idx_dtype) self.data = to_native(self.data) if self.nnz > 0: if self.row.max() >= self.shape[0]: raise ValueError('row index exceeds matrix dimensions') if self.col.max() >= self.shape[1]: raise ValueError('column index exceeds matrix dimensions') if self.row.min() < 0: raise ValueError('negative row index found') if self.col.min() < 0: raise ValueError('negative column index found') def transpose(self, axes=None, copy=False): if axes is not None: raise ValueError("Sparse matrices do not support " "an 'axes' parameter because swapping " "dimensions is the only logical permutation.") M, N = self.shape return self.__class__((self.data, (self.col, self.row)), shape=(N, M), copy=copy) transpose.__doc__ = _spbase.transpose.__doc__ def resize(self, *shape): shape = check_shape(shape) new_M, new_N = shape M, N = self.shape if new_M < M or new_N < N: mask = np.logical_and(self.row < new_M, self.col < new_N) if not mask.all(): self.row = self.row[mask] self.col = self.col[mask] self.data = self.data[mask] self._shape = shape resize.__doc__ = _spbase.resize.__doc__ def toarray(self, order=None, out=None): """See the docstring for `_spbase.toarray`.""" B = self._process_toarray_args(order, out) fortran = int(B.flags.f_contiguous) if not fortran and not B.flags.c_contiguous: raise ValueError("Output array must be C or F contiguous") M,N = self.shape coo_todense(M, N, self.nnz, self.row, self.col, self.data, B.ravel('A'), fortran) return B def tocsc(self, copy=False): """Convert this matrix to Compressed Sparse Column format Duplicate entries will be summed together. 
Examples -------- >>> from numpy import array >>> from scipy.sparse import coo_array >>> row = array([0, 0, 1, 3, 1, 0, 0]) >>> col = array([0, 2, 1, 3, 1, 0, 0]) >>> data = array([1, 1, 1, 1, 1, 1, 1]) >>> A = coo_array((data, (row, col)), shape=(4, 4)).tocsc() >>> A.toarray() array([[3, 0, 1, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]]) """ if self.nnz == 0: return self._csc_container(self.shape, dtype=self.dtype) else: M,N = self.shape idx_dtype = self._get_index_dtype( (self.col, self.row), maxval=max(self.nnz, M) ) row = self.row.astype(idx_dtype, copy=False) col = self.col.astype(idx_dtype, copy=False) indptr = np.empty(N + 1, dtype=idx_dtype) indices = np.empty_like(row, dtype=idx_dtype) data = np.empty_like(self.data, dtype=upcast(self.dtype)) coo_tocsr(N, M, self.nnz, col, row, self.data, indptr, indices, data) x = self._csc_container((data, indices, indptr), shape=self.shape) if not self.has_canonical_format: x.sum_duplicates() return x def tocsr(self, copy=False): """Convert this matrix to Compressed Sparse Row format Duplicate entries will be summed together. 
Examples -------- >>> from numpy import array >>> from scipy.sparse import coo_array >>> row = array([0, 0, 1, 3, 1, 0, 0]) >>> col = array([0, 2, 1, 3, 1, 0, 0]) >>> data = array([1, 1, 1, 1, 1, 1, 1]) >>> A = coo_array((data, (row, col)), shape=(4, 4)).tocsr() >>> A.toarray() array([[3, 0, 1, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]]) """ if self.nnz == 0: return self._csr_container(self.shape, dtype=self.dtype) else: M,N = self.shape idx_dtype = self._get_index_dtype( (self.row, self.col), maxval=max(self.nnz, N) ) row = self.row.astype(idx_dtype, copy=False) col = self.col.astype(idx_dtype, copy=False) indptr = np.empty(M + 1, dtype=idx_dtype) indices = np.empty_like(col, dtype=idx_dtype) data = np.empty_like(self.data, dtype=upcast(self.dtype)) coo_tocsr(M, N, self.nnz, row, col, self.data, indptr, indices, data) x = self._csr_container((data, indices, indptr), shape=self.shape) if not self.has_canonical_format: x.sum_duplicates() return x def tocoo(self, copy=False): if copy: return self.copy() else: return self tocoo.__doc__ = _spbase.tocoo.__doc__ def todia(self, copy=False): self.sum_duplicates() ks = self.col - self.row # the diagonal for each nonzero diags, diag_idx = np.unique(ks, return_inverse=True) if len(diags) > 100: # probably undesired, should todia() have a maxdiags parameter? 
warn("Constructing a DIA matrix with %d diagonals " "is inefficient" % len(diags), SparseEfficiencyWarning) #initialize and fill in data array if self.data.size == 0: data = np.zeros((0, 0), dtype=self.dtype) else: data = np.zeros((len(diags), self.col.max()+1), dtype=self.dtype) data[diag_idx, self.col] = self.data return self._dia_container((data, diags), shape=self.shape) todia.__doc__ = _spbase.todia.__doc__ def todok(self, copy=False): self.sum_duplicates() dok = self._dok_container((self.shape), dtype=self.dtype) dok._update(zip(zip(self.row,self.col),self.data)) return dok todok.__doc__ = _spbase.todok.__doc__ def diagonal(self, k=0): rows, cols = self.shape if k <= -rows or k >= cols: return np.empty(0, dtype=self.data.dtype) diag = np.zeros(min(rows + min(k, 0), cols - max(k, 0)), dtype=self.dtype) diag_mask = (self.row + k) == self.col if self.has_canonical_format: row = self.row[diag_mask] data = self.data[diag_mask] else: row, _, data = self._sum_duplicates(self.row[diag_mask], self.col[diag_mask], self.data[diag_mask]) diag[row + min(k, 0)] = data return diag diagonal.__doc__ = _data_matrix.diagonal.__doc__ def _setdiag(self, values, k): M, N = self.shape if values.ndim and not len(values): return idx_dtype = self.row.dtype # Determine which triples to keep and where to put the new ones. full_keep = self.col - self.row != k if k < 0: max_index = min(M+k, N) if values.ndim: max_index = min(max_index, len(values)) keep = np.logical_or(full_keep, self.col >= max_index) new_row = np.arange(-k, -k + max_index, dtype=idx_dtype) new_col = np.arange(max_index, dtype=idx_dtype) else: max_index = min(M, N-k) if values.ndim: max_index = min(max_index, len(values)) keep = np.logical_or(full_keep, self.row >= max_index) new_row = np.arange(max_index, dtype=idx_dtype) new_col = np.arange(k, k + max_index, dtype=idx_dtype) # Define the array of data consisting of the entries to be added. 
if values.ndim: new_data = values[:max_index] else: new_data = np.empty(max_index, dtype=self.dtype) new_data[:] = values # Update the internal structure. self.row = np.concatenate((self.row[keep], new_row)) self.col = np.concatenate((self.col[keep], new_col)) self.data = np.concatenate((self.data[keep], new_data)) self.has_canonical_format = False # needed by _data_matrix def _with_data(self,data,copy=True): """Returns a matrix with the same sparsity structure as self, but with different data. By default the index arrays (i.e. .row and .col) are copied. """ if copy: return self.__class__((data, (self.row.copy(), self.col.copy())), shape=self.shape, dtype=data.dtype) else: return self.__class__((data, (self.row, self.col)), shape=self.shape, dtype=data.dtype) def sum_duplicates(self): """Eliminate duplicate matrix entries by adding them together This is an *in place* operation """ if self.has_canonical_format: return summed = self._sum_duplicates(self.row, self.col, self.data) self.row, self.col, self.data = summed self.has_canonical_format = True def _sum_duplicates(self, row, col, data): # Assumes (data, row, col) not in canonical format. if len(data) == 0: return row, col, data # Sort indices w.r.t. rows, then cols. This corresponds to C-order, # which we rely on for argmin/argmax to return the first index in the # same way that numpy does (in the case of ties). 
order = np.lexsort((col, row)) row = row[order] col = col[order] data = data[order] unique_mask = ((row[1:] != row[:-1]) | (col[1:] != col[:-1])) unique_mask = np.append(True, unique_mask) row = row[unique_mask] col = col[unique_mask] unique_inds, = np.nonzero(unique_mask) data = np.add.reduceat(data, unique_inds, dtype=self.dtype) return row, col, data def eliminate_zeros(self): """Remove zero entries from the matrix This is an *in place* operation """ mask = self.data != 0 self.data = self.data[mask] self.row = self.row[mask] self.col = self.col[mask] ####################### # Arithmetic handlers # ####################### def _add_dense(self, other): if other.shape != self.shape: raise ValueError('Incompatible shapes ({} and {})' .format(self.shape, other.shape)) dtype = upcast_char(self.dtype.char, other.dtype.char) result = np.array(other, dtype=dtype, copy=True) fortran = int(result.flags.f_contiguous) M, N = self.shape coo_todense(M, N, self.nnz, self.row, self.col, self.data, result.ravel('A'), fortran) return self._container(result, copy=False) def _mul_vector(self, other): #output array result = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char, other.dtype.char)) coo_matvec(self.nnz, self.row, self.col, self.data, other, result) return result def _mul_multivector(self, other): result = np.zeros((other.shape[1], self.shape[0]), dtype=upcast_char(self.dtype.char, other.dtype.char)) for i, col in enumerate(other.T): coo_matvec(self.nnz, self.row, self.col, self.data, col, result[i]) return result.T.view(type=type(other)) def isspmatrix_coo(x): """Is `x` of coo_matrix type? 
Parameters ---------- x object to check for being a coo matrix Returns ------- bool True if `x` is a coo matrix, False otherwise Examples -------- >>> from scipy.sparse import coo_array, coo_matrix, csr_matrix, isspmatrix_coo >>> isspmatrix_coo(coo_matrix([[5]])) True >>> isspmatrix_coo(coo_array([[5]])) False >>> isspmatrix_coo(csr_matrix([[5]])) False """ return isinstance(x, coo_matrix) # This namespace class separates array from matrix with isinstance class coo_array(_coo_base, sparray): pass coo_array.__doc__ = _coo_base.__doc__ class coo_matrix(spmatrix, _coo_base): pass coo_matrix.__doc__ = _array_doc_to_matrix(_coo_base.__doc__)
23,095
35.200627
106
py
scipy
scipy-main/scipy/sparse/lil.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.sparse` namespace for importing the functions # included below. import warnings from . import _lil __all__ = [ # noqa: F822 'INT_TYPES', 'IndexMixin', 'bisect_left', 'check_reshape_kwargs', 'check_shape', 'get_index_dtype', 'getdtype', 'isscalarlike', 'isshape', 'isspmatrix', 'isspmatrix_lil', 'lil_matrix', 'spmatrix', 'upcast_scalar', ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.sparse.lil is deprecated and has no attribute " f"{name}. Try looking in scipy.sparse instead.") warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, " "the `scipy.sparse.lil` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_lil, name)
981
22.380952
76
py
scipy
scipy-main/scipy/sparse/_construct.py
"""Functions to construct sparse matrices and arrays """ __docformat__ = "restructuredtext en" __all__ = ['spdiags', 'eye', 'identity', 'kron', 'kronsum', 'hstack', 'vstack', 'bmat', 'rand', 'random', 'diags', 'block_diag', 'diags_array'] import numbers from functools import partial import numpy as np from scipy._lib._util import check_random_state, rng_integers from ._sputils import upcast, get_index_dtype, isscalarlike from ._sparsetools import csr_hstack from ._bsr import bsr_matrix from ._coo import coo_matrix from ._csc import csc_matrix from ._csr import csr_matrix from ._dia import dia_matrix, dia_array from ._base import issparse def spdiags(data, diags, m=None, n=None, format=None): """ Return a sparse matrix from diagonals. Parameters ---------- data : array_like Matrix diagonals stored row-wise diags : sequence of int or an int Diagonals to set: * k = 0 the main diagonal * k > 0 the kth upper diagonal * k < 0 the kth lower diagonal m, n : int, tuple, optional Shape of the result. If `n` is None and `m` is a given tuple, the shape is this tuple. If omitted, the matrix is square and its shape is len(data[0]). format : str, optional Format of the result. By default (format=None) an appropriate sparse matrix format is returned. This choice is subject to change. See Also -------- diags : more convenient form of this function dia_matrix : the sparse DIAgonal format. Examples -------- >>> import numpy as np >>> from scipy.sparse import spdiags >>> data = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) >>> diags = np.array([0, -1, 2]) >>> spdiags(data, diags, 4, 4).toarray() array([[1, 0, 3, 0], [1, 2, 0, 4], [0, 2, 3, 0], [0, 0, 3, 4]]) """ if m is None and n is None: m = n = len(data[0]) elif n is None: m, n = m return dia_matrix((data, diags), shape=(m, n)).asformat(format) def diags_array(diagonals, /, *, offsets=0, shape=None, format=None, dtype=None): """ Construct a sparse array from diagonals. 
Parameters ---------- diagonals : sequence of array_like Sequence of arrays containing the array diagonals, corresponding to `offsets`. offsets : sequence of int or an int, optional Diagonals to set: - k = 0 the main diagonal (default) - k > 0 the kth upper diagonal - k < 0 the kth lower diagonal shape : tuple of int, optional Shape of the result. If omitted, a square array large enough to contain the diagonals is returned. format : {"dia", "csr", "csc", "lil", ...}, optional Matrix format of the result. By default (format=None) an appropriate sparse array format is returned. This choice is subject to change. dtype : dtype, optional Data type of the array. Notes ----- The result from `diags_array` is the sparse equivalent of:: np.diag(diagonals[0], offsets[0]) + ... + np.diag(diagonals[k], offsets[k]) Repeated diagonal offsets are disallowed. .. versionadded:: 1.11 Examples -------- >>> from scipy.sparse import diags_array >>> diagonals = [[1, 2, 3, 4], [1, 2, 3], [1, 2]] >>> diags_array(diagonals, offsets=[0, -1, 2]).toarray() array([[1, 0, 1, 0], [1, 2, 0, 2], [0, 2, 3, 0], [0, 0, 3, 4]]) Broadcasting of scalars is supported (but shape needs to be specified): >>> diags_array([1, -2, 1], offsets=[-1, 0, 1], shape=(4, 4)).toarray() array([[-2., 1., 0., 0.], [ 1., -2., 1., 0.], [ 0., 1., -2., 1.], [ 0., 0., 1., -2.]]) If only one diagonal is wanted (as in `numpy.diag`), the following works as well: >>> diags_array([1, 2, 3], offsets=1).toarray() array([[ 0., 1., 0., 0.], [ 0., 0., 2., 0.], [ 0., 0., 0., 3.], [ 0., 0., 0., 0.]]) """ # if offsets is not a sequence, assume that there's only one diagonal if isscalarlike(offsets): # now check that there's actually only one diagonal if len(diagonals) == 0 or isscalarlike(diagonals[0]): diagonals = [np.atleast_1d(diagonals)] else: raise ValueError("Different number of diagonals and offsets.") else: diagonals = list(map(np.atleast_1d, diagonals)) offsets = np.atleast_1d(offsets) # Basic check if len(diagonals) != 
len(offsets): raise ValueError("Different number of diagonals and offsets.") # Determine shape, if omitted if shape is None: m = len(diagonals[0]) + abs(int(offsets[0])) shape = (m, m) # Determine data type, if omitted if dtype is None: dtype = np.common_type(*diagonals) # Construct data array m, n = shape M = max([min(m + offset, n - offset) + max(0, offset) for offset in offsets]) M = max(0, M) data_arr = np.zeros((len(offsets), M), dtype=dtype) K = min(m, n) for j, diagonal in enumerate(diagonals): offset = offsets[j] k = max(0, offset) length = min(m + offset, n - offset, K) if length < 0: raise ValueError("Offset %d (index %d) out of bounds" % (offset, j)) try: data_arr[j, k:k+length] = diagonal[...,:length] except ValueError as e: if len(diagonal) != length and len(diagonal) != 1: raise ValueError( "Diagonal length (index %d: %d at offset %d) does not " "agree with array size (%d, %d)." % ( j, len(diagonal), offset, m, n)) from e raise return dia_array((data_arr, offsets), shape=(m, n)).asformat(format) def diags(diagonals, offsets=0, shape=None, format=None, dtype=None): """ Construct a sparse matrix from diagonals. Parameters ---------- diagonals : sequence of array_like Sequence of arrays containing the matrix diagonals, corresponding to `offsets`. offsets : sequence of int or an int, optional Diagonals to set: - k = 0 the main diagonal (default) - k > 0 the kth upper diagonal - k < 0 the kth lower diagonal shape : tuple of int, optional Shape of the result. If omitted, a square matrix large enough to contain the diagonals is returned. format : {"dia", "csr", "csc", "lil", ...}, optional Matrix format of the result. By default (format=None) an appropriate sparse matrix format is returned. This choice is subject to change. dtype : dtype, optional Data type of the matrix. See Also -------- spdiags : construct matrix from diagonals Notes ----- This function differs from `spdiags` in the way it handles off-diagonals. 
The result from `diags` is the sparse equivalent of:: np.diag(diagonals[0], offsets[0]) + ... + np.diag(diagonals[k], offsets[k]) Repeated diagonal offsets are disallowed. .. versionadded:: 0.11 Examples -------- >>> from scipy.sparse import diags >>> diagonals = [[1, 2, 3, 4], [1, 2, 3], [1, 2]] >>> diags(diagonals, [0, -1, 2]).toarray() array([[1, 0, 1, 0], [1, 2, 0, 2], [0, 2, 3, 0], [0, 0, 3, 4]]) Broadcasting of scalars is supported (but shape needs to be specified): >>> diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)).toarray() array([[-2., 1., 0., 0.], [ 1., -2., 1., 0.], [ 0., 1., -2., 1.], [ 0., 0., 1., -2.]]) If only one diagonal is wanted (as in `numpy.diag`), the following works as well: >>> diags([1, 2, 3], 1).toarray() array([[ 0., 1., 0., 0.], [ 0., 0., 2., 0.], [ 0., 0., 0., 3.], [ 0., 0., 0., 0.]]) """ A = diags_array(diagonals, offsets=offsets, shape=shape, dtype=dtype) return dia_matrix(A).asformat(format) def identity(n, dtype='d', format=None): """Identity matrix in sparse format Returns an identity matrix with shape (n,n) using a given sparse format and dtype. Parameters ---------- n : int Shape of the identity matrix. dtype : dtype, optional Data type of the matrix format : str, optional Sparse format of the result, e.g., format="csr", etc. Examples -------- >>> from scipy.sparse import identity >>> identity(3).toarray() array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) >>> identity(3, dtype='int8', format='dia') <3x3 sparse matrix of type '<class 'numpy.int8'>' with 3 stored elements (1 diagonals) in DIAgonal format> """ return eye(n, n, dtype=dtype, format=format) def eye(m, n=None, k=0, dtype=float, format=None): """Sparse matrix with ones on diagonal Returns a sparse (m x n) matrix where the kth diagonal is all ones and everything else is zeros. Parameters ---------- m : int Number of rows in the matrix. n : int, optional Number of columns. Default: `m`. k : int, optional Diagonal to place ones on. Default: 0 (main diagonal). 
dtype : dtype, optional Data type of the matrix. format : str, optional Sparse format of the result, e.g., format="csr", etc. Examples -------- >>> import numpy as np >>> from scipy import sparse >>> sparse.eye(3).toarray() array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) >>> sparse.eye(3, dtype=np.int8) <3x3 sparse matrix of type '<class 'numpy.int8'>' with 3 stored elements (1 diagonals) in DIAgonal format> """ if n is None: n = m m,n = int(m),int(n) if m == n and k == 0: # fast branch for special formats if format in ['csr', 'csc']: idx_dtype = get_index_dtype(maxval=n) indptr = np.arange(n+1, dtype=idx_dtype) indices = np.arange(n, dtype=idx_dtype) data = np.ones(n, dtype=dtype) cls = {'csr': csr_matrix, 'csc': csc_matrix}[format] return cls((data,indices,indptr),(n,n)) elif format == 'coo': idx_dtype = get_index_dtype(maxval=n) row = np.arange(n, dtype=idx_dtype) col = np.arange(n, dtype=idx_dtype) data = np.ones(n, dtype=dtype) return coo_matrix((data, (row, col)), (n, n)) data = np.ones((1, max(0, min(m + k, n))), dtype=dtype) return diags(data, offsets=[k], shape=(m, n), dtype=dtype).asformat(format) def kron(A, B, format=None): """kronecker product of sparse matrices A and B Parameters ---------- A : sparse or dense matrix first matrix of the product B : sparse or dense matrix second matrix of the product format : str, optional format of the result (e.g. 
"csr") Returns ------- kronecker product in a sparse matrix format Examples -------- >>> import numpy as np >>> from scipy import sparse >>> A = sparse.csr_matrix(np.array([[0, 2], [5, 0]])) >>> B = sparse.csr_matrix(np.array([[1, 2], [3, 4]])) >>> sparse.kron(A, B).toarray() array([[ 0, 0, 2, 4], [ 0, 0, 6, 8], [ 5, 10, 0, 0], [15, 20, 0, 0]]) >>> sparse.kron(A, [[1, 2], [3, 4]]).toarray() array([[ 0, 0, 2, 4], [ 0, 0, 6, 8], [ 5, 10, 0, 0], [15, 20, 0, 0]]) """ B = coo_matrix(B) if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]: # B is fairly dense, use BSR A = csr_matrix(A,copy=True) output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1]) if A.nnz == 0 or B.nnz == 0: # kronecker product is the zero matrix return coo_matrix(output_shape).asformat(format) B = B.toarray() data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1]) data = data * B return bsr_matrix((data,A.indices,A.indptr), shape=output_shape) else: # use COO A = coo_matrix(A) output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1]) if A.nnz == 0 or B.nnz == 0: # kronecker product is the zero matrix return coo_matrix(output_shape).asformat(format) # expand entries of a into blocks row = A.row.repeat(B.nnz) col = A.col.repeat(B.nnz) data = A.data.repeat(B.nnz) if max(A.shape[0]*B.shape[0], A.shape[1]*B.shape[1]) > np.iinfo('int32').max: row = row.astype(np.int64) col = col.astype(np.int64) row *= B.shape[0] col *= B.shape[1] # increment block indices row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz) row += B.row col += B.col row,col = row.reshape(-1),col.reshape(-1) # compute block entries data = data.reshape(-1,B.nnz) * B.data data = data.reshape(-1) return coo_matrix((data,(row,col)), shape=output_shape).asformat(format) def kronsum(A, B, format=None): """kronecker sum of sparse matrices A and B Kronecker sum of two sparse matrices is a sum of two Kronecker products kron(I_n,A) + kron(B,I_m) where A has shape (m,m) and B has shape (n,n) and I_m and I_n 
are identity matrices of shape (m,m) and (n,n), respectively. Parameters ---------- A square matrix B square matrix format : str format of the result (e.g. "csr") Returns ------- kronecker sum in a sparse matrix format Examples -------- """ A = coo_matrix(A) B = coo_matrix(B) if A.shape[0] != A.shape[1]: raise ValueError('A is not square') if B.shape[0] != B.shape[1]: raise ValueError('B is not square') dtype = upcast(A.dtype, B.dtype) L = kron(eye(B.shape[0],dtype=dtype), A, format=format) R = kron(B, eye(A.shape[0],dtype=dtype), format=format) return (L+R).asformat(format) # since L + R is not always same format def _compressed_sparse_stack(blocks, axis): """ Stacking fast path for CSR/CSC matrices (i) vstack for CSR, (ii) hstack for CSC. """ other_axis = 1 if axis == 0 else 0 data = np.concatenate([b.data for b in blocks]) constant_dim = blocks[0].shape[other_axis] idx_dtype = get_index_dtype(arrays=[b.indptr for b in blocks], maxval=max(data.size, constant_dim)) indices = np.empty(data.size, dtype=idx_dtype) indptr = np.empty(sum(b.shape[axis] for b in blocks) + 1, dtype=idx_dtype) last_indptr = idx_dtype(0) sum_dim = 0 sum_indices = 0 for b in blocks: if b.shape[other_axis] != constant_dim: raise ValueError(f'incompatible dimensions for axis {other_axis}') indices[sum_indices:sum_indices+b.indices.size] = b.indices sum_indices += b.indices.size idxs = slice(sum_dim, sum_dim + b.shape[axis]) indptr[idxs] = b.indptr[:-1] indptr[idxs] += last_indptr sum_dim += b.shape[axis] last_indptr += b.indptr[-1] indptr[-1] = last_indptr if axis == 0: return csr_matrix((data, indices, indptr), shape=(sum_dim, constant_dim)) else: return csc_matrix((data, indices, indptr), shape=(constant_dim, sum_dim)) def _stack_along_minor_axis(blocks, axis): """ Stacking fast path for CSR/CSC matrices along the minor axis (i) hstack for CSR, (ii) vstack for CSC. 
""" n_blocks = len(blocks) if n_blocks == 0: raise ValueError('Missing block matrices') if n_blocks == 1: return blocks[0] # check for incompatible dimensions other_axis = 1 if axis == 0 else 0 other_axis_dims = {b.shape[other_axis] for b in blocks} if len(other_axis_dims) > 1: raise ValueError(f'Mismatching dimensions along axis {other_axis}: ' f'{other_axis_dims}') constant_dim, = other_axis_dims # Do the stacking indptr_list = [b.indptr for b in blocks] data_cat = np.concatenate([b.data for b in blocks]) # Need to check if any indices/indptr, would be too large post- # concatenation for np.int32: # - The max value of indices is the output array's stacking-axis length - 1 # - The max value in indptr is the number of non-zero entries. This is # exceedingly unlikely to require int64, but is checked out of an # abundance of caution. sum_dim = sum(b.shape[axis] for b in blocks) nnz = sum(len(b.indices) for b in blocks) idx_dtype = get_index_dtype(maxval=max(sum_dim - 1, nnz)) stack_dim_cat = np.array([b.shape[axis] for b in blocks], dtype=idx_dtype) if data_cat.size > 0: indptr_cat = np.concatenate(indptr_list).astype(idx_dtype) indices_cat = (np.concatenate([b.indices for b in blocks]) .astype(idx_dtype)) indptr = np.empty(constant_dim + 1, dtype=idx_dtype) indices = np.empty_like(indices_cat) data = np.empty_like(data_cat) csr_hstack(n_blocks, constant_dim, stack_dim_cat, indptr_cat, indices_cat, data_cat, indptr, indices, data) else: indptr = np.zeros(constant_dim + 1, dtype=idx_dtype) indices = np.empty(0, dtype=idx_dtype) data = np.empty(0, dtype=data_cat.dtype) if axis == 0: return csc_matrix((data, indices, indptr), shape=(sum_dim, constant_dim)) else: return csr_matrix((data, indices, indptr), shape=(constant_dim, sum_dim)) def hstack(blocks, format=None, dtype=None): """ Stack sparse matrices horizontally (column wise) Parameters ---------- blocks sequence of sparse matrices with compatible shapes format : str sparse format of the result (e.g., "csr") by 
default an appropriate sparse matrix format is returned. This choice is subject to change. dtype : dtype, optional The data-type of the output matrix. If not given, the dtype is determined from that of `blocks`. See Also -------- vstack : stack sparse matrices vertically (row wise) Examples -------- >>> from scipy.sparse import coo_matrix, hstack >>> A = coo_matrix([[1, 2], [3, 4]]) >>> B = coo_matrix([[5], [6]]) >>> hstack([A,B]).toarray() array([[1, 2, 5], [3, 4, 6]]) """ return bmat([blocks], format=format, dtype=dtype) def vstack(blocks, format=None, dtype=None): """ Stack sparse matrices vertically (row wise) Parameters ---------- blocks sequence of sparse matrices with compatible shapes format : str, optional sparse format of the result (e.g., "csr") by default an appropriate sparse matrix format is returned. This choice is subject to change. dtype : dtype, optional The data-type of the output matrix. If not given, the dtype is determined from that of `blocks`. See Also -------- hstack : stack sparse matrices horizontally (column wise) Examples -------- >>> from scipy.sparse import coo_matrix, vstack >>> A = coo_matrix([[1, 2], [3, 4]]) >>> B = coo_matrix([[5, 6]]) >>> vstack([A, B]).toarray() array([[1, 2], [3, 4], [5, 6]]) """ return bmat([[b] for b in blocks], format=format, dtype=dtype) def bmat(blocks, format=None, dtype=None): """ Build a sparse matrix from sparse sub-blocks Parameters ---------- blocks : array_like Grid of sparse matrices with compatible shapes. An entry of None implies an all-zero matrix. format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional The sparse format of the result (e.g. "csr"). By default an appropriate sparse matrix format is returned. This choice is subject to change. dtype : dtype, optional The data-type of the output matrix. If not given, the dtype is determined from that of `blocks`. 
Returns ------- bmat : sparse matrix See Also -------- block_diag, diags Examples -------- >>> from scipy.sparse import coo_matrix, bmat >>> A = coo_matrix([[1, 2], [3, 4]]) >>> B = coo_matrix([[5], [6]]) >>> C = coo_matrix([[7]]) >>> bmat([[A, B], [None, C]]).toarray() array([[1, 2, 5], [3, 4, 6], [0, 0, 7]]) >>> bmat([[A, None], [None, C]]).toarray() array([[1, 2, 0], [3, 4, 0], [0, 0, 7]]) """ blocks = np.asarray(blocks, dtype='object') if blocks.ndim != 2: raise ValueError('blocks must be 2-D') M,N = blocks.shape # check for fast path cases if (format in (None, 'csr') and all(isinstance(b, csr_matrix) for b in blocks.flat)): if N > 1: # stack along columns (axis 1): blocks = [[_stack_along_minor_axis(blocks[b, :], 1)] for b in range(M)] # must have shape: (M, 1) blocks = np.asarray(blocks, dtype='object') # stack along rows (axis 0): A = _compressed_sparse_stack(blocks[:, 0], 0) if dtype is not None: A = A.astype(dtype) return A elif (format in (None, 'csc') and all(isinstance(b, csc_matrix) for b in blocks.flat)): if M > 1: # stack along rows (axis 0): blocks = [[_stack_along_minor_axis(blocks[:, b], 0) for b in range(N)]] # must have shape: (1, N) blocks = np.asarray(blocks, dtype='object') # stack along columns (axis 1): A = _compressed_sparse_stack(blocks[0, :], 1) if dtype is not None: A = A.astype(dtype) return A block_mask = np.zeros(blocks.shape, dtype=bool) brow_lengths = np.zeros(M, dtype=np.int64) bcol_lengths = np.zeros(N, dtype=np.int64) # convert everything to COO format for i in range(M): for j in range(N): if blocks[i,j] is not None: A = coo_matrix(blocks[i,j]) blocks[i,j] = A block_mask[i,j] = True if brow_lengths[i] == 0: brow_lengths[i] = A.shape[0] elif brow_lengths[i] != A.shape[0]: msg = (f'blocks[{i},:] has incompatible row dimensions. 
' f'Got blocks[{i},{j}].shape[0] == {A.shape[0]}, ' f'expected {brow_lengths[i]}.') raise ValueError(msg) if bcol_lengths[j] == 0: bcol_lengths[j] = A.shape[1] elif bcol_lengths[j] != A.shape[1]: msg = (f'blocks[:,{j}] has incompatible column ' f'dimensions. ' f'Got blocks[{i},{j}].shape[1] == {A.shape[1]}, ' f'expected {bcol_lengths[j]}.') raise ValueError(msg) nnz = sum(block.nnz for block in blocks[block_mask]) if dtype is None: all_dtypes = [blk.dtype for blk in blocks[block_mask]] dtype = upcast(*all_dtypes) if all_dtypes else None row_offsets = np.append(0, np.cumsum(brow_lengths)) col_offsets = np.append(0, np.cumsum(bcol_lengths)) shape = (row_offsets[-1], col_offsets[-1]) data = np.empty(nnz, dtype=dtype) idx_dtype = get_index_dtype(maxval=max(shape)) row = np.empty(nnz, dtype=idx_dtype) col = np.empty(nnz, dtype=idx_dtype) nnz = 0 ii, jj = np.nonzero(block_mask) for i, j in zip(ii, jj): B = blocks[i, j] idx = slice(nnz, nnz + B.nnz) data[idx] = B.data np.add(B.row, row_offsets[i], out=row[idx], dtype=idx_dtype) np.add(B.col, col_offsets[j], out=col[idx], dtype=idx_dtype) nnz += B.nnz return coo_matrix((data, (row, col)), shape=shape).asformat(format) def block_diag(mats, format=None, dtype=None): """ Build a block diagonal sparse matrix from provided matrices. Parameters ---------- mats : sequence of matrices Input matrices. format : str, optional The sparse format of the result (e.g., "csr"). If not given, the matrix is returned in "coo" format. dtype : dtype specifier, optional The data-type of the output matrix. If not given, the dtype is determined from that of `blocks`. Returns ------- res : sparse matrix Notes ----- .. 
versionadded:: 0.11.0 See Also -------- bmat, diags Examples -------- >>> from scipy.sparse import coo_matrix, block_diag >>> A = coo_matrix([[1, 2], [3, 4]]) >>> B = coo_matrix([[5], [6]]) >>> C = coo_matrix([[7]]) >>> block_diag((A, B, C)).toarray() array([[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 5, 0], [0, 0, 6, 0], [0, 0, 0, 7]]) """ row = [] col = [] data = [] r_idx = 0 c_idx = 0 for a in mats: if isinstance(a, (list, numbers.Number)): a = coo_matrix(a) nrows, ncols = a.shape if issparse(a): a = a.tocoo() row.append(a.row + r_idx) col.append(a.col + c_idx) data.append(a.data) else: a_row, a_col = np.divmod(np.arange(nrows*ncols), ncols) row.append(a_row + r_idx) col.append(a_col + c_idx) data.append(a.ravel()) r_idx += nrows c_idx += ncols row = np.concatenate(row) col = np.concatenate(col) data = np.concatenate(data) return coo_matrix((data, (row, col)), shape=(r_idx, c_idx), dtype=dtype).asformat(format) def random(m, n, density=0.01, format='coo', dtype=None, random_state=None, data_rvs=None): """Generate a sparse matrix of the given shape and density with randomly distributed values. .. warning:: Since numpy 1.17, passing a ``np.random.Generator`` (e.g. ``np.random.default_rng``) for ``random_state`` will lead to much faster execution times. A much slower implementation is used by default for backwards compatibility. Parameters ---------- m, n : int shape of the matrix density : real, optional density of the generated matrix: density equal to one means a full matrix, density of 0 means a matrix with no non-zero items. format : str, optional sparse matrix format. dtype : dtype, optional type of the returned matrix values. random_state : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional - If `seed` is None (or `np.random`), the `numpy.random.RandomState` singleton is used. - If `seed` is an int, a new ``RandomState`` instance is used, seeded with `seed`. 
- If `seed` is already a ``Generator`` or ``RandomState`` instance then that instance is used. This random state will be used for sampling the sparsity structure, but not necessarily for sampling the values of the structurally nonzero entries of the matrix. data_rvs : callable, optional Samples a requested number of random values. This function should take a single argument specifying the length of the ndarray that it will return. The structurally nonzero entries of the sparse random matrix will be taken from the array sampled by this function. By default, uniform [0, 1) random values will be sampled using the same random state as is used for sampling the sparsity structure. Returns ------- res : sparse matrix Examples -------- Passing a ``np.random.Generator`` instance for better performance: >>> from scipy.sparse import random >>> from scipy import stats >>> from numpy.random import default_rng >>> rng = default_rng() >>> S = random(3, 4, density=0.25, random_state=rng) Proving a sampler for the values: >>> rvs = stats.poisson(25, loc=10).rvs >>> S = random(3, 4, density=0.25, random_state=rng, data_rvs=rvs) >>> S.A array([[ 36., 0., 33., 0.], # random [ 0., 0., 0., 0.], [ 0., 0., 36., 0.]]) Using a custom distribution: >>> class CustomDistribution(stats.rv_continuous): ... def _rvs(self, size=None, random_state=None): ... return random_state.standard_normal(size) >>> X = CustomDistribution(seed=rng) >>> Y = X() # get a frozen version of the distribution >>> S = random(3, 4, density=0.25, random_state=rng, data_rvs=Y.rvs) >>> S.A array([[ 0. , 0. , 0. , 0. ], # random [ 0.13569738, 1.9467163 , -0.81205367, 0. ], [ 0. , 0. , 0. , 0. 
]]) """ if density < 0 or density > 1: raise ValueError("density expected to be 0 <= density <= 1") dtype = np.dtype(dtype) mn = m * n tp = np.intc if mn > np.iinfo(tp).max: tp = np.int64 if mn > np.iinfo(tp).max: msg = """\ Trying to generate a random sparse matrix such as the product of dimensions is greater than %d - this is not supported on this machine """ raise ValueError(msg % np.iinfo(tp).max) # Number of non zero values k = int(round(density * m * n)) random_state = check_random_state(random_state) if data_rvs is None: if np.issubdtype(dtype, np.integer): def data_rvs(n): return rng_integers(random_state, np.iinfo(dtype).min, np.iinfo(dtype).max, n, dtype=dtype) elif np.issubdtype(dtype, np.complexfloating): def data_rvs(n): return (random_state.uniform(size=n) + random_state.uniform(size=n) * 1j) else: data_rvs = partial(random_state.uniform, 0., 1.) ind = random_state.choice(mn, size=k, replace=False) j = np.floor(ind * 1. / m).astype(tp, copy=False) i = (ind - j * m).astype(tp, copy=False) vals = data_rvs(k).astype(dtype, copy=False) return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format, copy=False) def rand(m, n, density=0.01, format="coo", dtype=None, random_state=None): """Generate a sparse matrix of the given shape and density with uniformly distributed values. Parameters ---------- m, n : int shape of the matrix density : real, optional density of the generated matrix: density equal to one means a full matrix, density of 0 means a matrix with no non-zero items. format : str, optional sparse matrix format. dtype : dtype, optional type of the returned matrix values. random_state : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional If `seed` is None (or `np.random`), the `numpy.random.RandomState` singleton is used. If `seed` is an int, a new ``RandomState`` instance is used, seeded with `seed`. If `seed` is already a ``Generator`` or ``RandomState`` instance then that instance is used. 
Returns ------- res : sparse matrix Notes ----- Only float types are supported for now. See Also -------- scipy.sparse.random : Similar function that allows a user-specified random data source. Examples -------- >>> from scipy.sparse import rand >>> matrix = rand(3, 4, density=0.25, format="csr", random_state=42) >>> matrix <3x4 sparse matrix of type '<class 'numpy.float64'>' with 3 stored elements in Compressed Sparse Row format> >>> matrix.toarray() array([[0.05641158, 0. , 0. , 0.65088847], [0. , 0. , 0. , 0.14286682], [0. , 0. , 0. , 0. ]]) """ return random(m, n, density, format, dtype, random_state)
32,912
30.954369
85
py
scipy
scipy-main/scipy/sparse/_csc.py
"""Compressed Sparse Column matrix format""" __docformat__ = "restructuredtext en" __all__ = ['csc_array', 'csc_matrix', 'isspmatrix_csc'] import numpy as np from ._matrix import spmatrix, _array_doc_to_matrix from ._base import _spbase, sparray from ._sparsetools import csc_tocsr, expandptr from ._sputils import upcast from ._compressed import _cs_matrix class _csc_base(_cs_matrix): """ Compressed Sparse Column matrix This can be instantiated in several ways: csc_array(D) with a dense matrix or rank-2 ndarray D csc_array(S) with another sparse matrix S (equivalent to S.tocsc()) csc_array((M, N), [dtype]) to construct an empty matrix with shape (M, N) dtype is optional, defaulting to dtype='d'. csc_array((data, (row_ind, col_ind)), [shape=(M, N)]) where ``data``, ``row_ind`` and ``col_ind`` satisfy the relationship ``a[row_ind[k], col_ind[k]] = data[k]``. csc_array((data, indices, indptr), [shape=(M, N)]) is the standard CSC representation where the row indices for column i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``. If the shape parameter is not supplied, the matrix dimensions are inferred from the index arrays. Attributes ---------- dtype : dtype Data type of the matrix shape : 2-tuple Shape of the matrix ndim : int Number of dimensions (this is always 2) nnz Number of stored values, including explicit zeros data Data array of the matrix indices CSC format index array indptr CSC format index pointer array has_sorted_indices Whether indices are sorted Notes ----- Sparse matrices can be used in arithmetic operations: they support addition, subtraction, multiplication, division, and matrix power. Advantages of the CSC format - efficient arithmetic operations CSC + CSC, CSC * CSC, etc. 
- efficient column slicing - fast matrix vector products (CSR, BSR may be faster) Disadvantages of the CSC format - slow row slicing operations (consider CSR) - changes to the sparsity structure are expensive (consider LIL or DOK) Canonical format - Within each column, indices are sorted by row. - There are no duplicate entries. Examples -------- >>> import numpy as np >>> from scipy.sparse import csc_array >>> csc_array((3, 4), dtype=np.int8).toarray() array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=int8) >>> row = np.array([0, 2, 2, 0, 1, 2]) >>> col = np.array([0, 0, 1, 2, 2, 2]) >>> data = np.array([1, 2, 3, 4, 5, 6]) >>> csc_array((data, (row, col)), shape=(3, 3)).toarray() array([[1, 0, 4], [0, 0, 5], [2, 3, 6]]) >>> indptr = np.array([0, 2, 3, 6]) >>> indices = np.array([0, 2, 2, 0, 1, 2]) >>> data = np.array([1, 2, 3, 4, 5, 6]) >>> csc_array((data, indices, indptr), shape=(3, 3)).toarray() array([[1, 0, 4], [0, 0, 5], [2, 3, 6]]) """ _format = 'csc' def transpose(self, axes=None, copy=False): if axes is not None: raise ValueError("Sparse matrices do not support " "an 'axes' parameter because swapping " "dimensions is the only logical permutation.") M, N = self.shape return self._csr_container((self.data, self.indices, self.indptr), (N, M), copy=copy) transpose.__doc__ = _spbase.transpose.__doc__ def __iter__(self): yield from self.tocsr() def tocsc(self, copy=False): if copy: return self.copy() else: return self tocsc.__doc__ = _spbase.tocsc.__doc__ def tocsr(self, copy=False): M,N = self.shape idx_dtype = self._get_index_dtype((self.indptr, self.indices), maxval=max(self.nnz, N)) indptr = np.empty(M + 1, dtype=idx_dtype) indices = np.empty(self.nnz, dtype=idx_dtype) data = np.empty(self.nnz, dtype=upcast(self.dtype)) csc_tocsr(M, N, self.indptr.astype(idx_dtype), self.indices.astype(idx_dtype), self.data, indptr, indices, data) A = self._csr_container( (data, indices, indptr), shape=self.shape, copy=False ) A.has_sorted_indices = True return A 
tocsr.__doc__ = _spbase.tocsr.__doc__ def nonzero(self): # CSC can't use _cs_matrix's .nonzero method because it # returns the indices sorted for self transposed. # Get row and col indices, from _cs_matrix.tocoo major_dim, minor_dim = self._swap(self.shape) minor_indices = self.indices major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype) expandptr(major_dim, self.indptr, major_indices) row, col = self._swap((major_indices, minor_indices)) # Remove explicit zeros nz_mask = self.data != 0 row = row[nz_mask] col = col[nz_mask] # Sort them to be in C-style order ind = np.argsort(row, kind='mergesort') row = row[ind] col = col[ind] return row, col nonzero.__doc__ = _cs_matrix.nonzero.__doc__ def _getrow(self, i): """Returns a copy of row i of the matrix, as a (1 x n) CSR matrix (row vector). """ M, N = self.shape i = int(i) if i < 0: i += M if i < 0 or i >= M: raise IndexError('index (%d) out of range' % i) return self._get_submatrix(minor=i).tocsr() def _getcol(self, i): """Returns a copy of column i of the matrix, as a (m x 1) CSC matrix (column vector). 
""" M, N = self.shape i = int(i) if i < 0: i += N if i < 0 or i >= N: raise IndexError('index (%d) out of range' % i) return self._get_submatrix(major=i, copy=True) def _get_intXarray(self, row, col): return self._major_index_fancy(col)._get_submatrix(minor=row) def _get_intXslice(self, row, col): if col.step in (1, None): return self._get_submatrix(major=col, minor=row, copy=True) return self._major_slice(col)._get_submatrix(minor=row) def _get_sliceXint(self, row, col): if row.step in (1, None): return self._get_submatrix(major=col, minor=row, copy=True) return self._get_submatrix(major=col)._minor_slice(row) def _get_sliceXarray(self, row, col): return self._major_index_fancy(col)._minor_slice(row) def _get_arrayXint(self, row, col): return self._get_submatrix(major=col)._minor_index_fancy(row) def _get_arrayXslice(self, row, col): return self._major_slice(col)._minor_index_fancy(row) # these functions are used by the parent class (_cs_matrix) # to remove redudancy between csc_array and csr_matrix def _swap(self, x): """swap the members of x if this is a column-oriented matrix """ return x[1], x[0] def isspmatrix_csc(x): """Is `x` of csc_matrix type? Parameters ---------- x object to check for being a csc matrix Returns ------- bool True if `x` is a csc matrix, False otherwise Examples -------- >>> from scipy.sparse import csc_array, csc_matrix, coo_matrix, isspmatrix_csc >>> isspmatrix_csc(csc_matrix([[5]])) True >>> isspmatrix_csc(csc_array([[5]])) False >>> isspmatrix_csc(coo_matrix([[5]])) False """ return isinstance(x, csc_matrix) # This namespace class separates array from matrix with isinstance class csc_array(_csc_base, sparray): pass csc_array.__doc__ = _csc_base.__doc__ class csc_matrix(spmatrix, _csc_base): pass csc_matrix.__doc__ = _array_doc_to_matrix(_csc_base.__doc__)
8,297
29.065217
82
py
scipy
scipy-main/scipy/sparse/csgraph/setup.py
def configuration(parent_package='', top_path=None): import numpy from numpy.distutils.misc_util import Configuration config = Configuration('csgraph', parent_package, top_path) config.add_data_dir('tests') config.add_extension('_shortest_path', sources=['_shortest_path.c'], include_dirs=[numpy.get_include()]) config.add_extension('_traversal', sources=['_traversal.c'], include_dirs=[numpy.get_include()]) config.add_extension('_min_spanning_tree', sources=['_min_spanning_tree.c'], include_dirs=[numpy.get_include()]) config.add_extension('_matching', sources=['_matching.c'], include_dirs=[numpy.get_include()]) config.add_extension('_flow', sources=['_flow.c'], include_dirs=[numpy.get_include()]) config.add_extension('_reordering', sources=['_reordering.c'], include_dirs=[numpy.get_include()]) config.add_extension('_tools', sources=['_tools.c'], include_dirs=[numpy.get_include()]) return config
1,098
27.921053
63
py
scipy
scipy-main/scipy/sparse/csgraph/_validation.py
import numpy as np from scipy.sparse import csr_matrix, issparse from ._tools import csgraph_to_dense, csgraph_from_dense,\ csgraph_masked_from_dense, csgraph_from_masked DTYPE = np.float64 def validate_graph(csgraph, directed, dtype=DTYPE, csr_output=True, dense_output=True, copy_if_dense=False, copy_if_sparse=False, null_value_in=0, null_value_out=np.inf, infinity_null=True, nan_null=True): """Routine for validation and conversion of csgraph inputs""" if not (csr_output or dense_output): raise ValueError("Internal: dense or csr output must be true") # if undirected and csc storage, then transposing in-place # is quicker than later converting to csr. if (not directed) and issparse(csgraph) and csgraph.format == "csc": csgraph = csgraph.T if issparse(csgraph): if csr_output: csgraph = csr_matrix(csgraph, dtype=DTYPE, copy=copy_if_sparse) else: csgraph = csgraph_to_dense(csgraph, null_value=null_value_out) elif np.ma.isMaskedArray(csgraph): if dense_output: mask = csgraph.mask csgraph = np.array(csgraph.data, dtype=DTYPE, copy=copy_if_dense) csgraph[mask] = null_value_out else: csgraph = csgraph_from_masked(csgraph) else: if dense_output: csgraph = csgraph_masked_from_dense(csgraph, copy=copy_if_dense, null_value=null_value_in, nan_null=nan_null, infinity_null=infinity_null) mask = csgraph.mask csgraph = np.asarray(csgraph.data, dtype=DTYPE) csgraph[mask] = null_value_out else: csgraph = csgraph_from_dense(csgraph, null_value=null_value_in, infinity_null=infinity_null, nan_null=nan_null) if csgraph.ndim != 2: raise ValueError("compressed-sparse graph must be 2-D") if csgraph.shape[0] != csgraph.shape[1]: raise ValueError("compressed-sparse graph must be shape (N, N)") return csgraph
2,329
39.877193
77
py
scipy
scipy-main/scipy/sparse/csgraph/_laplacian.py
""" Laplacian of a compressed-sparse graph """ import numpy as np from scipy.sparse import issparse from scipy.sparse.linalg import LinearOperator ############################################################################### # Graph laplacian def laplacian( csgraph, normed=False, return_diag=False, use_out_degree=False, *, copy=True, form="array", dtype=None, symmetrized=False, ): """ Return the Laplacian of a directed graph. Parameters ---------- csgraph : array_like or sparse matrix, 2 dimensions compressed-sparse graph, with shape (N, N). normed : bool, optional If True, then compute symmetrically normalized Laplacian. Default: False. return_diag : bool, optional If True, then also return an array related to vertex degrees. Default: False. use_out_degree : bool, optional If True, then use out-degree instead of in-degree. This distinction matters only if the graph is asymmetric. Default: False. copy: bool, optional If False, then change `csgraph` in place if possible, avoiding doubling the memory use. Default: True, for backward compatibility. form: 'array', or 'function', or 'lo' Determines the format of the output Laplacian: * 'array' is a numpy array; * 'function' is a pointer to evaluating the Laplacian-vector or Laplacian-matrix product; * 'lo' results in the format of the `LinearOperator`. Choosing 'function' or 'lo' always avoids doubling the memory use, ignoring `copy` value. Default: 'array', for backward compatibility. dtype: None or one of numeric numpy dtypes, optional The dtype of the output. If ``dtype=None``, the dtype of the output matches the dtype of the input csgraph, except for the case ``normed=True`` and integer-like csgraph, where the output dtype is 'float' allowing accurate normalization, but dramatically increasing the memory use. Default: None, for backward compatibility. symmetrized: bool, optional If True, then the output Laplacian is symmetric/Hermitian. 
The symmetrization is done by ``csgraph + csgraph.T.conj`` without dividing by 2 to preserve integer dtypes if possible prior to the construction of the Laplacian. The symmetrization will increase the memory footprint of sparse matrices unless the sparsity pattern is symmetric or `form` is 'function' or 'lo'. Default: False, for backward compatibility. Returns ------- lap : ndarray, or sparse matrix, or `LinearOperator` The N x N Laplacian of csgraph. It will be a NumPy array (dense) if the input was dense, or a sparse matrix otherwise, or the format of a function or `LinearOperator` if `form` equals 'function' or 'lo', respectively. diag : ndarray, optional The length-N main diagonal of the Laplacian matrix. For the normalized Laplacian, this is the array of square roots of vertex degrees or 1 if the degree is zero. Notes ----- The Laplacian matrix of a graph is sometimes referred to as the "Kirchhoff matrix" or just the "Laplacian", and is useful in many parts of spectral graph theory. In particular, the eigen-decomposition of the Laplacian can give insight into many properties of the graph, e.g., is commonly used for spectral data embedding and clustering. The constructed Laplacian doubles the memory use if ``copy=True`` and ``form="array"`` which is the default. Choosing ``copy=False`` has no effect unless ``form="array"`` or the matrix is sparse in the ``coo`` format, or dense array, except for the integer input with ``normed=True`` that forces the float output. Sparse input is reformatted into ``coo`` if ``form="array"``, which is the default. If the input adjacency matrix is not symmetic, the Laplacian is also non-symmetric unless ``symmetrized=True`` is used. Diagonal entries of the input adjacency matrix are ignored and replaced with zeros for the purpose of normalization where ``normed=True``. 
The normalization uses the inverse square roots of row-sums of the input adjacency matrix, and thus may fail if the row-sums contain negative or complex with a non-zero imaginary part values. The normalization is symmetric, making the normalized Laplacian also symmetric if the input csgraph was symmetric. References ---------- .. [1] Laplacian matrix. https://en.wikipedia.org/wiki/Laplacian_matrix Examples -------- >>> import numpy as np >>> from scipy.sparse import csgraph Our first illustration is the symmetric graph >>> G = np.arange(4) * np.arange(4)[:, np.newaxis] >>> G array([[0, 0, 0, 0], [0, 1, 2, 3], [0, 2, 4, 6], [0, 3, 6, 9]]) and its symmetric Laplacian matrix >>> csgraph.laplacian(G) array([[ 0, 0, 0, 0], [ 0, 5, -2, -3], [ 0, -2, 8, -6], [ 0, -3, -6, 9]]) The non-symmetric graph >>> G = np.arange(9).reshape(3, 3) >>> G array([[0, 1, 2], [3, 4, 5], [6, 7, 8]]) has different row- and column sums, resulting in two varieties of the Laplacian matrix, using an in-degree, which is the default >>> L_in_degree = csgraph.laplacian(G) >>> L_in_degree array([[ 9, -1, -2], [-3, 8, -5], [-6, -7, 7]]) or alternatively an out-degree >>> L_out_degree = csgraph.laplacian(G, use_out_degree=True) >>> L_out_degree array([[ 3, -1, -2], [-3, 8, -5], [-6, -7, 13]]) Constructing a symmetric Laplacian matrix, one can add the two as >>> L_in_degree + L_out_degree.T array([[ 12, -4, -8], [ -4, 16, -12], [ -8, -12, 20]]) or use the ``symmetrized=True`` option >>> csgraph.laplacian(G, symmetrized=True) array([[ 12, -4, -8], [ -4, 16, -12], [ -8, -12, 20]]) that is equivalent to symmetrizing the original graph >>> csgraph.laplacian(G + G.T) array([[ 12, -4, -8], [ -4, 16, -12], [ -8, -12, 20]]) The goal of normalization is to make the non-zero diagonal entries of the Laplacian matrix to be all unit, also scaling off-diagonal entries correspondingly. 
The normalization can be done manually, e.g., >>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]) >>> L, d = csgraph.laplacian(G, return_diag=True) >>> L array([[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]) >>> d array([2, 2, 2]) >>> scaling = np.sqrt(d) >>> scaling array([1.41421356, 1.41421356, 1.41421356]) >>> (1/scaling)*L*(1/scaling) array([[ 1. , -0.5, -0.5], [-0.5, 1. , -0.5], [-0.5, -0.5, 1. ]]) Or using ``normed=True`` option >>> L, d = csgraph.laplacian(G, return_diag=True, normed=True) >>> L array([[ 1. , -0.5, -0.5], [-0.5, 1. , -0.5], [-0.5, -0.5, 1. ]]) which now instead of the diagonal returns the scaling coefficients >>> d array([1.41421356, 1.41421356, 1.41421356]) Zero scaling coefficients are substituted with 1s, where scaling has thus no effect, e.g., >>> G = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0]]) >>> G array([[0, 0, 0], [0, 0, 1], [0, 1, 0]]) >>> L, d = csgraph.laplacian(G, return_diag=True, normed=True) >>> L array([[ 0., -0., -0.], [-0., 1., -1.], [-0., -1., 1.]]) >>> d array([1., 1., 1.]) Only the symmetric normalization is implemented, resulting in a symmetric Laplacian matrix if and only if its graph is symmetric and has all non-negative degrees, like in the examples above. 
The output Laplacian matrix is by default a dense array or a sparse matrix inferring its shape, format, and dtype from the input graph matrix: >>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]).astype(np.float32) >>> G array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0.]], dtype=float32) >>> csgraph.laplacian(G) array([[ 2., -1., -1.], [-1., 2., -1.], [-1., -1., 2.]], dtype=float32) but can alternatively be generated matrix-free as a LinearOperator: >>> L = csgraph.laplacian(G, form="lo") >>> L <3x3 _CustomLinearOperator with dtype=float32> >>> L(np.eye(3)) array([[ 2., -1., -1.], [-1., 2., -1.], [-1., -1., 2.]]) or as a lambda-function: >>> L = csgraph.laplacian(G, form="function") >>> L <function _laplace.<locals>.<lambda> at 0x0000012AE6F5A598> >>> L(np.eye(3)) array([[ 2., -1., -1.], [-1., 2., -1.], [-1., -1., 2.]]) The Laplacian matrix is used for spectral data clustering and embedding as well as for spectral graph partitioning. Our final example illustrates the latter for a noisy directed linear graph. >>> from scipy.sparse import diags, random >>> from scipy.sparse.linalg import lobpcg Create a directed linear graph with ``N=35`` vertices using a sparse adjacency matrix ``G``: >>> N = 35 >>> G = diags(np.ones(N-1), 1, format="csr") Fix a random seed ``rng`` and add a random sparse noise to the graph ``G``: >>> rng = np.random.default_rng() >>> G += 1e-2 * random(N, N, density=0.1, random_state=rng) Set initial approximations for eigenvectors: >>> X = rng.random((N, 2)) The constant vector of ones is always a trivial eigenvector of the non-normalized Laplacian to be filtered out: >>> Y = np.ones((N, 1)) Alternating (1) the sign of the graph weights allows determining labels for spectral max- and min- cuts in a single loop. Since the graph is undirected, the option ``symmetrized=True`` must be used in the construction of the Laplacian. 
The option ``normed=True`` cannot be used in (2) for the negative weights here as the symmetric normalization evaluates square roots. The option ``form="lo"`` in (2) is matrix-free, i.e., guarantees a fixed memory footprint and read-only access to the graph. Calling the eigenvalue solver ``lobpcg`` (3) computes the Fiedler vector that determines the labels as the signs of its components in (5). Since the sign in an eigenvector is not deterministic and can flip, we fix the sign of the first component to be always +1 in (4). >>> for cut in ["max", "min"]: ... G = -G # 1. ... L = csgraph.laplacian(G, symmetrized=True, form="lo") # 2. ... _, eves = lobpcg(L, X, Y=Y, largest=False, tol=1e-3) # 3. ... eves *= np.sign(eves[0, 0]) # 4. ... print(cut + "-cut labels:\\n", 1 * (eves[:, 0]>0)) # 5. max-cut labels: [1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1] min-cut labels: [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] As anticipated for a (slightly noisy) linear graph, the max-cut strips all the edges of the graph coloring all odd vertices into one color and all even vertices into another one, while the balanced min-cut partitions the graph in the middle by deleting a single edge. Both determined partitions are optimal. 
""" if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]: raise ValueError('csgraph must be a square matrix or array') if normed and ( np.issubdtype(csgraph.dtype, np.signedinteger) or np.issubdtype(csgraph.dtype, np.uint) ): csgraph = csgraph.astype(np.float64) if form == "array": create_lap = ( _laplacian_sparse if issparse(csgraph) else _laplacian_dense ) else: create_lap = ( _laplacian_sparse_flo if issparse(csgraph) else _laplacian_dense_flo ) degree_axis = 1 if use_out_degree else 0 lap, d = create_lap( csgraph, normed=normed, axis=degree_axis, copy=copy, form=form, dtype=dtype, symmetrized=symmetrized, ) if return_diag: return lap, d return lap def _setdiag_dense(m, d): step = len(d) + 1 m.flat[::step] = d def _laplace(m, d): return lambda v: v * d[:, np.newaxis] - m @ v def _laplace_normed(m, d, nd): laplace = _laplace(m, d) return lambda v: nd[:, np.newaxis] * laplace(v * nd[:, np.newaxis]) def _laplace_sym(m, d): return ( lambda v: v * d[:, np.newaxis] - m @ v - np.transpose(np.conjugate(np.transpose(np.conjugate(v)) @ m)) ) def _laplace_normed_sym(m, d, nd): laplace_sym = _laplace_sym(m, d) return lambda v: nd[:, np.newaxis] * laplace_sym(v * nd[:, np.newaxis]) def _linearoperator(mv, shape, dtype): return LinearOperator(matvec=mv, matmat=mv, shape=shape, dtype=dtype) def _laplacian_sparse_flo(graph, normed, axis, copy, form, dtype, symmetrized): # The keyword argument `copy` is unused and has no effect here. 
del copy if dtype is None: dtype = graph.dtype graph_sum = graph.sum(axis=axis).getA1() graph_diagonal = graph.diagonal() diag = graph_sum - graph_diagonal if symmetrized: graph_sum += graph.sum(axis=1 - axis).getA1() diag = graph_sum - graph_diagonal - graph_diagonal if normed: isolated_node_mask = diag == 0 w = np.where(isolated_node_mask, 1, np.sqrt(diag)) if symmetrized: md = _laplace_normed_sym(graph, graph_sum, 1.0 / w) else: md = _laplace_normed(graph, graph_sum, 1.0 / w) if form == "function": return md, w.astype(dtype, copy=False) elif form == "lo": m = _linearoperator(md, shape=graph.shape, dtype=dtype) return m, w.astype(dtype, copy=False) else: raise ValueError(f"Invalid form: {form!r}") else: if symmetrized: md = _laplace_sym(graph, graph_sum) else: md = _laplace(graph, graph_sum) if form == "function": return md, diag.astype(dtype, copy=False) elif form == "lo": m = _linearoperator(md, shape=graph.shape, dtype=dtype) return m, diag.astype(dtype, copy=False) else: raise ValueError(f"Invalid form: {form!r}") def _laplacian_sparse(graph, normed, axis, copy, form, dtype, symmetrized): # The keyword argument `form` is unused and has no effect here. 
del form if dtype is None: dtype = graph.dtype needs_copy = False if graph.format in ('lil', 'dok'): m = graph.tocoo() else: m = graph if copy: needs_copy = True if symmetrized: m += m.T.conj() w = m.sum(axis=axis).getA1() - m.diagonal() if normed: m = m.tocoo(copy=needs_copy) isolated_node_mask = (w == 0) w = np.where(isolated_node_mask, 1, np.sqrt(w)) m.data /= w[m.row] m.data /= w[m.col] m.data *= -1 m.setdiag(1 - isolated_node_mask) else: if m.format == 'dia': m = m.copy() else: m = m.tocoo(copy=needs_copy) m.data *= -1 m.setdiag(w) return m.astype(dtype, copy=False), w.astype(dtype) def _laplacian_dense_flo(graph, normed, axis, copy, form, dtype, symmetrized): if copy: m = np.array(graph) else: m = np.asarray(graph) if dtype is None: dtype = m.dtype graph_sum = m.sum(axis=axis) graph_diagonal = m.diagonal() diag = graph_sum - graph_diagonal if symmetrized: graph_sum += m.sum(axis=1 - axis) diag = graph_sum - graph_diagonal - graph_diagonal if normed: isolated_node_mask = diag == 0 w = np.where(isolated_node_mask, 1, np.sqrt(diag)) if symmetrized: md = _laplace_normed_sym(m, graph_sum, 1.0 / w) else: md = _laplace_normed(m, graph_sum, 1.0 / w) if form == "function": return md, w.astype(dtype, copy=False) elif form == "lo": m = _linearoperator(md, shape=graph.shape, dtype=dtype) return m, w.astype(dtype, copy=False) else: raise ValueError(f"Invalid form: {form!r}") else: if symmetrized: md = _laplace_sym(m, graph_sum) else: md = _laplace(m, graph_sum) if form == "function": return md, diag.astype(dtype, copy=False) elif form == "lo": m = _linearoperator(md, shape=graph.shape, dtype=dtype) return m, diag.astype(dtype, copy=False) else: raise ValueError(f"Invalid form: {form!r}") def _laplacian_dense(graph, normed, axis, copy, form, dtype, symmetrized): if form != "array": raise ValueError(f'{form!r} must be "array"') if dtype is None: dtype = graph.dtype if copy: m = np.array(graph) else: m = np.asarray(graph) if dtype is None: dtype = m.dtype if symmetrized: m 
+= m.T.conj() np.fill_diagonal(m, 0) w = m.sum(axis=axis) if normed: isolated_node_mask = (w == 0) w = np.where(isolated_node_mask, 1, np.sqrt(w)) m /= w m /= w[:, np.newaxis] m *= -1 _setdiag_dense(m, 1 - isolated_node_mask) else: m *= -1 _setdiag_dense(m, w) return m.astype(dtype, copy=False), w.astype(dtype, copy=False)
17,827
31.064748
79
py
scipy
scipy-main/scipy/sparse/csgraph/__init__.py
r""" Compressed sparse graph routines (:mod:`scipy.sparse.csgraph`) ============================================================== .. currentmodule:: scipy.sparse.csgraph Fast graph algorithms based on sparse matrix representations. Contents -------- .. autosummary:: :toctree: generated/ connected_components -- determine connected components of a graph laplacian -- compute the laplacian of a graph shortest_path -- compute the shortest path between points on a positive graph dijkstra -- use Dijkstra's algorithm for shortest path floyd_warshall -- use the Floyd-Warshall algorithm for shortest path bellman_ford -- use the Bellman-Ford algorithm for shortest path johnson -- use Johnson's algorithm for shortest path breadth_first_order -- compute a breadth-first order of nodes depth_first_order -- compute a depth-first order of nodes breadth_first_tree -- construct the breadth-first tree from a given node depth_first_tree -- construct a depth-first tree from a given node minimum_spanning_tree -- construct the minimum spanning tree of a graph reverse_cuthill_mckee -- compute permutation for reverse Cuthill-McKee ordering maximum_flow -- solve the maximum flow problem for a graph maximum_bipartite_matching -- compute a maximum matching of a bipartite graph min_weight_full_bipartite_matching - compute a minimum weight full matching of a bipartite graph structural_rank -- compute the structural rank of a graph NegativeCycleError .. autosummary:: :toctree: generated/ construct_dist_matrix csgraph_from_dense csgraph_from_masked csgraph_masked_from_dense csgraph_to_dense csgraph_to_masked reconstruct_path Graph Representations --------------------- This module uses graphs which are stored in a matrix format. A graph with N nodes can be represented by an (N x N) adjacency matrix G. If there is a connection from node i to node j, then G[i, j] = w, where w is the weight of the connection. 
For nodes i and j which are not connected, the value depends on the representation: - for dense array representations, non-edges are represented by G[i, j] = 0, infinity, or NaN. - for dense masked representations (of type np.ma.MaskedArray), non-edges are represented by masked values. This can be useful when graphs with zero-weight edges are desired. - for sparse array representations, non-edges are represented by non-entries in the matrix. This sort of sparse representation also allows for edges with zero weights. As a concrete example, imagine that you would like to represent the following undirected graph:: G (0) / \ 1 2 / \ (2) (1) This graph has three nodes, where node 0 and 1 are connected by an edge of weight 2, and nodes 0 and 2 are connected by an edge of weight 1. We can construct the dense, masked, and sparse representations as follows, keeping in mind that an undirected graph is represented by a symmetric matrix:: >>> import numpy as np >>> G_dense = np.array([[0, 2, 1], ... [2, 0, 0], ... [1, 0, 0]]) >>> G_masked = np.ma.masked_values(G_dense, 0) >>> from scipy.sparse import csr_matrix >>> G_sparse = csr_matrix(G_dense) This becomes more difficult when zero edges are significant. For example, consider the situation when we slightly modify the above graph:: G2 (0) / \ 0 2 / \ (2) (1) This is identical to the previous graph, except nodes 0 and 2 are connected by an edge of zero weight. In this case, the dense representation above leads to ambiguities: how can non-edges be represented if zero is a meaningful value? In this case, either a masked or sparse representation must be used to eliminate the ambiguity:: >>> import numpy as np >>> G2_data = np.array([[np.inf, 2, 0 ], ... [2, np.inf, np.inf], ... 
[0, np.inf, np.inf]]) >>> G2_masked = np.ma.masked_invalid(G2_data) >>> from scipy.sparse.csgraph import csgraph_from_dense >>> # G2_sparse = csr_matrix(G2_data) would give the wrong result >>> G2_sparse = csgraph_from_dense(G2_data, null_value=np.inf) >>> G2_sparse.data array([ 2., 0., 2., 0.]) Here we have used a utility routine from the csgraph submodule in order to convert the dense representation to a sparse representation which can be understood by the algorithms in submodule. By viewing the data array, we can see that the zero values are explicitly encoded in the graph. Directed vs. undirected ^^^^^^^^^^^^^^^^^^^^^^^ Matrices may represent either directed or undirected graphs. This is specified throughout the csgraph module by a boolean keyword. Graphs are assumed to be directed by default. In a directed graph, traversal from node i to node j can be accomplished over the edge G[i, j], but not the edge G[j, i]. Consider the following dense graph:: >>> import numpy as np >>> G_dense = np.array([[0, 1, 0], ... [2, 0, 3], ... [0, 4, 0]]) When ``directed=True`` we get the graph:: ---1--> ---3--> (0) (1) (2) <--2--- <--4--- In a non-directed graph, traversal from node i to node j can be accomplished over either G[i, j] or G[j, i]. If both edges are not null, and the two have unequal weights, then the smaller of the two is used. So for the same graph, when ``directed=False`` we get the graph:: (0)--1--(1)--3--(2) Note that a symmetric matrix will represent an undirected graph, regardless of whether the 'directed' keyword is set to True or False. In this case, using ``directed=True`` generally leads to more efficient computation. The routines in this module accept as input either scipy.sparse representations (csr, csc, or lil format), masked representations, or dense representations with non-edges indicated by zeros, infinities, and NaN entries. 
""" __docformat__ = "restructuredtext en" __all__ = ['connected_components', 'laplacian', 'shortest_path', 'floyd_warshall', 'dijkstra', 'bellman_ford', 'johnson', 'breadth_first_order', 'depth_first_order', 'breadth_first_tree', 'depth_first_tree', 'minimum_spanning_tree', 'reverse_cuthill_mckee', 'maximum_flow', 'maximum_bipartite_matching', 'min_weight_full_bipartite_matching', 'structural_rank', 'construct_dist_matrix', 'reconstruct_path', 'csgraph_masked_from_dense', 'csgraph_from_dense', 'csgraph_from_masked', 'csgraph_to_dense', 'csgraph_to_masked', 'NegativeCycleError'] from ._laplacian import laplacian from ._shortest_path import ( shortest_path, floyd_warshall, dijkstra, bellman_ford, johnson, NegativeCycleError ) from ._traversal import ( breadth_first_order, depth_first_order, breadth_first_tree, depth_first_tree, connected_components ) from ._min_spanning_tree import minimum_spanning_tree from ._flow import maximum_flow from ._matching import ( maximum_bipartite_matching, min_weight_full_bipartite_matching ) from ._reordering import reverse_cuthill_mckee, structural_rank from ._tools import ( construct_dist_matrix, reconstruct_path, csgraph_from_dense, csgraph_to_dense, csgraph_masked_from_dense, csgraph_from_masked, csgraph_to_masked ) from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
7,739
36.033493
99
py
scipy
scipy-main/scipy/sparse/csgraph/tests/test_spanning_tree.py
"""Test the minimum spanning tree function"""
import numpy as np
from numpy.testing import assert_
import numpy.testing as npt
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree


def test_minimum_spanning_tree():
    """Exercise minimum_spanning_tree on a fixed two-component graph and
    on random graphs with a planted cheapest path."""
    # A graph made of two connected components: {0, 1} and {2, 3, 4}.
    dense = np.asarray([[0, 1, 0, 0, 0],
                        [1, 0, 0, 0, 0],
                        [0, 0, 0, 8, 5],
                        [0, 0, 8, 0, 1],
                        [0, 0, 5, 1, 0]])

    # The expected MST keeps the cheapest edges of each component.
    tree = np.asarray([[0, 1, 0, 0, 0],
                       [0, 0, 0, 0, 0],
                       [0, 0, 0, 0, 5],
                       [0, 0, 0, 0, 1],
                       [0, 0, 0, 0, 0]])

    sparse_graph = csr_matrix(dense)
    result = minimum_spanning_tree(sparse_graph)
    npt.assert_array_equal(result.toarray(), tree,
                           'Incorrect spanning tree found.')

    # Without overwrite=True the input must stay intact.
    npt.assert_array_equal(sparse_graph.toarray(), dense,
                           'Original graph was modified.')

    # With overwrite=True the algorithm is allowed to work in place.
    result = minimum_spanning_tree(sparse_graph, overwrite=True)
    npt.assert_array_equal(result.toarray(), tree,
                           'Graph was not properly modified to contain MST.')

    np.random.seed(1234)
    for size in (5, 10, 15, 20):
        # Random dense graph with all weights in (3, 4).
        weights = 3 + np.random.random((size, size))
        result = minimum_spanning_tree(csr_matrix(weights))

        # A spanning tree has at most size - 1 edges.
        assert_(result.nnz < size)

        # Plant a known cheapest path along the superdiagonal.
        diag = np.arange(size - 1)
        weights[diag, diag + 1] = 1
        result = minimum_spanning_tree(csr_matrix(weights))

        # The MST must consist of exactly that planted path.
        planted = np.zeros((size, size))
        planted[diag, diag + 1] = 1
        npt.assert_array_equal(result.toarray(), planted,
                               'Incorrect spanning tree found.')
2,115
31.060606
74
py
scipy
scipy-main/scipy/sparse/csgraph/tests/test_flow.py
"""Tests for scipy.sparse.csgraph.maximum_flow and its private helpers."""
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from scipy.sparse import csr_matrix, csc_matrix
from scipy.sparse.csgraph import maximum_flow
from scipy.sparse.csgraph._flow import (
    _add_reverse_edges, _make_edge_pointers, _make_tails
)

# Both max-flow algorithms exposed through the ``method`` keyword.
methods = ['edmonds_karp', 'dinic']


def test_raises_on_dense_input():
    """Dense input is rejected with a TypeError."""
    with pytest.raises(TypeError):
        graph = np.array([[0, 1], [0, 0]])
        maximum_flow(graph, 0, 1)
        maximum_flow(graph, 0, 1, method='edmonds_karp')


def test_raises_on_csc_input():
    """CSC input is rejected with a TypeError."""
    with pytest.raises(TypeError):
        graph = csc_matrix([[0, 1], [0, 0]])
        maximum_flow(graph, 0, 1)
        maximum_flow(graph, 0, 1, method='edmonds_karp')


def test_raises_on_floating_point_input():
    """float64 capacities are rejected with a ValueError."""
    with pytest.raises(ValueError):
        graph = csr_matrix([[0, 1.5], [0, 0]], dtype=np.float64)
        maximum_flow(graph, 0, 1)
        maximum_flow(graph, 0, 1, method='edmonds_karp')


def test_raises_on_non_square_input():
    """A non-square matrix is rejected with a ValueError."""
    with pytest.raises(ValueError):
        graph = csr_matrix([[0, 1, 2], [2, 1, 0]])
        maximum_flow(graph, 0, 1)


def test_raises_when_source_is_sink():
    """source == sink is rejected with a ValueError."""
    with pytest.raises(ValueError):
        graph = csr_matrix([[0, 1], [0, 0]])
        maximum_flow(graph, 0, 0)
        maximum_flow(graph, 0, 0, method='edmonds_karp')


@pytest.mark.parametrize('method', methods)
@pytest.mark.parametrize('source', [-1, 2, 3])
def test_raises_when_source_is_out_of_bounds(source, method):
    """An out-of-range source vertex is rejected with a ValueError."""
    with pytest.raises(ValueError):
        graph = csr_matrix([[0, 1], [0, 0]])
        maximum_flow(graph, source, 1, method=method)


@pytest.mark.parametrize('method', methods)
@pytest.mark.parametrize('sink', [-1, 2, 3])
def test_raises_when_sink_is_out_of_bounds(sink, method):
    """An out-of-range sink vertex is rejected with a ValueError."""
    with pytest.raises(ValueError):
        graph = csr_matrix([[0, 1], [0, 0]])
        maximum_flow(graph, 0, sink, method=method)


@pytest.mark.parametrize('method', methods)
def test_simple_graph(method):
    # This graph looks as follows:
    #     (0) --5--> (1)
    graph = csr_matrix([[0, 5], [0, 0]])
    res = maximum_flow(graph, 0, 1, method=method)
    assert res.flow_value == 5
    expected_flow = np.array([[0, 5], [-5, 0]])
    assert_array_equal(res.flow.toarray(), expected_flow)


@pytest.mark.parametrize('method', methods)
def test_bottle_neck_graph(method):
    # This graph cannot use the full capacity between 0 and 1:
    #     (0) --5--> (1) --3--> (2)
    graph = csr_matrix([[0, 5, 0], [0, 0, 3], [0, 0, 0]])
    res = maximum_flow(graph, 0, 2, method=method)
    assert res.flow_value == 3
    expected_flow = np.array([[0, 3, 0], [-3, 0, 3], [0, -3, 0]])
    assert_array_equal(res.flow.toarray(), expected_flow)


@pytest.mark.parametrize('method', methods)
def test_backwards_flow(method):
    # This example causes backwards flow between vertices 3 and 4,
    # and so this test ensures that we handle that accordingly. See
    #     https://stackoverflow.com/q/38843963/5085211
    # for more information.
    graph = csr_matrix([[0, 10, 0, 0, 10, 0, 0, 0],
                        [0, 0, 10, 0, 0, 0, 0, 0],
                        [0, 0, 0, 10, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 10],
                        [0, 0, 0, 10, 0, 10, 0, 0],
                        [0, 0, 0, 0, 0, 0, 10, 0],
                        [0, 0, 0, 0, 0, 0, 0, 10],
                        [0, 0, 0, 0, 0, 0, 0, 0]])
    res = maximum_flow(graph, 0, 7, method=method)
    assert res.flow_value == 20
    expected_flow = np.array([[0, 10, 0, 0, 10, 0, 0, 0],
                              [-10, 0, 10, 0, 0, 0, 0, 0],
                              [0, -10, 0, 10, 0, 0, 0, 0],
                              [0, 0, -10, 0, 0, 0, 0, 10],
                              [-10, 0, 0, 0, 0, 10, 0, 0],
                              [0, 0, 0, 0, -10, 0, 10, 0],
                              [0, 0, 0, 0, 0, -10, 0, 10],
                              [0, 0, 0, -10, 0, 0, -10, 0]])
    assert_array_equal(res.flow.toarray(), expected_flow)


@pytest.mark.parametrize('method', methods)
def test_example_from_clrs_chapter_26_1(method):
    # See page 659 in CLRS second edition, but note that the maximum flow
    # we find is slightly different than the one in CLRS; we push a flow of
    # 12 to v_1 instead of v_2.
    graph = csr_matrix([[0, 16, 13, 0, 0, 0],
                        [0, 0, 10, 12, 0, 0],
                        [0, 4, 0, 0, 14, 0],
                        [0, 0, 9, 0, 0, 20],
                        [0, 0, 0, 7, 0, 4],
                        [0, 0, 0, 0, 0, 0]])
    res = maximum_flow(graph, 0, 5, method=method)
    assert res.flow_value == 23
    expected_flow = np.array([[0, 12, 11, 0, 0, 0],
                              [-12, 0, 0, 12, 0, 0],
                              [-11, 0, 0, 0, 11, 0],
                              [0, -12, 0, 0, -7, 19],
                              [0, 0, -11, 7, 0, 4],
                              [0, 0, 0, -19, -4, 0]])
    assert_array_equal(res.flow.toarray(), expected_flow)


@pytest.mark.parametrize('method', methods)
def test_disconnected_graph(method):
    # This tests the following disconnected graph:
    #     (0) --5--> (1)    (2) --3--> (3)
    # Source and sink are in different components, so the max flow is 0.
    graph = csr_matrix([[0, 5, 0, 0],
                        [0, 0, 0, 0],
                        [0, 0, 9, 3],
                        [0, 0, 0, 0]])
    res = maximum_flow(graph, 0, 3, method=method)
    assert res.flow_value == 0
    expected_flow = np.zeros((4, 4), dtype=np.int32)
    assert_array_equal(res.flow.toarray(), expected_flow)


@pytest.mark.parametrize('method', methods)
def test_add_reverse_edges_large_graph(method):
    # Regression test for https://github.com/scipy/scipy/issues/14385
    # A long path graph: 0 -> 1 -> ... -> n-1, each edge with capacity 1.
    n = 100_000
    indices = np.arange(1, n)
    indptr = np.array(list(range(n)) + [n - 1])
    data = np.ones(n - 1, dtype=np.int32)
    graph = csr_matrix((data, indices, indptr), shape=(n, n))
    res = maximum_flow(graph, 0, n - 1, method=method)
    assert res.flow_value == 1
    expected_flow = graph - graph.transpose()
    assert_array_equal(res.flow.data, expected_flow.data)
    assert_array_equal(res.flow.indices, expected_flow.indices)
    assert_array_equal(res.flow.indptr, expected_flow.indptr)


@pytest.mark.parametrize("a,b_data_expected", [
    ([[]], []),
    ([[0], [0]], []),
    ([[1, 0, 2], [0, 0, 0], [0, 3, 0]], [1, 2, 0, 0, 3]),
    ([[9, 8, 7], [4, 5, 6], [0, 0, 0]], [9, 8, 7, 4, 5, 6, 0, 0])])
def test_add_reverse_edges(a, b_data_expected):
    """Test that the reversal of the edges of the input graph works
    as expected.
    """
    a = csr_matrix(a, dtype=np.int32, shape=(len(a), len(a)))
    b = _add_reverse_edges(a)
    assert_array_equal(b.data, b_data_expected)


@pytest.mark.parametrize("a,expected", [
    ([[]], []),
    ([[0]], []),
    ([[1]], [0]),
    ([[0, 1], [10, 0]], [1, 0]),
    ([[1, 0, 2], [0, 0, 3], [4, 5, 0]], [0, 3, 4, 1, 2])
])
def test_make_edge_pointers(a, expected):
    """_make_edge_pointers returns, per edge, the index of its reverse edge."""
    a = csr_matrix(a, dtype=np.int32)
    rev_edge_ptr = _make_edge_pointers(a)
    assert_array_equal(rev_edge_ptr, expected)


@pytest.mark.parametrize("a,expected", [
    ([[]], []),
    ([[0]], []),
    ([[1]], [0]),
    ([[0, 1], [10, 0]], [0, 1]),
    ([[1, 0, 2], [0, 0, 3], [4, 5, 0]], [0, 0, 1, 2, 2])
])
def test_make_tails(a, expected):
    """_make_tails returns, per edge, the row (tail vertex) it belongs to."""
    a = csr_matrix(a, dtype=np.int32)
    tails = _make_tails(a)
    assert_array_equal(tails, expected)
7,420
35.737624
75
py
scipy
scipy-main/scipy/sparse/csgraph/tests/test_conversions.py
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import csgraph_from_dense, csgraph_to_dense


def test_csgraph_from_dense():
    """csgraph_from_dense must treat 0, nan and inf uniformly as nulls."""
    np.random.seed(1234)
    dense = np.random.random((10, 10))
    partial_mask = dense < 0.4
    full_mask = dense < 0.8

    for marker in (0, np.nan, np.inf):
        # Encode every null position with this marker value.
        dense[full_mask] = marker
        with np.errstate(invalid="ignore"):
            compressed = csgraph_from_dense(dense, null_value=0)

        dense[full_mask] = 0
        assert_array_almost_equal(dense, compressed.toarray())

    for marker in (np.nan, np.inf):
        # Mix explicit zeros with nan/inf markers.
        dense[full_mask] = 0
        dense[partial_mask] = marker
        with np.errstate(invalid="ignore"):
            compressed = csgraph_from_dense(dense, null_value=0)

        dense[full_mask] = 0
        assert_array_almost_equal(dense, compressed.toarray())


def test_csgraph_to_dense():
    """csgraph_to_dense must fill the requested null_value into non-edges."""
    np.random.seed(1234)
    dense = np.random.random((10, 10))
    null_mask = dense < 0.8
    dense[null_mask] = np.inf
    compressed = csgraph_from_dense(dense)

    for marker in (0, 10, -np.inf, np.inf):
        dense[null_mask] = marker
        assert_array_almost_equal(dense,
                                  csgraph_to_dense(compressed, marker))


def test_multiple_edges():
    """For duplicated edges, toarray() sums them while csgraph_to_dense
    keeps the minimum of each pair."""
    # Build a random square matrix with an even number of elements.
    np.random.seed(1234)
    X = np.random.random((10, 10))
    Xcsr = csr_matrix(X)

    # Double-up every other column index so each such edge appears twice.
    Xcsr.indices[::2] = Xcsr.indices[1::2]

    # Plain sparse conversion sums the duplicated edges.
    summed = Xcsr.toarray()
    assert_array_almost_equal(summed[:, 1::2], X[:, ::2] + X[:, 1::2])

    # csgraph_to_dense instead chooses the minimum of each duplicated edge.
    minimized = csgraph_to_dense(Xcsr)
    assert_array_almost_equal(minimized[:, 1::2],
                              np.minimum(X[:, ::2], X[:, 1::2]))
1,855
28.935484
73
py
scipy
scipy-main/scipy/sparse/csgraph/tests/test_graph_laplacian.py
"""Tests for scipy.sparse.csgraph.laplacian."""
import pytest
import numpy as np
from numpy.testing import assert_allclose
from pytest import raises as assert_raises
from scipy import sparse

from scipy.sparse import csgraph


def check_int_type(mat):
    """Return True when ``mat`` has a signed or unsigned integer dtype."""
    return np.issubdtype(mat.dtype, np.signedinteger) or np.issubdtype(
        mat.dtype, np.uint
    )


def test_laplacian_value_error():
    """Non-square or non-2d inputs must raise ValueError for any dtype."""
    for t in int, float, complex:
        for m in ([1, 1],
                  [[[1]]],
                  [[1, 2, 3], [4, 5, 6]],
                  [[1, 2], [3, 4], [5, 5]]):
            A = np.array(m, dtype=t)
            assert_raises(ValueError, csgraph.laplacian, A)


def _explicit_laplacian(x, normed=False):
    """Reference Laplacian computed naively: L = D - A, where the degree
    D sums the off-diagonal entries of each row; optionally normalized
    by D^{-1/2} on both sides (zero degrees treated as 1)."""
    if sparse.issparse(x):
        x = x.toarray()
    x = np.asarray(x)
    y = -1.0 * x
    for j in range(y.shape[0]):
        y[j,j] = x[j,j+1:].sum() + x[j,:j].sum()
    if normed:
        d = np.diag(y).copy()
        d[d == 0] = 1.0
        y /= d[:,None]**.5
        y /= d[None,:]**.5
    return y


def _check_symmetric_graph_laplacian(mat, normed, copy=True):
    """Compare csgraph.laplacian on dense and sparse forms of ``mat``
    against the explicit reference, and verify the copy semantics.

    ``mat`` may be given as a string, which is evaluated with ``np`` and
    ``sparse`` in scope so the fixtures stay compact.
    """
    if not hasattr(mat, 'shape'):
        mat = eval(mat, dict(np=np, sparse=sparse))

    if sparse.issparse(mat):
        sp_mat = mat
        mat = sp_mat.toarray()
    else:
        sp_mat = sparse.csr_matrix(mat)

    # Keep copies so we can detect in-place modification.
    mat_copy = np.copy(mat)
    sp_mat_copy = sparse.csr_matrix(sp_mat, copy=True)

    n_nodes = mat.shape[0]
    explicit_laplacian = _explicit_laplacian(mat, normed=normed)
    laplacian = csgraph.laplacian(mat, normed=normed, copy=copy)
    sp_laplacian = csgraph.laplacian(sp_mat, normed=normed, copy=copy)

    if copy:
        # copy=True: the inputs must be untouched.
        assert_allclose(mat, mat_copy)
        _assert_allclose_sparse(sp_mat, sp_mat_copy)
    else:
        # copy=False: the inputs may hold the result in place, except in
        # the normed-integer case, where a float result forces a copy.
        if not (normed and check_int_type(mat)):
            assert_allclose(laplacian, mat)
            if sp_mat.format == 'coo':
                _assert_allclose_sparse(sp_laplacian, sp_mat)

    assert_allclose(laplacian, sp_laplacian.toarray())

    for tested in [laplacian, sp_laplacian.toarray()]:
        if not normed:
            # Unnormalized Laplacian columns sum to zero.
            assert_allclose(tested.sum(axis=0), np.zeros(n_nodes))
        assert_allclose(tested.T, tested)
        assert_allclose(tested, explicit_laplacian)


def test_symmetric_graph_laplacian():
    # Fixtures are strings evaluated inside the checker (np/sparse in scope).
    symmetric_mats = (
        'np.arange(10) * np.arange(10)[:, np.newaxis]',
        'np.ones((7, 7))',
        'np.eye(19)',
        'sparse.diags([1, 1], [-1, 1], shape=(4, 4))',
        'sparse.diags([1, 1], [-1, 1], shape=(4, 4)).toarray()',
        'sparse.diags([1, 1], [-1, 1], shape=(4, 4)).todense()',
        'np.vander(np.arange(4)) + np.vander(np.arange(4)).T'
    )
    for mat in symmetric_mats:
        for normed in True, False:
            for copy in True, False:
                _check_symmetric_graph_laplacian(mat, normed, copy)


def _assert_allclose_sparse(a, b, **kwargs):
    # helper function that can deal with sparse matrices
    if sparse.issparse(a):
        a = a.toarray()
    if sparse.issparse(b):
        b = b.toarray()
    assert_allclose(a, b, **kwargs)


def _check_laplacian_dtype_none(
    A, desired_L, desired_d, normed, use_out_degree, copy, dtype, arr_type
):
    """Check dtype=None behavior: the result keeps the input dtype, except
    that a normalized Laplacian of an integer matrix is promoted to float64."""
    mat = arr_type(A, dtype=dtype)
    L, d = csgraph.laplacian(
        mat,
        normed=normed,
        return_diag=True,
        use_out_degree=use_out_degree,
        copy=copy,
        dtype=None,
    )
    if normed and check_int_type(mat):
        assert L.dtype == np.float64
        assert d.dtype == np.float64
        _assert_allclose_sparse(L, desired_L, atol=1e-12)
        _assert_allclose_sparse(d, desired_d, atol=1e-12)
    else:
        assert L.dtype == dtype
        assert d.dtype == dtype
        desired_L = np.asarray(desired_L).astype(dtype)
        desired_d = np.asarray(desired_d).astype(dtype)
        _assert_allclose_sparse(L, desired_L, atol=1e-12)
        _assert_allclose_sparse(d, desired_d, atol=1e-12)

    if not copy:
        if not (normed and check_int_type(mat)):
            if type(mat) is np.ndarray:
                assert_allclose(L, mat)
            elif mat.format == "coo":
                _assert_allclose_sparse(L, mat)


def _check_laplacian_dtype(
    A, desired_L, desired_d, normed, use_out_degree, copy, dtype, arr_type
):
    """Check explicit dtype behavior: the result must carry exactly the
    requested dtype."""
    mat = arr_type(A, dtype=dtype)
    L, d = csgraph.laplacian(
        mat,
        normed=normed,
        return_diag=True,
        use_out_degree=use_out_degree,
        copy=copy,
        dtype=dtype,
    )
    assert L.dtype == dtype
    assert d.dtype == dtype
    desired_L = np.asarray(desired_L).astype(dtype)
    desired_d = np.asarray(desired_d).astype(dtype)
    _assert_allclose_sparse(L, desired_L, atol=1e-12)
    _assert_allclose_sparse(d, desired_d, atol=1e-12)

    if not copy:
        if not (normed and check_int_type(mat)):
            if type(mat) is np.ndarray:
                assert_allclose(L, mat)
            elif mat.format == 'coo':
                _assert_allclose_sparse(L, mat)


INT_DTYPES = {np.intc, np.int_, np.longlong}
REAL_DTYPES = {np.single, np.double, np.longdouble}
COMPLEX_DTYPES = {np.csingle, np.cdouble, np.clongdouble}
# use sorted tuple to ensure fixed order of tests
DTYPES = tuple(sorted(INT_DTYPES ^ REAL_DTYPES ^ COMPLEX_DTYPES, key=str))


@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("arr_type", [np.array,
                                      sparse.csr_matrix,
                                      sparse.coo_matrix])
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize("normed", [True, False])
@pytest.mark.parametrize("use_out_degree", [True, False])
def test_asymmetric_laplacian(use_out_degree, normed,
                              copy, dtype, arr_type):
    # adjacency matrix
    A = [[0, 1, 0],
         [4, 2, 0],
         [0, 0, 0]]
    A = arr_type(np.array(A), dtype=dtype)
    A_copy = A.copy()

    if not normed and use_out_degree:
        # Laplacian matrix using out-degree
        L = [[1, -1, 0],
             [-4, 4, 0],
             [0, 0, 0]]
        d = [1, 4, 0]

    if normed and use_out_degree:
        # normalized Laplacian matrix using out-degree
        L = [[1, -0.5, 0],
             [-2, 1, 0],
             [0, 0, 0]]
        d = [1, 2, 1]

    if not normed and not use_out_degree:
        # Laplacian matrix using in-degree
        L = [[4, -1, 0],
             [-4, 1, 0],
             [0, 0, 0]]
        d = [4, 1, 0]

    if normed and not use_out_degree:
        # normalized Laplacian matrix using in-degree
        L = [[1, -0.5, 0],
             [-2, 1, 0],
             [0, 0, 0]]
        d = [2, 1, 1]

    _check_laplacian_dtype_none(
        A,
        L,
        d,
        normed=normed,
        use_out_degree=use_out_degree,
        copy=copy,
        dtype=dtype,
        arr_type=arr_type,
    )

    _check_laplacian_dtype(
        A_copy,
        L,
        d,
        normed=normed,
        use_out_degree=use_out_degree,
        copy=copy,
        dtype=dtype,
        arr_type=arr_type,
    )


@pytest.mark.parametrize("fmt", ['csr', 'csc', 'coo', 'lil',
                                 'dok', 'dia', 'bsr'])
@pytest.mark.parametrize("normed", [True, False])
@pytest.mark.parametrize("copy", [True, False])
def test_sparse_formats(fmt, normed, copy):
    """All sparse formats must be accepted, not only CSR."""
    mat = sparse.diags([1, 1], [-1, 1], shape=(4, 4), format=fmt)
    _check_symmetric_graph_laplacian(mat, normed, copy)


@pytest.mark.parametrize(
    "arr_type", [np.asarray, sparse.csr_matrix, sparse.coo_matrix]
)
@pytest.mark.parametrize("form", ["array", "function", "lo"])
def test_laplacian_symmetrized(arr_type, form):
    """symmetrized=True must agree with the Laplacian of mat + mat.T."""
    # adjacency matrix
    n = 3
    mat = arr_type(np.arange(n * n).reshape(n, n))
    L_in, d_in = csgraph.laplacian(
        mat,
        return_diag=True,
        form=form,
    )
    L_out, d_out = csgraph.laplacian(
        mat,
        return_diag=True,
        use_out_degree=True,
        form=form,
    )
    Ls, ds = csgraph.laplacian(
        mat,
        return_diag=True,
        symmetrized=True,
        form=form,
    )
    Ls_normed, ds_normed = csgraph.laplacian(
        mat,
        return_diag=True,
        symmetrized=True,
        normed=True,
        form=form,
    )
    # Symmetrize the adjacency matrix itself and recompute for comparison.
    mat += mat.T
    Lss, dss = csgraph.laplacian(mat, return_diag=True, form=form)
    Lss_normed, dss_normed = csgraph.laplacian(
        mat,
        return_diag=True,
        normed=True,
        form=form,
    )

    assert_allclose(ds, d_in + d_out)
    assert_allclose(ds, dss)
    assert_allclose(ds_normed, dss_normed)

    # For non-array forms, materialize each operator by applying it to I.
    d = {}
    for L in ["L_in", "L_out", "Ls", "Ls_normed", "Lss", "Lss_normed"]:
        if form == "array":
            d[L] = eval(L)
        else:
            d[L] = eval(L)(np.eye(n, dtype=mat.dtype))

    _assert_allclose_sparse(d["Ls"], d["L_in"] + d["L_out"].T)
    _assert_allclose_sparse(d["Ls"], d["Lss"])
    _assert_allclose_sparse(d["Ls_normed"], d["Lss_normed"])


@pytest.mark.parametrize(
    "arr_type", [np.asarray, sparse.csr_matrix, sparse.coo_matrix]
)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("normed", [True, False])
@pytest.mark.parametrize("symmetrized", [True, False])
@pytest.mark.parametrize("use_out_degree", [True, False])
@pytest.mark.parametrize("form", ["function", "lo"])
def test_format(dtype, arr_type, normed, symmetrized, use_out_degree, form):
    """The function/LinearOperator forms must match the array form."""
    n = 3
    mat = [[0, 1, 0], [4, 2, 0], [0, 0, 0]]
    mat = arr_type(np.array(mat), dtype=dtype)
    Lo, do = csgraph.laplacian(
        mat,
        return_diag=True,
        normed=normed,
        symmetrized=symmetrized,
        use_out_degree=use_out_degree,
        dtype=dtype,
    )
    La, da = csgraph.laplacian(
        mat,
        return_diag=True,
        normed=normed,
        symmetrized=symmetrized,
        use_out_degree=use_out_degree,
        dtype=dtype,
        form="array",
    )
    assert_allclose(do, da)
    _assert_allclose_sparse(Lo, La)

    L, d = csgraph.laplacian(
        mat,
        return_diag=True,
        normed=normed,
        symmetrized=symmetrized,
        use_out_degree=use_out_degree,
        dtype=dtype,
        form=form,
    )
    assert_allclose(d, do)
    assert d.dtype == dtype
    # Materialize the operator by applying it to the identity matrix.
    Lm = L(np.eye(n, dtype=mat.dtype)).astype(dtype)
    _assert_allclose_sparse(Lm, Lo, rtol=2e-7, atol=2e-7)
    x = np.arange(6).reshape(3, 2)
    if not (normed and dtype in INT_DTYPES):
        assert_allclose(L(x), Lo @ x)
    else:
        # Normalized Lo is casted to integer, but L() is not
        pass


def test_format_error_message():
    with pytest.raises(ValueError, match="Invalid form: 'toto'"):
        _ = csgraph.laplacian(np.eye(1), form='toto')
10,623
28.593315
76
py
scipy
scipy-main/scipy/sparse/csgraph/tests/test_matching.py
"""Tests for the bipartite matching routines in scipy.sparse.csgraph."""
from itertools import product

import numpy as np
from numpy.testing import assert_array_equal, assert_equal
import pytest
from scipy.sparse import csr_matrix, coo_matrix, diags
from scipy.sparse.csgraph import (
    maximum_bipartite_matching, min_weight_full_bipartite_matching
)


def test_maximum_bipartite_matching_raises_on_dense_input():
    with pytest.raises(TypeError):
        graph = np.array([[0, 1], [0, 0]])
        maximum_bipartite_matching(graph)


def test_maximum_bipartite_matching_empty_graph():
    graph = csr_matrix((0, 0))
    x = maximum_bipartite_matching(graph, perm_type='row')
    y = maximum_bipartite_matching(graph, perm_type='column')
    expected_matching = np.array([])
    assert_array_equal(expected_matching, x)
    assert_array_equal(expected_matching, y)


def test_maximum_bipartite_matching_empty_left_partition():
    graph = csr_matrix((2, 0))
    x = maximum_bipartite_matching(graph, perm_type='row')
    y = maximum_bipartite_matching(graph, perm_type='column')
    # -1 marks an unmatched vertex.
    assert_array_equal(np.array([]), x)
    assert_array_equal(np.array([-1, -1]), y)


def test_maximum_bipartite_matching_empty_right_partition():
    graph = csr_matrix((0, 3))
    x = maximum_bipartite_matching(graph, perm_type='row')
    y = maximum_bipartite_matching(graph, perm_type='column')
    assert_array_equal(np.array([-1, -1, -1]), x)
    assert_array_equal(np.array([]), y)


def test_maximum_bipartite_matching_graph_with_no_edges():
    graph = csr_matrix((2, 2))
    x = maximum_bipartite_matching(graph, perm_type='row')
    y = maximum_bipartite_matching(graph, perm_type='column')
    assert_array_equal(np.array([-1, -1]), x)
    assert_array_equal(np.array([-1, -1]), y)


def test_maximum_bipartite_matching_graph_that_causes_augmentation():
    # In this graph, column 1 is initially assigned to row 1, but it should be
    # reassigned to make room for row 2.
    graph = csr_matrix([[1, 1], [1, 0]])
    x = maximum_bipartite_matching(graph, perm_type='column')
    y = maximum_bipartite_matching(graph, perm_type='row')
    expected_matching = np.array([1, 0])
    assert_array_equal(expected_matching, x)
    assert_array_equal(expected_matching, y)


def test_maximum_bipartite_matching_graph_with_more_rows_than_columns():
    graph = csr_matrix([[1, 1], [1, 0], [0, 1]])
    x = maximum_bipartite_matching(graph, perm_type='column')
    y = maximum_bipartite_matching(graph, perm_type='row')
    assert_array_equal(np.array([0, -1, 1]), x)
    assert_array_equal(np.array([0, 2]), y)


def test_maximum_bipartite_matching_graph_with_more_columns_than_rows():
    graph = csr_matrix([[1, 1, 0], [0, 0, 1]])
    x = maximum_bipartite_matching(graph, perm_type='column')
    y = maximum_bipartite_matching(graph, perm_type='row')
    assert_array_equal(np.array([0, 2]), x)
    assert_array_equal(np.array([0, -1, 1]), y)


def test_maximum_bipartite_matching_explicit_zeros_count_as_edges():
    # Stored zeros in the CSR structure are still edges.
    data = [0, 0]
    indices = [1, 0]
    indptr = [0, 1, 2]
    graph = csr_matrix((data, indices, indptr), shape=(2, 2))
    x = maximum_bipartite_matching(graph, perm_type='row')
    y = maximum_bipartite_matching(graph, perm_type='column')
    expected_matching = np.array([1, 0])
    assert_array_equal(expected_matching, x)
    assert_array_equal(expected_matching, y)


def test_maximum_bipartite_matching_feasibility_of_result():
    # This is a regression test for GitHub issue #11458
    data = np.ones(50, dtype=int)
    indices = [11, 12, 19, 22, 23, 5, 22, 3, 8, 10, 5, 6, 11, 12, 13, 5, 13,
               14, 20, 22, 3, 15, 3, 13, 14, 11, 12, 19, 22, 23, 5, 22, 3, 8,
               10, 5, 6, 11, 12, 13, 5, 13, 14, 20, 22, 3, 15, 3, 13, 14]
    indptr = [0, 5, 7, 10, 10, 15, 20, 22, 22, 23, 25, 30, 32, 35, 35, 40, 45,
              47, 47, 48, 50]
    graph = csr_matrix((data, indices, indptr), shape=(20, 25))
    x = maximum_bipartite_matching(graph, perm_type='row')
    y = maximum_bipartite_matching(graph, perm_type='column')
    assert (x != -1).sum() == 13
    assert (y != -1).sum() == 13
    # Ensure that each element of the matching is in fact an edge in the
    # graph.
    for u, v in zip(range(graph.shape[0]), y):
        if v != -1:
            assert graph[u, v]
    for u, v in zip(x, range(graph.shape[1])):
        if u != -1:
            assert graph[u, v]


def test_matching_large_random_graph_with_one_edge_incident_to_each_vertex():
    np.random.seed(42)
    A = diags(np.ones(25), offsets=0, format='csr')
    rand_perm = np.random.permutation(25)
    rand_perm2 = np.random.permutation(25)

    Rrow = np.arange(25)
    Rcol = rand_perm
    Rdata = np.ones(25, dtype=int)
    Rmat = coo_matrix((Rdata, (Rrow, Rcol))).tocsr()

    Crow = rand_perm2
    Ccol = np.arange(25)
    Cdata = np.ones(25, dtype=int)
    Cmat = coo_matrix((Cdata, (Crow, Ccol))).tocsr()

    # Randomly permute identity matrix
    B = Rmat * A * Cmat

    # Row permute
    perm = maximum_bipartite_matching(B, perm_type='row')
    Rrow = np.arange(25)
    Rcol = perm
    Rdata = np.ones(25, dtype=int)
    Rmat = coo_matrix((Rdata, (Rrow, Rcol))).tocsr()
    C1 = Rmat * B

    # Column permute
    perm2 = maximum_bipartite_matching(B, perm_type='column')
    Crow = perm2
    Ccol = np.arange(25)
    Cdata = np.ones(25, dtype=int)
    Cmat = coo_matrix((Cdata, (Crow, Ccol))).tocsr()
    C2 = B * Cmat

    # Should get identity matrix back
    assert_equal(any(C1.diagonal() == 0), False)
    assert_equal(any(C2.diagonal() == 0), False)


@pytest.mark.parametrize('num_rows,num_cols', [(0, 0), (2, 0), (0, 3)])
def test_min_weight_full_matching_trivial_graph(num_rows, num_cols):
    biadjacency_matrix = csr_matrix((num_cols, num_rows))
    row_ind, col_ind = min_weight_full_bipartite_matching(biadjacency_matrix)
    assert len(row_ind) == 0
    assert len(col_ind) == 0


@pytest.mark.parametrize('biadjacency_matrix',
                         [
                            [[1, 1, 1], [1, 0, 0], [1, 0, 0]],
                            [[1, 1, 1], [0, 0, 1], [0, 0, 1]],
                            [[1, 0, 0], [2, 0, 0]],
                            [[0, 1, 0], [0, 2, 0]],
                            [[1, 0], [2, 0], [5, 0]]
                         ])
def test_min_weight_full_matching_infeasible_problems(biadjacency_matrix):
    # No full matching exists for any of these; ValueError is expected.
    with pytest.raises(ValueError):
        min_weight_full_bipartite_matching(csr_matrix(biadjacency_matrix))


def test_explicit_zero_causes_warning():
    with pytest.warns(UserWarning):
        biadjacency_matrix = csr_matrix(((2, 0, 3), (0, 1, 1), (0, 2, 3)))
        min_weight_full_bipartite_matching(biadjacency_matrix)


# General test for linear sum assignment solvers to make it possible to rely
# on the same tests for scipy.optimize.linear_sum_assignment.
def linear_sum_assignment_assertions(
    solver, array_type, sign, test_case
):
    """Run a single assignment test case through ``solver``.

    ``sign`` is 1 for minimization, -1 for maximization (the cost matrix
    and the expected costs are both negated). The transposed problem is
    checked as well.
    """
    cost_matrix, expected_cost = test_case
    maximize = sign == -1
    cost_matrix = sign * array_type(cost_matrix)
    expected_cost = sign * np.array(expected_cost)

    row_ind, col_ind = solver(cost_matrix, maximize=maximize)
    assert_array_equal(row_ind, np.sort(row_ind))
    assert_array_equal(expected_cost,
                       np.array(cost_matrix[row_ind, col_ind]).flatten())

    cost_matrix = cost_matrix.T
    row_ind, col_ind = solver(cost_matrix, maximize=maximize)
    assert_array_equal(row_ind, np.sort(row_ind))
    assert_array_equal(np.sort(expected_cost),
                       np.sort(np.array(
                           cost_matrix[row_ind, col_ind])).flatten())


linear_sum_assignment_test_cases = product(
    [-1, 1],
    [
        # Square
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         [150, 400, 300]),

        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         [150, 2, 300]),

        # Square
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         [10, 1, 7]),

        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         [10, 1, 4]),

        # Square with infeasible (infinite-cost) entries
        ([[10, float("inf"), float("inf")],
          [float("inf"), float("inf"), 1],
          [float("inf"), 7, float("inf")]],
         [10, 1, 7])
    ])


@pytest.mark.parametrize('sign,test_case', linear_sum_assignment_test_cases)
def test_min_weight_full_matching_small_inputs(sign, test_case):
    linear_sum_assignment_assertions(
        min_weight_full_bipartite_matching, csr_matrix, sign, test_case)
8,532
34.554167
79
py
scipy
scipy-main/scipy/sparse/csgraph/tests/test_connected_components.py
"""Tests for scipy.sparse.csgraph.connected_components."""
import numpy as np
from numpy.testing import assert_equal, assert_array_almost_equal
from scipy.sparse import csgraph


def test_weak_connections():
    """0 -> 1 is weakly connected; vertex 2 is isolated."""
    Xde = np.array([[0, 1, 0],
                    [0, 0, 0],
                    [0, 0, 0]])

    Xsp = csgraph.csgraph_from_dense(Xde, null_value=0)

    for X in Xsp, Xde:
        n_components, labels =\
            csgraph.connected_components(X, directed=True,
                                         connection='weak')

        assert_equal(n_components, 2)
        assert_array_almost_equal(labels, [0, 0, 1])


def test_strong_connections():
    """A one-way edge gives 3 strong components; a two-way edge gives 2."""
    X1de = np.array([[0, 1, 0],
                     [0, 0, 0],
                     [0, 0, 0]])
    # Symmetrized version: 0 <-> 1.
    X2de = X1de + X1de.T

    X1sp = csgraph.csgraph_from_dense(X1de, null_value=0)
    X2sp = csgraph.csgraph_from_dense(X2de, null_value=0)

    for X in X1sp, X1de:
        n_components, labels =\
            csgraph.connected_components(X, directed=True,
                                         connection='strong')

        assert_equal(n_components, 3)
        labels.sort()
        assert_array_almost_equal(labels, [0, 1, 2])

    for X in X2sp, X2de:
        n_components, labels =\
            csgraph.connected_components(X, directed=True,
                                         connection='strong')

        assert_equal(n_components, 2)
        labels.sort()
        assert_array_almost_equal(labels, [0, 0, 1])


def test_strong_connections2():
    X = np.array([[0, 0, 0, 0, 0, 0],
                  [1, 0, 1, 0, 0, 0],
                  [0, 0, 0, 1, 0, 0],
                  [0, 0, 1, 0, 1, 0],
                  [0, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 1, 0]])
    n_components, labels =\
        csgraph.connected_components(X, directed=True,
                                     connection='strong')
    # Only the 2 <-> 3 cycle merges; all other vertices stand alone.
    assert_equal(n_components, 5)
    labels.sort()
    assert_array_almost_equal(labels, [0, 1, 2, 2, 3, 4])


def test_weak_connections2():
    X = np.array([[0, 0, 0, 0, 0, 0],
                  [1, 0, 0, 0, 0, 0],
                  [0, 0, 0, 1, 0, 0],
                  [0, 0, 1, 0, 1, 0],
                  [0, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 1, 0]])
    n_components, labels =\
        csgraph.connected_components(X, directed=True,
                                     connection='weak')
    assert_equal(n_components, 2)
    labels.sort()
    assert_array_almost_equal(labels, [0, 0, 1, 1, 1, 1])


def test_ticket1876():
    # Regression test: this failed in the original implementation
    # There should be two strongly-connected components; previously gave one
    g = np.array([[0, 1, 1, 0],
                  [1, 0, 0, 1],
                  [0, 0, 0, 1],
                  [0, 0, 1, 0]])
    n_components, labels = csgraph.connected_components(g,
                                                        connection='strong')

    assert_equal(n_components, 2)
    assert_equal(labels[0], labels[1])
    assert_equal(labels[2], labels[3])


def test_fully_connected_graph():
    # Fully connected dense matrices raised an exception.
    # https://github.com/scipy/scipy/issues/3818
    g = np.ones((4, 4))
    n_components, labels = csgraph.connected_components(g)
    assert_equal(n_components, 1)
3,199
31
79
py
scipy
scipy-main/scipy/sparse/csgraph/tests/test_reordering.py
import numpy as np from numpy.testing import assert_equal from scipy.sparse.csgraph import reverse_cuthill_mckee, structural_rank from scipy.sparse import csc_matrix, csr_matrix, coo_matrix def test_graph_reverse_cuthill_mckee(): A = np.array([[1, 0, 0, 0, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1, 0, 1], [0, 1, 1, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 1, 0], [1, 0, 1, 0, 1, 0, 0, 0], [0, 1, 0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 0, 1, 0, 1]], dtype=int) graph = csr_matrix(A) perm = reverse_cuthill_mckee(graph) correct_perm = np.array([6, 3, 7, 5, 1, 2, 4, 0]) assert_equal(perm, correct_perm) # Test int64 indices input graph.indices = graph.indices.astype('int64') graph.indptr = graph.indptr.astype('int64') perm = reverse_cuthill_mckee(graph, True) assert_equal(perm, correct_perm) def test_graph_reverse_cuthill_mckee_ordering(): data = np.ones(63,dtype=int) rows = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 15]) cols = np.array([0, 2, 5, 8, 10, 1, 3, 9, 11, 0, 2, 7, 10, 1, 3, 11, 4, 6, 12, 14, 0, 7, 13, 15, 4, 6, 14, 2, 5, 7, 15, 0, 8, 10, 13, 1, 9, 11, 0, 2, 8, 10, 15, 1, 3, 9, 11, 4, 12, 14, 5, 8, 13, 15, 4, 6, 12, 14, 5, 7, 10, 13, 15]) graph = coo_matrix((data, (rows,cols))).tocsr() perm = reverse_cuthill_mckee(graph) correct_perm = np.array([12, 14, 4, 6, 10, 8, 2, 15, 0, 13, 7, 5, 9, 11, 1, 3]) assert_equal(perm, correct_perm) def test_graph_structural_rank(): # Test square matrix #1 A = csc_matrix([[1, 1, 0], [1, 0, 1], [0, 1, 0]]) assert_equal(structural_rank(A), 3) # Test square matrix #2 rows = np.array([0,0,0,0,0,1,1,2,2,3,3,3,3,3,3,4,4,5,5,6,6,7,7]) cols = np.array([0,1,2,3,4,2,5,2,6,0,1,3,5,6,7,4,5,5,6,2,6,2,4]) data = np.ones_like(rows) B = coo_matrix((data,(rows,cols)), shape=(8,8)) assert_equal(structural_rank(B), 6) #Test non-square matrix C = csc_matrix([[1, 0, 2, 0], 
[2, 0, 4, 0]]) assert_equal(structural_rank(C), 2) #Test tall matrix assert_equal(structural_rank(C.T), 2)
2,613
35.816901
71
py
scipy
scipy-main/scipy/sparse/csgraph/tests/test_traversal.py
import numpy as np from numpy.testing import assert_array_almost_equal from scipy.sparse.csgraph import (breadth_first_tree, depth_first_tree, csgraph_to_dense, csgraph_from_dense) def test_graph_breadth_first(): csgraph = np.array([[0, 1, 2, 0, 0], [1, 0, 0, 0, 3], [2, 0, 0, 7, 0], [0, 0, 7, 0, 1], [0, 3, 0, 1, 0]]) csgraph = csgraph_from_dense(csgraph, null_value=0) bfirst = np.array([[0, 1, 2, 0, 0], [0, 0, 0, 0, 3], [0, 0, 0, 7, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]) for directed in [True, False]: bfirst_test = breadth_first_tree(csgraph, 0, directed) assert_array_almost_equal(csgraph_to_dense(bfirst_test), bfirst) def test_graph_depth_first(): csgraph = np.array([[0, 1, 2, 0, 0], [1, 0, 0, 0, 3], [2, 0, 0, 7, 0], [0, 0, 7, 0, 1], [0, 3, 0, 1, 0]]) csgraph = csgraph_from_dense(csgraph, null_value=0) dfirst = np.array([[0, 1, 0, 0, 0], [0, 0, 0, 0, 3], [0, 0, 0, 0, 0], [0, 0, 7, 0, 0], [0, 0, 0, 1, 0]]) for directed in [True, False]: dfirst_test = depth_first_tree(csgraph, 0, directed) assert_array_almost_equal(csgraph_to_dense(dfirst_test), dfirst) def test_graph_breadth_first_trivial_graph(): csgraph = np.array([[0]]) csgraph = csgraph_from_dense(csgraph, null_value=0) bfirst = np.array([[0]]) for directed in [True, False]: bfirst_test = breadth_first_tree(csgraph, 0, directed) assert_array_almost_equal(csgraph_to_dense(bfirst_test), bfirst) def test_graph_depth_first_trivial_graph(): csgraph = np.array([[0]]) csgraph = csgraph_from_dense(csgraph, null_value=0) bfirst = np.array([[0]]) for directed in [True, False]: bfirst_test = depth_first_tree(csgraph, 0, directed) assert_array_almost_equal(csgraph_to_dense(bfirst_test), bfirst)
2,325
32.710145
71
py
scipy
scipy-main/scipy/sparse/csgraph/tests/test_shortest_path.py
from io import StringIO import warnings import numpy as np from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose from pytest import raises as assert_raises from scipy.sparse.csgraph import (shortest_path, dijkstra, johnson, bellman_ford, construct_dist_matrix, NegativeCycleError) import scipy.sparse from scipy.io import mmread import pytest directed_G = np.array([[0, 3, 3, 0, 0], [0, 0, 0, 2, 4], [0, 0, 0, 0, 0], [1, 0, 0, 0, 0], [2, 0, 0, 2, 0]], dtype=float) undirected_G = np.array([[0, 3, 3, 1, 2], [3, 0, 0, 2, 4], [3, 0, 0, 0, 0], [1, 2, 0, 0, 2], [2, 4, 0, 2, 0]], dtype=float) unweighted_G = (directed_G > 0).astype(float) directed_SP = [[0, 3, 3, 5, 7], [3, 0, 6, 2, 4], [np.inf, np.inf, 0, np.inf, np.inf], [1, 4, 4, 0, 8], [2, 5, 5, 2, 0]] directed_sparse_zero_G = scipy.sparse.csr_matrix(([0, 1, 2, 3, 1], ([0, 1, 2, 3, 4], [1, 2, 0, 4, 3])), shape = (5, 5)) directed_sparse_zero_SP = [[0, 0, 1, np.inf, np.inf], [3, 0, 1, np.inf, np.inf], [2, 2, 0, np.inf, np.inf], [np.inf, np.inf, np.inf, 0, 3], [np.inf, np.inf, np.inf, 1, 0]] undirected_sparse_zero_G = scipy.sparse.csr_matrix(([0, 0, 1, 1, 2, 2, 1, 1], ([0, 1, 1, 2, 2, 0, 3, 4], [1, 0, 2, 1, 0, 2, 4, 3])), shape = (5, 5)) undirected_sparse_zero_SP = [[0, 0, 1, np.inf, np.inf], [0, 0, 1, np.inf, np.inf], [1, 1, 0, np.inf, np.inf], [np.inf, np.inf, np.inf, 0, 1], [np.inf, np.inf, np.inf, 1, 0]] directed_pred = np.array([[-9999, 0, 0, 1, 1], [3, -9999, 0, 1, 1], [-9999, -9999, -9999, -9999, -9999], [3, 0, 0, -9999, 1], [4, 0, 0, 4, -9999]], dtype=float) undirected_SP = np.array([[0, 3, 3, 1, 2], [3, 0, 6, 2, 4], [3, 6, 0, 4, 5], [1, 2, 4, 0, 2], [2, 4, 5, 2, 0]], dtype=float) undirected_SP_limit_2 = np.array([[0, np.inf, np.inf, 1, 2], [np.inf, 0, np.inf, 2, np.inf], [np.inf, np.inf, 0, np.inf, np.inf], [1, 2, np.inf, 0, 2], [2, np.inf, np.inf, 2, 0]], dtype=float) undirected_SP_limit_0 = np.ones((5, 5), dtype=float) - np.eye(5) undirected_SP_limit_0[undirected_SP_limit_0 > 0] = 
np.inf undirected_pred = np.array([[-9999, 0, 0, 0, 0], [1, -9999, 0, 1, 1], [2, 0, -9999, 0, 0], [3, 3, 0, -9999, 3], [4, 4, 0, 4, -9999]], dtype=float) directed_negative_weighted_G = np.array([[0, 0, 0], [-1, 0, 0], [0, -1, 0]], dtype=float) directed_negative_weighted_SP = np.array([[0, np.inf, np.inf], [-1, 0, np.inf], [-2, -1, 0]], dtype=float) methods = ['auto', 'FW', 'D', 'BF', 'J'] def test_dijkstra_limit(): limits = [0, 2, np.inf] results = [undirected_SP_limit_0, undirected_SP_limit_2, undirected_SP] def check(limit, result): SP = dijkstra(undirected_G, directed=False, limit=limit) assert_array_almost_equal(SP, result) for limit, result in zip(limits, results): check(limit, result) def test_directed(): def check(method): SP = shortest_path(directed_G, method=method, directed=True, overwrite=False) assert_array_almost_equal(SP, directed_SP) for method in methods: check(method) def test_undirected(): def check(method, directed_in): if directed_in: SP1 = shortest_path(directed_G, method=method, directed=False, overwrite=False) assert_array_almost_equal(SP1, undirected_SP) else: SP2 = shortest_path(undirected_G, method=method, directed=True, overwrite=False) assert_array_almost_equal(SP2, undirected_SP) for method in methods: for directed_in in (True, False): check(method, directed_in) def test_directed_sparse_zero(): # test directed sparse graph with zero-weight edge and two connected components def check(method): SP = shortest_path(directed_sparse_zero_G, method=method, directed=True, overwrite=False) assert_array_almost_equal(SP, directed_sparse_zero_SP) for method in methods: check(method) def test_undirected_sparse_zero(): def check(method, directed_in): if directed_in: SP1 = shortest_path(directed_sparse_zero_G, method=method, directed=False, overwrite=False) assert_array_almost_equal(SP1, undirected_sparse_zero_SP) else: SP2 = shortest_path(undirected_sparse_zero_G, method=method, directed=True, overwrite=False) assert_array_almost_equal(SP2, 
undirected_sparse_zero_SP) for method in methods: for directed_in in (True, False): check(method, directed_in) @pytest.mark.parametrize('directed, SP_ans', ((True, directed_SP), (False, undirected_SP))) @pytest.mark.parametrize('indices', ([0, 2, 4], [0, 4], [3, 4], [0, 0])) def test_dijkstra_indices_min_only(directed, SP_ans, indices): SP_ans = np.array(SP_ans) indices = np.array(indices, dtype=np.int64) min_ind_ans = indices[np.argmin(SP_ans[indices, :], axis=0)] min_d_ans = np.zeros(SP_ans.shape[0], SP_ans.dtype) for k in range(SP_ans.shape[0]): min_d_ans[k] = SP_ans[min_ind_ans[k], k] min_ind_ans[np.isinf(min_d_ans)] = -9999 SP, pred, sources = dijkstra(directed_G, directed=directed, indices=indices, min_only=True, return_predecessors=True) assert_array_almost_equal(SP, min_d_ans) assert_array_equal(min_ind_ans, sources) SP = dijkstra(directed_G, directed=directed, indices=indices, min_only=True, return_predecessors=False) assert_array_almost_equal(SP, min_d_ans) @pytest.mark.parametrize('n', (10, 100, 1000)) def test_dijkstra_min_only_random(n): np.random.seed(1234) data = scipy.sparse.rand(n, n, density=0.5, format='lil', random_state=42, dtype=np.float64) data.setdiag(np.zeros(n, dtype=np.bool_)) # choose some random vertices v = np.arange(n) np.random.shuffle(v) indices = v[:int(n*.1)] ds, pred, sources = dijkstra(data, directed=True, indices=indices, min_only=True, return_predecessors=True) for k in range(n): p = pred[k] s = sources[k] while p != -9999: assert sources[p] == s p = pred[p] def test_dijkstra_random(): # reproduces the hang observed in gh-17782 n = 10 indices = [0, 4, 4, 5, 7, 9, 0, 6, 2, 3, 7, 9, 1, 2, 9, 2, 5, 6] indptr = [0, 0, 2, 5, 6, 7, 8, 12, 15, 18, 18] data = [0.33629, 0.40458, 0.47493, 0.42757, 0.11497, 0.91653, 0.69084, 0.64979, 0.62555, 0.743, 0.01724, 0.99945, 0.31095, 0.15557, 0.02439, 0.65814, 0.23478, 0.24072] graph = scipy.sparse.csr_matrix((data, indices, indptr), shape=(n, n)) dijkstra(graph, directed=True, 
return_predecessors=True) def test_gh_17782_segfault(): text = """%%MatrixMarket matrix coordinate real general 84 84 22 2 1 4.699999809265137e+00 6 14 1.199999973177910e-01 9 6 1.199999973177910e-01 10 16 2.012000083923340e+01 11 10 1.422000026702881e+01 12 1 9.645999908447266e+01 13 18 2.012000083923340e+01 14 13 4.679999828338623e+00 15 11 1.199999973177910e-01 16 12 1.199999973177910e-01 18 15 1.199999973177910e-01 32 2 2.299999952316284e+00 33 20 6.000000000000000e+00 33 32 5.000000000000000e+00 36 9 3.720000028610229e+00 36 37 3.720000028610229e+00 36 38 3.720000028610229e+00 37 44 8.159999847412109e+00 38 32 7.903999328613281e+01 43 20 2.400000000000000e+01 43 33 4.000000000000000e+00 44 43 6.028000259399414e+01 """ data = mmread(StringIO(text)) dijkstra(data, directed=True, return_predecessors=True) def test_shortest_path_indices(): indices = np.arange(4) def check(func, indshape): outshape = indshape + (5,) SP = func(directed_G, directed=False, indices=indices.reshape(indshape)) assert_array_almost_equal(SP, undirected_SP[indices].reshape(outshape)) for indshape in [(4,), (4, 1), (2, 2)]: for func in (dijkstra, bellman_ford, johnson, shortest_path): check(func, indshape) assert_raises(ValueError, shortest_path, directed_G, method='FW', indices=indices) def test_predecessors(): SP_res = {True: directed_SP, False: undirected_SP} pred_res = {True: directed_pred, False: undirected_pred} def check(method, directed): SP, pred = shortest_path(directed_G, method, directed=directed, overwrite=False, return_predecessors=True) assert_array_almost_equal(SP, SP_res[directed]) assert_array_almost_equal(pred, pred_res[directed]) for method in methods: for directed in (True, False): check(method, directed) def test_construct_shortest_path(): def check(method, directed): SP1, pred = shortest_path(directed_G, directed=directed, overwrite=False, return_predecessors=True) SP2 = construct_dist_matrix(directed_G, pred, directed=directed) assert_array_almost_equal(SP1, SP2) for 
method in methods: for directed in (True, False): check(method, directed) def test_unweighted_path(): def check(method, directed): SP1 = shortest_path(directed_G, directed=directed, overwrite=False, unweighted=True) SP2 = shortest_path(unweighted_G, directed=directed, overwrite=False, unweighted=False) assert_array_almost_equal(SP1, SP2) for method in methods: for directed in (True, False): check(method, directed) def test_negative_cycles(): # create a small graph with a negative cycle graph = np.ones([5, 5]) graph.flat[::6] = 0 graph[1, 2] = -2 def check(method, directed): assert_raises(NegativeCycleError, shortest_path, graph, method, directed) for method in ['FW', 'J', 'BF']: for directed in (True, False): check(method, directed) @pytest.mark.parametrize("method", ['FW', 'J', 'BF']) def test_negative_weights(method): SP = shortest_path(directed_negative_weighted_G, method, directed=True) assert_allclose(SP, directed_negative_weighted_SP, atol=1e-10) def test_masked_input(): np.ma.masked_equal(directed_G, 0) def check(method): SP = shortest_path(directed_G, method=method, directed=True, overwrite=False) assert_array_almost_equal(SP, directed_SP) for method in methods: check(method) def test_overwrite(): G = np.array([[0, 3, 3, 1, 2], [3, 0, 0, 2, 4], [3, 0, 0, 0, 0], [1, 2, 0, 0, 2], [2, 4, 0, 2, 0]], dtype=float) foo = G.copy() shortest_path(foo, overwrite=False) assert_array_equal(foo, G) @pytest.mark.parametrize('method', methods) def test_buffer(method): # Smoke test that sparse matrices with read-only buffers (e.g., those from # joblib workers) do not cause:: # # ValueError: buffer source array is read-only # G = scipy.sparse.csr_matrix([[1.]]) G.data.flags['WRITEABLE'] = False shortest_path(G, method=method) def test_NaN_warnings(): with warnings.catch_warnings(record=True) as record: shortest_path(np.array([[0, 1], [np.nan, 0]])) for r in record: assert r.category is not RuntimeWarning def test_sparse_matrices(): # Test that using lil,csr and csc sparse 
matrix do not cause error G_dense = np.array([[0, 3, 0, 0, 0], [0, 0, -1, 0, 0], [0, 0, 0, 2, 0], [0, 0, 0, 0, 4], [0, 0, 0, 0, 0]], dtype=float) SP = shortest_path(G_dense) G_csr = scipy.sparse.csr_matrix(G_dense) G_csc = scipy.sparse.csc_matrix(G_dense) G_lil = scipy.sparse.lil_matrix(G_dense) assert_array_almost_equal(SP, shortest_path(G_csr)) assert_array_almost_equal(SP, shortest_path(G_csc)) assert_array_almost_equal(SP, shortest_path(G_lil))
14,441
35.469697
88
py
scipy
scipy-main/scipy/sparse/csgraph/tests/__init__.py
0
0
0
py
scipy
scipy-main/scipy/sparse/tests/test_deprecations.py
import scipy as sp import pytest def test_array_api_deprecations(): X = sp.sparse.csr_array([ [1,2,3], [4,0,6] ]) msg = "1.13.0" with pytest.deprecated_call(match=msg): X.get_shape() with pytest.deprecated_call(match=msg): X.set_shape((2,3)) with pytest.deprecated_call(match=msg): X.asfptype() with pytest.deprecated_call(match=msg): X.getmaxprint() with pytest.deprecated_call(match=msg): X.getnnz() with pytest.deprecated_call(match=msg): X.getH() with pytest.deprecated_call(match=msg): X.getcol(1).todense() with pytest.deprecated_call(match=msg): X.getrow(1).todense()
709
19.285714
43
py
scipy
scipy-main/scipy/sparse/tests/test_spfuncs.py
from numpy import array, kron, diag from numpy.testing import assert_, assert_equal from scipy.sparse import _spfuncs as spfuncs from scipy.sparse import csr_matrix, csc_matrix, bsr_matrix from scipy.sparse._sparsetools import (csr_scale_rows, csr_scale_columns, bsr_scale_rows, bsr_scale_columns) class TestSparseFunctions: def test_scale_rows_and_cols(self): D = array([[1, 0, 0, 2, 3], [0, 4, 0, 5, 0], [0, 0, 6, 7, 0]]) #TODO expose through function S = csr_matrix(D) v = array([1,2,3]) csr_scale_rows(3,5,S.indptr,S.indices,S.data,v) assert_equal(S.toarray(), diag(v)@D) S = csr_matrix(D) v = array([1,2,3,4,5]) csr_scale_columns(3,5,S.indptr,S.indices,S.data,v) assert_equal(S.toarray(), D@diag(v)) # blocks E = kron(D,[[1,2],[3,4]]) S = bsr_matrix(E,blocksize=(2,2)) v = array([1,2,3,4,5,6]) bsr_scale_rows(3,5,2,2,S.indptr,S.indices,S.data,v) assert_equal(S.toarray(), diag(v)@E) S = bsr_matrix(E,blocksize=(2,2)) v = array([1,2,3,4,5,6,7,8,9,10]) bsr_scale_columns(3,5,2,2,S.indptr,S.indices,S.data,v) assert_equal(S.toarray(), E@diag(v)) E = kron(D,[[1,2,3],[4,5,6]]) S = bsr_matrix(E,blocksize=(2,3)) v = array([1,2,3,4,5,6]) bsr_scale_rows(3,5,2,3,S.indptr,S.indices,S.data,v) assert_equal(S.toarray(), diag(v)@E) S = bsr_matrix(E,blocksize=(2,3)) v = array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]) bsr_scale_columns(3,5,2,3,S.indptr,S.indices,S.data,v) assert_equal(S.toarray(), E@diag(v)) def test_estimate_blocksize(self): mats = [] mats.append([[0,1],[1,0]]) mats.append([[1,1,0],[0,0,1],[1,0,1]]) mats.append([[0],[0],[1]]) mats = [array(x) for x in mats] blks = [] blks.append([[1]]) blks.append([[1,1],[1,1]]) blks.append([[1,1],[0,1]]) blks.append([[1,1,0],[1,0,1],[1,1,1]]) blks = [array(x) for x in blks] for A in mats: for B in blks: X = kron(A,B) r,c = spfuncs.estimate_blocksize(X) assert_(r >= B.shape[0]) assert_(c >= B.shape[1]) def test_count_blocks(self): def gold(A,bs): R,C = bs I,J = A.nonzero() return len(set(zip(I//R,J//C))) mats = [] mats.append([[0]]) 
mats.append([[1]]) mats.append([[1,0]]) mats.append([[1,1]]) mats.append([[0,1],[1,0]]) mats.append([[1,1,0],[0,0,1],[1,0,1]]) mats.append([[0],[0],[1]]) for A in mats: for B in mats: X = kron(A,B) Y = csr_matrix(X) for R in range(1,6): for C in range(1,6): assert_equal(spfuncs.count_blocks(Y, (R, C)), gold(X, (R, C))) X = kron([[1,1,0],[0,0,1],[1,0,1]],[[1,1]]) Y = csc_matrix(X) assert_equal(spfuncs.count_blocks(X, (1, 2)), gold(X, (1, 2))) assert_equal(spfuncs.count_blocks(Y, (1, 2)), gold(X, (1, 2)))
3,258
32.255102
86
py
scipy
scipy-main/scipy/sparse/tests/test_sputils.py
"""unit tests for sparse utility functions""" import numpy as np from numpy.testing import assert_equal from pytest import raises as assert_raises from scipy.sparse import _sputils as sputils from scipy.sparse._sputils import matrix class TestSparseUtils: def test_upcast(self): assert_equal(sputils.upcast('intc'), np.intc) assert_equal(sputils.upcast('int32', 'float32'), np.float64) assert_equal(sputils.upcast('bool', complex, float), np.complex128) assert_equal(sputils.upcast('i', 'd'), np.float64) def test_getdtype(self): A = np.array([1], dtype='int8') assert_equal(sputils.getdtype(None, default=float), float) assert_equal(sputils.getdtype(None, a=A), np.int8) with assert_raises( ValueError, match="object dtype is not supported by sparse matrices", ): sputils.getdtype("O") def test_isscalarlike(self): assert_equal(sputils.isscalarlike(3.0), True) assert_equal(sputils.isscalarlike(-4), True) assert_equal(sputils.isscalarlike(2.5), True) assert_equal(sputils.isscalarlike(1 + 3j), True) assert_equal(sputils.isscalarlike(np.array(3)), True) assert_equal(sputils.isscalarlike("16"), True) assert_equal(sputils.isscalarlike(np.array([3])), False) assert_equal(sputils.isscalarlike([[3]]), False) assert_equal(sputils.isscalarlike((1,)), False) assert_equal(sputils.isscalarlike((1, 2)), False) def test_isintlike(self): assert_equal(sputils.isintlike(-4), True) assert_equal(sputils.isintlike(np.array(3)), True) assert_equal(sputils.isintlike(np.array([3])), False) with assert_raises( ValueError, match="Inexact indices into sparse matrices are not allowed" ): sputils.isintlike(3.0) assert_equal(sputils.isintlike(2.5), False) assert_equal(sputils.isintlike(1 + 3j), False) assert_equal(sputils.isintlike((1,)), False) assert_equal(sputils.isintlike((1, 2)), False) def test_isshape(self): assert_equal(sputils.isshape((1, 2)), True) assert_equal(sputils.isshape((5, 2)), True) assert_equal(sputils.isshape((1.5, 2)), False) assert_equal(sputils.isshape((2, 2, 2)), False) 
assert_equal(sputils.isshape(([2], 2)), False) assert_equal(sputils.isshape((-1, 2), nonneg=False),True) assert_equal(sputils.isshape((2, -1), nonneg=False),True) assert_equal(sputils.isshape((-1, 2), nonneg=True),False) assert_equal(sputils.isshape((2, -1), nonneg=True),False) assert_equal(sputils.isshape((1.5, 2), allow_ndim=True), False) assert_equal(sputils.isshape(([2], 2), allow_ndim=True), False) assert_equal(sputils.isshape((2, 2, -2), nonneg=True, allow_ndim=True), False) assert_equal(sputils.isshape((2,), allow_ndim=True), True) assert_equal(sputils.isshape((2, 2,), allow_ndim=True), True) assert_equal(sputils.isshape((2, 2, 2), allow_ndim=True), True) def test_issequence(self): assert_equal(sputils.issequence((1,)), True) assert_equal(sputils.issequence((1, 2, 3)), True) assert_equal(sputils.issequence([1]), True) assert_equal(sputils.issequence([1, 2, 3]), True) assert_equal(sputils.issequence(np.array([1, 2, 3])), True) assert_equal(sputils.issequence(np.array([[1], [2], [3]])), False) assert_equal(sputils.issequence(3), False) def test_ismatrix(self): assert_equal(sputils.ismatrix(((),)), True) assert_equal(sputils.ismatrix([[1], [2]]), True) assert_equal(sputils.ismatrix(np.arange(3)[None]), True) assert_equal(sputils.ismatrix([1, 2]), False) assert_equal(sputils.ismatrix(np.arange(3)), False) assert_equal(sputils.ismatrix([[[1]]]), False) assert_equal(sputils.ismatrix(3), False) def test_isdense(self): assert_equal(sputils.isdense(np.array([1])), True) assert_equal(sputils.isdense(matrix([1])), True) def test_validateaxis(self): assert_raises(TypeError, sputils.validateaxis, (0, 1)) assert_raises(TypeError, sputils.validateaxis, 1.5) assert_raises(ValueError, sputils.validateaxis, 3) # These function calls should not raise errors for axis in (-2, -1, 0, 1, None): sputils.validateaxis(axis) def test_get_index_dtype(self): imax = np.int64(np.iinfo(np.int32).max) too_big = imax + 1 # Check that uint32's with no values too large doesn't return # int64 
a1 = np.ones(90, dtype='uint32') a2 = np.ones(90, dtype='uint32') assert_equal( np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)), np.dtype('int32') ) # Check that if we can not convert but all values are less than or # equal to max that we can just convert to int32 a1[-1] = imax assert_equal( np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)), np.dtype('int32') ) # Check that if it can not convert directly and the contents are # too large that we return int64 a1[-1] = too_big assert_equal( np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)), np.dtype('int64') ) # test that if can not convert and didn't specify to check_contents # we return int64 a1 = np.ones(89, dtype='uint32') a2 = np.ones(89, dtype='uint32') assert_equal( np.dtype(sputils.get_index_dtype((a1, a2))), np.dtype('int64') ) # Check that even if we have arrays that can be converted directly # that if we specify a maxval directly it takes precedence a1 = np.ones(12, dtype='uint32') a2 = np.ones(12, dtype='uint32') assert_equal( np.dtype(sputils.get_index_dtype( (a1, a2), maxval=too_big, check_contents=True )), np.dtype('int64') ) # Check that an array with a too max size and maxval set # still returns int64 a1[-1] = too_big assert_equal( np.dtype(sputils.get_index_dtype((a1, a2), maxval=too_big)), np.dtype('int64') ) def test_check_shape_overflow(self): new_shape = sputils.check_shape([(10, -1)], (65535, 131070)) assert_equal(new_shape, (10, 858967245)) def test_matrix(self): a = [[1, 2, 3]] b = np.array(a) assert isinstance(sputils.matrix(a), np.matrix) assert isinstance(sputils.matrix(b), np.matrix) c = sputils.matrix(b) c[:, :] = 123 assert_equal(b, a) c = sputils.matrix(b, copy=False) c[:, :] = 123 assert_equal(b, [[123, 123, 123]]) def test_asmatrix(self): a = [[1, 2, 3]] b = np.array(a) assert isinstance(sputils.asmatrix(a), np.matrix) assert isinstance(sputils.asmatrix(b), np.matrix) c = sputils.asmatrix(b) c[:, :] = 123 assert_equal(b, [[123, 123, 123]])
7,297
36.045685
79
py
scipy
scipy-main/scipy/sparse/tests/test_extract.py
"""test sparse matrix construction functions""" from numpy.testing import assert_equal from scipy.sparse import csr_matrix import numpy as np from scipy.sparse import _extract class TestExtract: def setup_method(self): self.cases = [ csr_matrix([[1,2]]), csr_matrix([[1,0]]), csr_matrix([[0,0]]), csr_matrix([[1],[2]]), csr_matrix([[1],[0]]), csr_matrix([[0],[0]]), csr_matrix([[1,2],[3,4]]), csr_matrix([[0,1],[0,0]]), csr_matrix([[0,0],[1,0]]), csr_matrix([[0,0],[0,0]]), csr_matrix([[1,2,0,0,3],[4,5,0,6,7],[0,0,8,9,0]]), csr_matrix([[1,2,0,0,3],[4,5,0,6,7],[0,0,8,9,0]]).T, ] def find(self): for A in self.cases: I,J,V = _extract.find(A) assert_equal(A.toarray(), csr_matrix(((I,J),V), shape=A.shape)) def test_tril(self): for A in self.cases: B = A.toarray() for k in [-3,-2,-1,0,1,2,3]: assert_equal(_extract.tril(A,k=k).toarray(), np.tril(B,k=k)) def test_triu(self): for A in self.cases: B = A.toarray() for k in [-3,-2,-1,0,1,2,3]: assert_equal(_extract.triu(A,k=k).toarray(), np.triu(B,k=k))
1,313
29.55814
76
py
scipy
scipy-main/scipy/sparse/tests/test_csr.py
import numpy as np from numpy.testing import assert_array_almost_equal, assert_ from scipy.sparse import csr_matrix, hstack import pytest def _check_csr_rowslice(i, sl, X, Xcsr): np_slice = X[i, sl] csr_slice = Xcsr[i, sl] assert_array_almost_equal(np_slice, csr_slice.toarray()[0]) assert_(type(csr_slice) is csr_matrix) def test_csr_rowslice(): N = 10 np.random.seed(0) X = np.random.random((N, N)) X[X > 0.7] = 0 Xcsr = csr_matrix(X) slices = [slice(None, None, None), slice(None, None, -1), slice(1, -2, 2), slice(-2, 1, -2)] for i in range(N): for sl in slices: _check_csr_rowslice(i, sl, X, Xcsr) def test_csr_getrow(): N = 10 np.random.seed(0) X = np.random.random((N, N)) X[X > 0.7] = 0 Xcsr = csr_matrix(X) for i in range(N): arr_row = X[i:i + 1, :] csr_row = Xcsr.getrow(i) assert_array_almost_equal(arr_row, csr_row.toarray()) assert_(type(csr_row) is csr_matrix) def test_csr_getcol(): N = 10 np.random.seed(0) X = np.random.random((N, N)) X[X > 0.7] = 0 Xcsr = csr_matrix(X) for i in range(N): arr_col = X[:, i:i + 1] csr_col = Xcsr.getcol(i) assert_array_almost_equal(arr_col, csr_col.toarray()) assert_(type(csr_col) is csr_matrix) @pytest.mark.parametrize("matrix_input, axis, expected_shape", [(csr_matrix([[1, 0, 0, 0], [0, 0, 0, 0], [0, 2, 3, 0]]), 0, (0, 4)), (csr_matrix([[1, 0, 0, 0], [0, 0, 0, 0], [0, 2, 3, 0]]), 1, (3, 0)), (csr_matrix([[1, 0, 0, 0], [0, 0, 0, 0], [0, 2, 3, 0]]), 'both', (0, 0)), (csr_matrix([[0, 1, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 2, 3, 0]]), 0, (0, 5))]) def test_csr_empty_slices(matrix_input, axis, expected_shape): # see gh-11127 for related discussion slice_1 = matrix_input.A.shape[0] - 1 slice_2 = slice_1 slice_3 = slice_2 - 1 if axis == 0: actual_shape_1 = matrix_input[slice_1:slice_2, :].A.shape actual_shape_2 = matrix_input[slice_1:slice_3, :].A.shape elif axis == 1: actual_shape_1 = matrix_input[:, slice_1:slice_2].A.shape actual_shape_2 = matrix_input[:, slice_1:slice_3].A.shape elif axis == 'both': actual_shape_1 = 
matrix_input[slice_1:slice_2, slice_1:slice_2].A.shape actual_shape_2 = matrix_input[slice_1:slice_3, slice_1:slice_3].A.shape assert actual_shape_1 == expected_shape assert actual_shape_1 == actual_shape_2 def test_csr_bool_indexing(): data = csr_matrix([[0, 1, 2], [3, 4, 5], [6, 7, 8]]) list_indices1 = [False, True, False] array_indices1 = np.array(list_indices1) list_indices2 = [[False, True, False], [False, True, False], [False, True, False]] array_indices2 = np.array(list_indices2) list_indices3 = ([False, True, False], [False, True, False]) array_indices3 = (np.array(list_indices3[0]), np.array(list_indices3[1])) slice_list1 = data[list_indices1].toarray() slice_array1 = data[array_indices1].toarray() slice_list2 = data[list_indices2] slice_array2 = data[array_indices2] slice_list3 = data[list_indices3] slice_array3 = data[array_indices3] assert (slice_list1 == slice_array1).all() assert (slice_list2 == slice_array2).all() assert (slice_list3 == slice_array3).all() def test_csr_hstack_int64(): """ Tests if hstack properly promotes to indices and indptr arrays to np.int64 when using np.int32 during concatenation would result in either array overflowing. """ max_int32 = np.iinfo(np.int32).max # First case: indices would overflow with int32 data = [1.0] row = [0] max_indices_1 = max_int32 - 1 max_indices_2 = 3 # Individual indices arrays are representable with int32 col_1 = [max_indices_1 - 1] col_2 = [max_indices_2 - 1] X_1 = csr_matrix((data, (row, col_1))) X_2 = csr_matrix((data, (row, col_2))) assert max(max_indices_1 - 1, max_indices_2 - 1) < max_int32 assert X_1.indices.dtype == X_1.indptr.dtype == np.int32 assert X_2.indices.dtype == X_2.indptr.dtype == np.int32 # ... but when concatenating their CSR matrices, the resulting indices # array can't be represented with int32 and must be promoted to int64. 
X_hs = hstack([X_1, X_2], format="csr") assert X_hs.indices.max() == max_indices_1 + max_indices_2 - 1 assert max_indices_1 + max_indices_2 - 1 > max_int32 assert X_hs.indices.dtype == X_hs.indptr.dtype == np.int64 # Even if the matrices are empty, we must account for their size # contribution so that we may safely set the final elements. X_1_empty = csr_matrix(X_1.shape) X_2_empty = csr_matrix(X_2.shape) X_hs_empty = hstack([X_1_empty, X_2_empty], format="csr") assert X_hs_empty.shape == X_hs.shape assert X_hs_empty.indices.dtype == np.int64 # Should be just small enough to stay in int32 after stack. Note that # we theoretically could support indices.max() == max_int32, but due to an # edge-case in the underlying sparsetools code # (namely the `coo_tocsr` routine), # we require that max(X_hs_32.shape) < max_int32 as well. # Hence we can only support max_int32 - 1. col_3 = [max_int32 - max_indices_1 - 1] X_3 = csr_matrix((data, (row, col_3))) X_hs_32 = hstack([X_1, X_3], format="csr") assert X_hs_32.indices.dtype == np.int32 assert X_hs_32.indices.max() == max_int32 - 1
5,651
32.247059
86
py
scipy
scipy-main/scipy/sparse/tests/test_construct.py
"""test sparse matrix construction functions""" import numpy as np from numpy import array from numpy.testing import (assert_equal, assert_, assert_array_equal, assert_array_almost_equal_nulp) import pytest from pytest import raises as assert_raises from scipy._lib._testutils import check_free_memory from scipy._lib._util import check_random_state from scipy.sparse import (csr_matrix, coo_matrix, _construct as construct) from scipy.sparse._construct import rand as sprand from scipy.sparse._sputils import matrix sparse_formats = ['csr','csc','coo','bsr','dia','lil','dok'] #TODO check whether format=XXX is respected def _sprandn(m, n, density=0.01, format="coo", dtype=None, random_state=None): # Helper function for testing. random_state = check_random_state(random_state) data_rvs = random_state.standard_normal return construct.random(m, n, density, format, dtype, random_state, data_rvs) class TestConstructUtils: def test_spdiags(self): diags1 = array([[1, 2, 3, 4, 5]]) diags2 = array([[1, 2, 3, 4, 5], [6, 7, 8, 9,10]]) diags3 = array([[1, 2, 3, 4, 5], [6, 7, 8, 9,10], [11,12,13,14,15]]) cases = [] cases.append((diags1, 0, 1, 1, [[1]])) cases.append((diags1, [0], 1, 1, [[1]])) cases.append((diags1, [0], 2, 1, [[1],[0]])) cases.append((diags1, [0], 1, 2, [[1,0]])) cases.append((diags1, [1], 1, 2, [[0,2]])) cases.append((diags1,[-1], 1, 2, [[0,0]])) cases.append((diags1, [0], 2, 2, [[1,0],[0,2]])) cases.append((diags1,[-1], 2, 2, [[0,0],[1,0]])) cases.append((diags1, [3], 2, 2, [[0,0],[0,0]])) cases.append((diags1, [0], 3, 4, [[1,0,0,0],[0,2,0,0],[0,0,3,0]])) cases.append((diags1, [1], 3, 4, [[0,2,0,0],[0,0,3,0],[0,0,0,4]])) cases.append((diags1, [2], 3, 5, [[0,0,3,0,0],[0,0,0,4,0],[0,0,0,0,5]])) cases.append((diags2, [0,2], 3, 3, [[1,0,8],[0,2,0],[0,0,3]])) cases.append((diags2, [-1,0], 3, 4, [[6,0,0,0],[1,7,0,0],[0,2,8,0]])) cases.append((diags2, [2,-3], 6, 6, [[0,0,3,0,0,0], [0,0,0,4,0,0], [0,0,0,0,5,0], [6,0,0,0,0,0], [0,7,0,0,0,0], [0,0,8,0,0,0]])) 
cases.append((diags3, [-1,0,1], 6, 6, [[6,12, 0, 0, 0, 0], [1, 7,13, 0, 0, 0], [0, 2, 8,14, 0, 0], [0, 0, 3, 9,15, 0], [0, 0, 0, 4,10, 0], [0, 0, 0, 0, 5, 0]])) cases.append((diags3, [-4,2,-1], 6, 5, [[0, 0, 8, 0, 0], [11, 0, 0, 9, 0], [0,12, 0, 0,10], [0, 0,13, 0, 0], [1, 0, 0,14, 0], [0, 2, 0, 0,15]])) cases.append((diags3, [-1, 1, 2], len(diags3[0]), len(diags3[0]), [[0, 7, 13, 0, 0], [1, 0, 8, 14, 0], [0, 2, 0, 9, 15], [0, 0, 3, 0, 10], [0, 0, 0, 4, 0]])) for d, o, m, n, result in cases: if len(d[0]) == m and m == n: assert_equal(construct.spdiags(d, o).toarray(), result) assert_equal(construct.spdiags(d, o, m, n).toarray(), result) assert_equal(construct.spdiags(d, o, (m, n)).toarray(), result) def test_diags(self): a = array([1, 2, 3, 4, 5]) b = array([6, 7, 8, 9, 10]) c = array([11, 12, 13, 14, 15]) cases = [] cases.append((a[:1], 0, (1, 1), [[1]])) cases.append(([a[:1]], [0], (1, 1), [[1]])) cases.append(([a[:1]], [0], (2, 1), [[1],[0]])) cases.append(([a[:1]], [0], (1, 2), [[1,0]])) cases.append(([a[:1]], [1], (1, 2), [[0,1]])) cases.append(([a[:2]], [0], (2, 2), [[1,0],[0,2]])) cases.append(([a[:1]],[-1], (2, 2), [[0,0],[1,0]])) cases.append(([a[:3]], [0], (3, 4), [[1,0,0,0],[0,2,0,0],[0,0,3,0]])) cases.append(([a[:3]], [1], (3, 4), [[0,1,0,0],[0,0,2,0],[0,0,0,3]])) cases.append(([a[:1]], [-2], (3, 5), [[0,0,0,0,0],[0,0,0,0,0],[1,0,0,0,0]])) cases.append(([a[:2]], [-1], (3, 5), [[0,0,0,0,0],[1,0,0,0,0],[0,2,0,0,0]])) cases.append(([a[:3]], [0], (3, 5), [[1,0,0,0,0],[0,2,0,0,0],[0,0,3,0,0]])) cases.append(([a[:3]], [1], (3, 5), [[0,1,0,0,0],[0,0,2,0,0],[0,0,0,3,0]])) cases.append(([a[:3]], [2], (3, 5), [[0,0,1,0,0],[0,0,0,2,0],[0,0,0,0,3]])) cases.append(([a[:2]], [3], (3, 5), [[0,0,0,1,0],[0,0,0,0,2],[0,0,0,0,0]])) cases.append(([a[:1]], [4], (3, 5), [[0,0,0,0,1],[0,0,0,0,0],[0,0,0,0,0]])) cases.append(([a[:1]], [-4], (5, 3), [[0,0,0],[0,0,0],[0,0,0],[0,0,0],[1,0,0]])) cases.append(([a[:2]], [-3], (5, 3), [[0,0,0],[0,0,0],[0,0,0],[1,0,0],[0,2,0]])) 
cases.append(([a[:3]], [-2], (5, 3), [[0,0,0],[0,0,0],[1,0,0],[0,2,0],[0,0,3]])) cases.append(([a[:3]], [-1], (5, 3), [[0,0,0],[1,0,0],[0,2,0],[0,0,3],[0,0,0]])) cases.append(([a[:3]], [0], (5, 3), [[1,0,0],[0,2,0],[0,0,3],[0,0,0],[0,0,0]])) cases.append(([a[:2]], [1], (5, 3), [[0,1,0],[0,0,2],[0,0,0],[0,0,0],[0,0,0]])) cases.append(([a[:1]], [2], (5, 3), [[0,0,1],[0,0,0],[0,0,0],[0,0,0],[0,0,0]])) cases.append(([a[:3],b[:1]], [0,2], (3, 3), [[1,0,6],[0,2,0],[0,0,3]])) cases.append(([a[:2],b[:3]], [-1,0], (3, 4), [[6,0,0,0],[1,7,0,0],[0,2,8,0]])) cases.append(([a[:4],b[:3]], [2,-3], (6, 6), [[0,0,1,0,0,0], [0,0,0,2,0,0], [0,0,0,0,3,0], [6,0,0,0,0,4], [0,7,0,0,0,0], [0,0,8,0,0,0]])) cases.append(([a[:4],b,c[:4]], [-1,0,1], (5, 5), [[6,11, 0, 0, 0], [1, 7,12, 0, 0], [0, 2, 8,13, 0], [0, 0, 3, 9,14], [0, 0, 0, 4,10]])) cases.append(([a[:2],b[:3],c], [-4,2,-1], (6, 5), [[0, 0, 6, 0, 0], [11, 0, 0, 7, 0], [0,12, 0, 0, 8], [0, 0,13, 0, 0], [1, 0, 0,14, 0], [0, 2, 0, 0,15]])) # too long arrays are OK cases.append(([a], [0], (1, 1), [[1]])) cases.append(([a[:3],b], [0,2], (3, 3), [[1, 0, 6], [0, 2, 0], [0, 0, 3]])) cases.append((np.array([[1, 2, 3], [4, 5, 6]]), [0,-1], (3, 3), [[1, 0, 0], [4, 2, 0], [0, 5, 3]])) # scalar case: broadcasting cases.append(([1,-2,1], [1,0,-1], (3, 3), [[-2, 1, 0], [1, -2, 1], [0, 1, -2]])) for d, o, shape, result in cases: err_msg = f"{d!r} {o!r} {shape!r} {result!r}" assert_equal(construct.diags(d, offsets=o, shape=shape).toarray(), result, err_msg=err_msg) if shape[0] == shape[1] and hasattr(d[0], '__len__') and len(d[0]) <= max(shape): # should be able to find the shape automatically assert_equal(construct.diags(d, offsets=o).toarray(), result, err_msg=err_msg) def test_diags_default(self): a = array([1, 2, 3, 4, 5]) assert_equal(construct.diags(a).toarray(), np.diag(a)) def test_diags_default_bad(self): a = array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6]]) assert_raises(ValueError, construct.diags, a) def test_diags_bad(self): a = array([1, 2, 3, 
4, 5]) b = array([6, 7, 8, 9, 10]) c = array([11, 12, 13, 14, 15]) cases = [] cases.append(([a[:0]], 0, (1, 1))) cases.append(([a[:4],b,c[:3]], [-1,0,1], (5, 5))) cases.append(([a[:2],c,b[:3]], [-4,2,-1], (6, 5))) cases.append(([a[:2],c,b[:3]], [-4,2,-1], None)) cases.append(([], [-4,2,-1], None)) cases.append(([1], [-5], (4, 4))) cases.append(([a], 0, None)) for d, o, shape in cases: assert_raises(ValueError, construct.diags, d, offsets=o, shape=shape) assert_raises(TypeError, construct.diags, [[None]], offsets=[0]) def test_diags_vs_diag(self): # Check that # # diags([a, b, ...], [i, j, ...]) == diag(a, i) + diag(b, j) + ... # np.random.seed(1234) for n_diags in [1, 2, 3, 4, 5, 10]: n = 1 + n_diags//2 + np.random.randint(0, 10) offsets = np.arange(-n+1, n-1) np.random.shuffle(offsets) offsets = offsets[:n_diags] diagonals = [np.random.rand(n - abs(q)) for q in offsets] mat = construct.diags(diagonals, offsets=offsets) dense_mat = sum([np.diag(x, j) for x, j in zip(diagonals, offsets)]) assert_array_almost_equal_nulp(mat.toarray(), dense_mat) if len(offsets) == 1: mat = construct.diags(diagonals[0], offsets=offsets[0]) dense_mat = np.diag(diagonals[0], offsets[0]) assert_array_almost_equal_nulp(mat.toarray(), dense_mat) def test_diags_dtype(self): x = construct.diags([2.2], offsets=[0], shape=(2, 2), dtype=int) assert_equal(x.dtype, int) assert_equal(x.toarray(), [[2, 0], [0, 2]]) def test_diags_one_diagonal(self): d = list(range(5)) for k in range(-5, 6): assert_equal(construct.diags(d, offsets=k).toarray(), construct.diags([d], offsets=[k]).toarray()) def test_diags_empty(self): x = construct.diags([]) assert_equal(x.shape, (0, 0)) def test_identity(self): assert_equal(construct.identity(1).toarray(), [[1]]) assert_equal(construct.identity(2).toarray(), [[1,0],[0,1]]) I = construct.identity(3, dtype='int8', format='dia') assert_equal(I.dtype, np.dtype('int8')) assert_equal(I.format, 'dia') for fmt in sparse_formats: I = construct.identity(3, format=fmt) 
assert_equal(I.format, fmt) assert_equal(I.toarray(), [[1,0,0],[0,1,0],[0,0,1]]) def test_eye(self): assert_equal(construct.eye(1,1).toarray(), [[1]]) assert_equal(construct.eye(2,3).toarray(), [[1,0,0],[0,1,0]]) assert_equal(construct.eye(3,2).toarray(), [[1,0],[0,1],[0,0]]) assert_equal(construct.eye(3,3).toarray(), [[1,0,0],[0,1,0],[0,0,1]]) assert_equal(construct.eye(3,3,dtype='int16').dtype, np.dtype('int16')) for m in [3, 5]: for n in [3, 5]: for k in range(-5,6): # scipy.sparse.eye deviates from np.eye here. np.eye will # create arrays of all 0's when the diagonal offset is # greater than the size of the array. For sparse arrays # this makes less sense, especially as it results in dia # arrays with negative diagonals. Therefore sp.sparse.eye # validates that diagonal offsets fall within the shape of # the array. See gh-18555. if (k > 0 and k > n) or (k < 0 and abs(k) > m): with pytest.raises( ValueError, match="Offset.*out of bounds" ): construct.eye(m, n, k=k) else: assert_equal( construct.eye(m, n, k=k).toarray(), np.eye(m, n, k=k) ) if m == n: assert_equal( construct.eye(m, k=k).toarray(), np.eye(m, n, k=k) ) def test_eye_one(self): assert_equal(construct.eye(1).toarray(), [[1]]) assert_equal(construct.eye(2).toarray(), [[1,0],[0,1]]) I = construct.eye(3, dtype='int8', format='dia') assert_equal(I.dtype, np.dtype('int8')) assert_equal(I.format, 'dia') for fmt in sparse_formats: I = construct.eye(3, format=fmt) assert_equal(I.format, fmt) assert_equal(I.toarray(), [[1,0,0],[0,1,0],[0,0,1]]) def test_kron(self): cases = [] cases.append(array([[0]])) cases.append(array([[-1]])) cases.append(array([[4]])) cases.append(array([[10]])) cases.append(array([[0],[0]])) cases.append(array([[0,0]])) cases.append(array([[1,2],[3,4]])) cases.append(array([[0,2],[5,0]])) cases.append(array([[0,2,-6],[8,0,14]])) cases.append(array([[5,4],[0,0],[6,0]])) cases.append(array([[5,4,4],[1,0,0],[6,0,8]])) cases.append(array([[0,1,0,2,0,5,8]])) 
cases.append(array([[0.5,0.125,0,3.25],[0,2.5,0,0]])) for a in cases: for b in cases: expected = np.kron(a, b) for fmt in sparse_formats: result = construct.kron(csr_matrix(a), csr_matrix(b), format=fmt) assert_equal(result.format, fmt) assert_array_equal(result.toarray(), expected) def test_kron_large(self): n = 2**16 a = construct.eye(1, n, n-1) b = construct.eye(n, 1, 1-n) construct.kron(a, a) construct.kron(b, b) def test_kronsum(self): cases = [] cases.append(array([[0]])) cases.append(array([[-1]])) cases.append(array([[4]])) cases.append(array([[10]])) cases.append(array([[1,2],[3,4]])) cases.append(array([[0,2],[5,0]])) cases.append(array([[0,2,-6],[8,0,14],[0,3,0]])) cases.append(array([[1,0,0],[0,5,-1],[4,-2,8]])) for a in cases: for b in cases: result = construct.kronsum( csr_matrix(a), csr_matrix(b)).toarray() expected = np.kron(np.eye(len(b)), a) + \ np.kron(b, np.eye(len(a))) assert_array_equal(result,expected) def test_vstack(self): A = coo_matrix([[1,2],[3,4]]) B = coo_matrix([[5,6]]) expected = array([[1, 2], [3, 4], [5, 6]]) assert_equal(construct.vstack([A, B]).toarray(), expected) assert_equal(construct.vstack([A, B], dtype=np.float32).dtype, np.float32) assert_equal(construct.vstack([A.tocsr(), B.tocsr()]).toarray(), expected) result = construct.vstack([A.tocsr(), B.tocsr()], dtype=np.float32) assert_equal(result.dtype, np.float32) assert_equal(result.indices.dtype, np.int32) assert_equal(result.indptr.dtype, np.int32) assert_equal(construct.vstack([A.tocsc(), B.tocsc()]).toarray(), expected) result = construct.vstack([A.tocsc(), B.tocsc()], dtype=np.float32) assert_equal(result.dtype, np.float32) assert_equal(result.indices.dtype, np.int32) assert_equal(result.indptr.dtype, np.int32) def test_hstack(self): A = coo_matrix([[1,2],[3,4]]) B = coo_matrix([[5],[6]]) expected = array([[1, 2, 5], [3, 4, 6]]) assert_equal(construct.hstack([A, B]).toarray(), expected) assert_equal(construct.hstack([A, B], dtype=np.float32).dtype, np.float32) 
assert_equal(construct.hstack([A.tocsc(), B.tocsc()]).toarray(), expected) assert_equal(construct.hstack([A.tocsc(), B.tocsc()], dtype=np.float32).dtype, np.float32) assert_equal(construct.hstack([A.tocsr(), B.tocsr()]).toarray(), expected) assert_equal(construct.hstack([A.tocsr(), B.tocsr()], dtype=np.float32).dtype, np.float32) def test_bmat(self): A = coo_matrix([[1, 2], [3, 4]]) B = coo_matrix([[5],[6]]) C = coo_matrix([[7]]) D = coo_matrix((0, 0)) expected = array([[1, 2, 5], [3, 4, 6], [0, 0, 7]]) assert_equal(construct.bmat([[A, B], [None, C]]).toarray(), expected) E = csr_matrix((1, 2), dtype=np.int32) assert_equal(construct.bmat([[A.tocsr(), B.tocsr()], [E, C.tocsr()]]).toarray(), expected) assert_equal(construct.bmat([[A.tocsc(), B.tocsc()], [E.tocsc(), C.tocsc()]]).toarray(), expected) expected = array([[1, 2, 0], [3, 4, 0], [0, 0, 7]]) assert_equal(construct.bmat([[A, None], [None, C]]).toarray(), expected) assert_equal(construct.bmat([[A.tocsr(), E.T.tocsr()], [E, C.tocsr()]]).toarray(), expected) assert_equal(construct.bmat([[A.tocsc(), E.T.tocsc()], [E.tocsc(), C.tocsc()]]).toarray(), expected) Z = csr_matrix((1, 1), dtype=np.int32) expected = array([[0, 5], [0, 6], [7, 0]]) assert_equal(construct.bmat([[None, B], [C, None]]).toarray(), expected) assert_equal(construct.bmat([[E.T.tocsr(), B.tocsr()], [C.tocsr(), Z]]).toarray(), expected) assert_equal(construct.bmat([[E.T.tocsc(), B.tocsc()], [C.tocsc(), Z.tocsc()]]).toarray(), expected) expected = matrix(np.empty((0, 0))) assert_equal(construct.bmat([[None, None]]).toarray(), expected) assert_equal(construct.bmat([[None, D], [D, None]]).toarray(), expected) # test bug reported in gh-5976 expected = array([[7]]) assert_equal(construct.bmat([[None, D], [C, None]]).toarray(), expected) # test failure cases with assert_raises(ValueError) as excinfo: construct.bmat([[A], [B]]) excinfo.match(r'Got blocks\[1,0\]\.shape\[1\] == 1, expected 2') with assert_raises(ValueError) as excinfo: 
construct.bmat([[A.tocsr()], [B.tocsr()]]) excinfo.match(r'incompatible dimensions for axis 1') with assert_raises(ValueError) as excinfo: construct.bmat([[A.tocsc()], [B.tocsc()]]) excinfo.match(r'Mismatching dimensions along axis 1: ({1, 2}|{2, 1})') with assert_raises(ValueError) as excinfo: construct.bmat([[A, C]]) excinfo.match(r'Got blocks\[0,1\]\.shape\[0\] == 1, expected 2') with assert_raises(ValueError) as excinfo: construct.bmat([[A.tocsr(), C.tocsr()]]) excinfo.match(r'Mismatching dimensions along axis 0: ({1, 2}|{2, 1})') with assert_raises(ValueError) as excinfo: construct.bmat([[A.tocsc(), C.tocsc()]]) excinfo.match(r'incompatible dimensions for axis 0') @pytest.mark.slow @pytest.mark.xfail_on_32bit("Can't create large array for test") def test_concatenate_int32_overflow(self): """ test for indptr overflow when concatenating matrices """ check_free_memory(30000) n = 33000 A = csr_matrix(np.ones((n, n), dtype=bool)) B = A.copy() C = construct._compressed_sparse_stack((A,B), 0) assert_(np.all(np.equal(np.diff(C.indptr), n))) assert_equal(C.indices.dtype, np.int64) assert_equal(C.indptr.dtype, np.int64) def test_block_diag_basic(self): """ basic test for block_diag """ A = coo_matrix([[1,2],[3,4]]) B = coo_matrix([[5],[6]]) C = coo_matrix([[7]]) expected = array([[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 5, 0], [0, 0, 6, 0], [0, 0, 0, 7]]) assert_equal(construct.block_diag((A, B, C)).toarray(), expected) def test_block_diag_scalar_1d_args(self): """ block_diag with scalar and 1d arguments """ # one 1d matrix and a scalar assert_array_equal(construct.block_diag([[2,3], 4]).toarray(), [[2, 3, 0], [0, 0, 4]]) def test_block_diag_1(self): """ block_diag with one matrix """ assert_equal(construct.block_diag([[1, 0]]).toarray(), array([[1, 0]])) assert_equal(construct.block_diag([[[1, 0]]]).toarray(), array([[1, 0]])) assert_equal(construct.block_diag([[[1], [0]]]).toarray(), array([[1], [0]])) # just on scalar assert_equal(construct.block_diag([1]).toarray(), 
array([[1]])) def test_block_diag_sparse_matrices(self): """ block_diag with sparse matrices """ sparse_col_matrices = [coo_matrix(([[1, 2, 3]]), shape=(1, 3)), coo_matrix(([[4, 5]]), shape=(1, 2))] block_sparse_cols_matrices = construct.block_diag(sparse_col_matrices) assert_equal(block_sparse_cols_matrices.toarray(), array([[1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])) sparse_row_matrices = [coo_matrix(([[1], [2], [3]]), shape=(3, 1)), coo_matrix(([[4], [5]]), shape=(2, 1))] block_sparse_row_matrices = construct.block_diag(sparse_row_matrices) assert_equal(block_sparse_row_matrices.toarray(), array([[1, 0], [2, 0], [3, 0], [0, 4], [0, 5]])) def test_random_sampling(self): # Simple sanity checks for sparse random sampling. for f in sprand, _sprandn: for t in [np.float32, np.float64, np.longdouble, np.int32, np.int64, np.complex64, np.complex128]: x = f(5, 10, density=0.1, dtype=t) assert_equal(x.dtype, t) assert_equal(x.shape, (5, 10)) assert_equal(x.nnz, 5) x1 = f(5, 10, density=0.1, random_state=4321) assert_equal(x1.dtype, np.double) x2 = f(5, 10, density=0.1, random_state=np.random.RandomState(4321)) assert_array_equal(x1.data, x2.data) assert_array_equal(x1.row, x2.row) assert_array_equal(x1.col, x2.col) for density in [0.0, 0.1, 0.5, 1.0]: x = f(5, 10, density=density) assert_equal(x.nnz, int(density * np.prod(x.shape))) for fmt in ['coo', 'csc', 'csr', 'lil']: x = f(5, 10, format=fmt) assert_equal(x.format, fmt) assert_raises(ValueError, lambda: f(5, 10, 1.1)) assert_raises(ValueError, lambda: f(5, 10, -0.1)) def test_rand(self): # Simple distributional checks for sparse.rand. 
random_states = [None, 4321, np.random.RandomState()] try: gen = np.random.default_rng() random_states.append(gen) except AttributeError: pass for random_state in random_states: x = sprand(10, 20, density=0.5, dtype=np.float64, random_state=random_state) assert_(np.all(np.less_equal(0, x.data))) assert_(np.all(np.less_equal(x.data, 1))) def test_randn(self): # Simple distributional checks for sparse.randn. # Statistically, some of these should be negative # and some should be greater than 1. random_states = [None, 4321, np.random.RandomState()] try: gen = np.random.default_rng() random_states.append(gen) except AttributeError: pass for random_state in random_states: x = _sprandn(10, 20, density=0.5, dtype=np.float64, random_state=random_state) assert_(np.any(np.less(x.data, 0))) assert_(np.any(np.less(1, x.data))) def test_random_accept_str_dtype(self): # anything that np.dtype can convert to a dtype should be accepted # for the dtype construct.random(10, 10, dtype='d') def test_random_sparse_matrix_returns_correct_number_of_non_zero_elements(self): # A 10 x 10 matrix, with density of 12.65%, should have 13 nonzero elements. # 10 x 10 x 0.1265 = 12.65, which should be rounded up to 13, not 12. sparse_matrix = construct.random(10, 10, density=0.1265) assert_equal(sparse_matrix.count_nonzero(),13) def test_diags_array(): """Tests of diags_array that do not rely on diags wrapper.""" diag = np.arange(1, 5) assert_array_equal(construct.diags_array(diag).toarray(), np.diag(diag)) assert_array_equal( construct.diags_array(diag, offsets=2).toarray(), np.diag(diag, k=2) ) assert_array_equal( construct.diags_array(diag, offsets=2, shape=(4, 4)).toarray(), np.diag(diag, k=2)[:4, :4] ) # Offset outside bounds when shape specified with pytest.raises(ValueError, match=".*out of bounds"): construct.diags(np.arange(1, 5), 5, shape=(4, 4))
26,565
41.710611
107
py
scipy
scipy-main/scipy/sparse/tests/test_matrix_io.py
import os import numpy as np import tempfile from pytest import raises as assert_raises from numpy.testing import assert_equal, assert_ from scipy.sparse import (csc_matrix, csr_matrix, bsr_matrix, dia_matrix, coo_matrix, save_npz, load_npz, dok_matrix) DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') def _save_and_load(matrix): fd, tmpfile = tempfile.mkstemp(suffix='.npz') os.close(fd) try: save_npz(tmpfile, matrix) loaded_matrix = load_npz(tmpfile) finally: os.remove(tmpfile) return loaded_matrix def _check_save_and_load(dense_matrix): for matrix_class in [csc_matrix, csr_matrix, bsr_matrix, dia_matrix, coo_matrix]: matrix = matrix_class(dense_matrix) loaded_matrix = _save_and_load(matrix) assert_(type(loaded_matrix) is matrix_class) assert_(loaded_matrix.shape == dense_matrix.shape) assert_(loaded_matrix.dtype == dense_matrix.dtype) assert_equal(loaded_matrix.toarray(), dense_matrix) def test_save_and_load_random(): N = 10 np.random.seed(0) dense_matrix = np.random.random((N, N)) dense_matrix[dense_matrix > 0.7] = 0 _check_save_and_load(dense_matrix) def test_save_and_load_empty(): dense_matrix = np.zeros((4,6)) _check_save_and_load(dense_matrix) def test_save_and_load_one_entry(): dense_matrix = np.zeros((4,6)) dense_matrix[1,2] = 1 _check_save_and_load(dense_matrix) def test_malicious_load(): class Executor: def __reduce__(self): return (assert_, (False, 'unexpected code execution')) fd, tmpfile = tempfile.mkstemp(suffix='.npz') os.close(fd) try: np.savez(tmpfile, format=Executor()) # Should raise a ValueError, not execute code assert_raises(ValueError, load_npz, tmpfile) finally: os.remove(tmpfile) def test_py23_compatibility(): # Try loading files saved on Python 2 and Python 3. They are not # the same, since files saved with SciPy versions < 1.0.0 may # contain unicode. 
a = load_npz(os.path.join(DATA_DIR, 'csc_py2.npz')) b = load_npz(os.path.join(DATA_DIR, 'csc_py3.npz')) c = csc_matrix([[0]]) assert_equal(a.toarray(), c.toarray()) assert_equal(b.toarray(), c.toarray()) def test_implemented_error(): # Attempts to save an unsupported type and checks that an # NotImplementedError is raised. x = dok_matrix((2,3)) x[0,1] = 1 assert_raises(NotImplementedError, save_npz, 'x.npz', x)
2,542
28.229885
85
py
scipy
scipy-main/scipy/sparse/tests/__init__.py
0
0
0
py
scipy
scipy-main/scipy/sparse/tests/test_sparsetools.py
import sys import os import gc import threading import numpy as np from numpy.testing import assert_equal, assert_, assert_allclose from scipy.sparse import (_sparsetools, coo_matrix, csr_matrix, csc_matrix, bsr_matrix, dia_matrix) from scipy.sparse._sputils import supported_dtypes from scipy._lib._testutils import check_free_memory import pytest from pytest import raises as assert_raises def int_to_int8(n): """ Wrap an integer to the interval [-128, 127]. """ return (n + 128) % 256 - 128 def test_exception(): assert_raises(MemoryError, _sparsetools.test_throw_error) def test_threads(): # Smoke test for parallel threaded execution; doesn't actually # check that code runs in parallel, but just that it produces # expected results. nthreads = 10 niter = 100 n = 20 a = csr_matrix(np.ones([n, n])) bres = [] class Worker(threading.Thread): def run(self): b = a.copy() for j in range(niter): _sparsetools.csr_plus_csr(n, n, a.indptr, a.indices, a.data, a.indptr, a.indices, a.data, b.indptr, b.indices, b.data) bres.append(b) threads = [Worker() for _ in range(nthreads)] for thread in threads: thread.start() for thread in threads: thread.join() for b in bres: assert_(np.all(b.toarray() == 2)) def test_regression_std_vector_dtypes(): # Regression test for gh-3780, checking the std::vector typemaps # in sparsetools.cxx are complete. for dtype in supported_dtypes: ad = np.array([[1, 2], [3, 4]]).astype(dtype) a = csr_matrix(ad, dtype=dtype) # getcol is one function using std::vector typemaps, and should not fail assert_equal(a.getcol(0).toarray(), ad[:, :1]) @pytest.mark.slow @pytest.mark.xfail_on_32bit("Can't create large array for test") def test_nnz_overflow(): # Regression test for gh-7230 / gh-7871, checking that coo_toarray # with nnz > int32max doesn't overflow. nnz = np.iinfo(np.int32).max + 1 # Ensure ~20 GB of RAM is free to run this test. check_free_memory((4 + 4 + 1) * nnz / 1e6 + 0.5) # Use nnz duplicate entries to keep the dense version small. 
row = np.zeros(nnz, dtype=np.int32) col = np.zeros(nnz, dtype=np.int32) data = np.zeros(nnz, dtype=np.int8) data[-1] = 4 s = coo_matrix((data, (row, col)), shape=(1, 1), copy=False) # Sums nnz duplicates to produce a 1x1 array containing 4. d = s.toarray() assert_allclose(d, [[4]]) @pytest.mark.skipif(not (sys.platform.startswith('linux') and np.dtype(np.intp).itemsize >= 8), reason="test requires 64-bit Linux") class TestInt32Overflow: """ Some of the sparsetools routines use dense 2D matrices whose total size is not bounded by the nnz of the sparse matrix. These routines used to suffer from int32 wraparounds; here, we try to check that the wraparounds don't occur any more. """ # choose n large enough n = 50000 def setup_method(self): assert self.n**2 > np.iinfo(np.int32).max # check there's enough memory even if everything is run at the # same time try: parallel_count = int(os.environ.get('PYTEST_XDIST_WORKER_COUNT', '1')) except ValueError: parallel_count = np.inf check_free_memory(3000 * parallel_count) def teardown_method(self): gc.collect() def test_coo_todense(self): # Check *_todense routines (cf. 
gh-2179) # # All of them in the end call coo_matrix.todense n = self.n i = np.array([0, n-1]) j = np.array([0, n-1]) data = np.array([1, 2], dtype=np.int8) m = coo_matrix((data, (i, j))) r = m.todense() assert_equal(r[0,0], 1) assert_equal(r[-1,-1], 2) del r gc.collect() @pytest.mark.slow def test_matvecs(self): # Check *_matvecs routines n = self.n i = np.array([0, n-1]) j = np.array([0, n-1]) data = np.array([1, 2], dtype=np.int8) m = coo_matrix((data, (i, j))) b = np.ones((n, n), dtype=np.int8) for sptype in (csr_matrix, csc_matrix, bsr_matrix): m2 = sptype(m) r = m2.dot(b) assert_equal(r[0,0], 1) assert_equal(r[-1,-1], 2) del r gc.collect() del b gc.collect() @pytest.mark.slow def test_dia_matvec(self): # Check: huge dia_matrix _matvec n = self.n data = np.ones((n, n), dtype=np.int8) offsets = np.arange(n) m = dia_matrix((data, offsets), shape=(n, n)) v = np.ones(m.shape[1], dtype=np.int8) r = m.dot(v) assert_equal(r[0], int_to_int8(n)) del data, offsets, m, v, r gc.collect() _bsr_ops = [pytest.param("matmat", marks=pytest.mark.xslow), pytest.param("matvecs", marks=pytest.mark.xslow), "matvec", "diagonal", "sort_indices", pytest.param("transpose", marks=pytest.mark.xslow)] @pytest.mark.slow @pytest.mark.parametrize("op", _bsr_ops) def test_bsr_1_block(self, op): # Check: huge bsr_matrix (1-block) # # The point here is that indices inside a block may overflow. def get_matrix(): n = self.n data = np.ones((1, n, n), dtype=np.int8) indptr = np.array([0, 1], dtype=np.int32) indices = np.array([0], dtype=np.int32) m = bsr_matrix((data, indices, indptr), blocksize=(n, n), copy=False) del data, indptr, indices return m gc.collect() try: getattr(self, "_check_bsr_" + op)(get_matrix) finally: gc.collect() @pytest.mark.slow @pytest.mark.parametrize("op", _bsr_ops) def test_bsr_n_block(self, op): # Check: huge bsr_matrix (n-block) # # The point here is that while indices within a block don't # overflow, accumulators across many block may. 
def get_matrix(): n = self.n data = np.ones((n, n, 1), dtype=np.int8) indptr = np.array([0, n], dtype=np.int32) indices = np.arange(n, dtype=np.int32) m = bsr_matrix((data, indices, indptr), blocksize=(n, 1), copy=False) del data, indptr, indices return m gc.collect() try: getattr(self, "_check_bsr_" + op)(get_matrix) finally: gc.collect() def _check_bsr_matvecs(self, m): # skip name check m = m() n = self.n # _matvecs r = m.dot(np.ones((n, 2), dtype=np.int8)) assert_equal(r[0, 0], int_to_int8(n)) def _check_bsr_matvec(self, m): # skip name check m = m() n = self.n # _matvec r = m.dot(np.ones((n,), dtype=np.int8)) assert_equal(r[0], int_to_int8(n)) def _check_bsr_diagonal(self, m): # skip name check m = m() n = self.n # _diagonal r = m.diagonal() assert_equal(r, np.ones(n)) def _check_bsr_sort_indices(self, m): # skip name check # _sort_indices m = m() m.sort_indices() def _check_bsr_transpose(self, m): # skip name check # _transpose m = m() m.transpose() def _check_bsr_matmat(self, m): # skip name check m = m() n = self.n # _bsr_matmat m2 = bsr_matrix(np.ones((n, 2), dtype=np.int8), blocksize=(m.blocksize[1], 2)) m.dot(m2) # shouldn't SIGSEGV del m2 # _bsr_matmat m2 = bsr_matrix(np.ones((2, n), dtype=np.int8), blocksize=(2, m.blocksize[0])) m2.dot(m) # shouldn't SIGSEGV @pytest.mark.skip(reason="64-bit indices in sparse matrices not available") def test_csr_matmat_int64_overflow(): n = 3037000500 assert n**2 > np.iinfo(np.int64).max # the test would take crazy amounts of memory check_free_memory(n * (8*2 + 1) * 3 / 1e6) # int64 overflow data = np.ones((n,), dtype=np.int8) indptr = np.arange(n+1, dtype=np.int64) indices = np.zeros(n, dtype=np.int64) a = csr_matrix((data, indices, indptr)) b = a.T assert_raises(RuntimeError, a.dot, b) def test_upcast(): a0 = csr_matrix([[np.pi, np.pi*1j], [3, 4]], dtype=complex) b0 = np.array([256+1j, 2**32], dtype=complex) for a_dtype in supported_dtypes: for b_dtype in supported_dtypes: msg = f"({a_dtype!r}, {b_dtype!r})" if 
np.issubdtype(a_dtype, np.complexfloating): a = a0.copy().astype(a_dtype) else: a = a0.real.copy().astype(a_dtype) if np.issubdtype(b_dtype, np.complexfloating): b = b0.copy().astype(b_dtype) else: with np.errstate(invalid="ignore"): # Casting a large value (2**32) to int8 causes a warning in # numpy >1.23 b = b0.real.copy().astype(b_dtype) if not (a_dtype == np.bool_ and b_dtype == np.bool_): c = np.zeros((2,), dtype=np.bool_) assert_raises(ValueError, _sparsetools.csr_matvec, 2, 2, a.indptr, a.indices, a.data, b, c) if ((np.issubdtype(a_dtype, np.complexfloating) and not np.issubdtype(b_dtype, np.complexfloating)) or (not np.issubdtype(a_dtype, np.complexfloating) and np.issubdtype(b_dtype, np.complexfloating))): c = np.zeros((2,), dtype=np.float64) assert_raises(ValueError, _sparsetools.csr_matvec, 2, 2, a.indptr, a.indices, a.data, b, c) c = np.zeros((2,), dtype=np.result_type(a_dtype, b_dtype)) _sparsetools.csr_matvec(2, 2, a.indptr, a.indices, a.data, b, c) assert_allclose(c, np.dot(a.toarray(), b), err_msg=msg) def test_endianness(): d = np.ones((3,4)) offsets = [-1,0,1] a = dia_matrix((d.astype('<f8'), offsets), (4, 4)) b = dia_matrix((d.astype('>f8'), offsets), (4, 4)) v = np.arange(4) assert_allclose(a.dot(v), [1, 3, 6, 5]) assert_allclose(b.dot(v), [1, 3, 6, 5])
10,553
30.224852
95
py
scipy
scipy-main/scipy/sparse/tests/test_base.py
# # Authors: Travis Oliphant, Ed Schofield, Robert Cimrman, Nathan Bell, and others """ Test functions for sparse matrices. Each class in the "Matrix class based tests" section become subclasses of the classes in the "Generic tests" section. This is done by the functions in the "Tailored base class for generic tests" section. """ import contextlib import functools import operator import platform import itertools import sys from scipy._lib import _pep440 import numpy as np from numpy import (arange, zeros, array, dot, asarray, vstack, ndarray, transpose, diag, kron, inf, conjugate, int8, ComplexWarning) import random from numpy.testing import (assert_equal, assert_array_equal, assert_array_almost_equal, assert_almost_equal, assert_, assert_allclose,suppress_warnings) from pytest import raises as assert_raises import scipy.linalg import scipy.sparse as sparse from scipy.sparse import (csc_matrix, csr_matrix, dok_matrix, coo_matrix, lil_matrix, dia_matrix, bsr_matrix, eye, issparse, SparseEfficiencyWarning) from scipy.sparse._sputils import (supported_dtypes, isscalarlike, get_index_dtype, asmatrix, matrix) from scipy.sparse.linalg import splu, expm, inv from scipy._lib.decorator import decorator import pytest IS_COLAB = ('google.colab' in sys.modules) def assert_in(member, collection, msg=None): assert_(member in collection, msg=msg if msg is not None else f"{member!r} not found in {collection!r}") def assert_array_equal_dtype(x, y, **kwargs): assert_(x.dtype == y.dtype) assert_array_equal(x, y, **kwargs) NON_ARRAY_BACKED_FORMATS = frozenset(['dok']) def sparse_may_share_memory(A, B): # Checks if A and B have any numpy array sharing memory. def _underlying_arrays(x): # Given any object (e.g. a sparse array), returns all numpy arrays # stored in any attribute. 
arrays = [] for a in x.__dict__.values(): if isinstance(a, (np.ndarray, np.generic)): arrays.append(a) return arrays for a in _underlying_arrays(A): for b in _underlying_arrays(B): if np.may_share_memory(a, b): return True return False sup_complex = suppress_warnings() sup_complex.filter(ComplexWarning) def with_64bit_maxval_limit(maxval_limit=None, random=False, fixed_dtype=None, downcast_maxval=None, assert_32bit=False): """ Monkeypatch the maxval threshold at which scipy.sparse switches to 64-bit index arrays, or make it (pseudo-)random. """ if maxval_limit is None: maxval_limit = np.int64(10) else: # Ensure we use numpy scalars rather than Python scalars (matters for # NEP 50 casting rule changes) maxval_limit = np.int64(maxval_limit) if assert_32bit: def new_get_index_dtype(arrays=(), maxval=None, check_contents=False): tp = get_index_dtype(arrays, maxval, check_contents) assert_equal(np.iinfo(tp).max, np.iinfo(np.int32).max) assert_(tp == np.int32 or tp == np.intc) return tp elif fixed_dtype is not None: def new_get_index_dtype(arrays=(), maxval=None, check_contents=False): return fixed_dtype elif random: counter = np.random.RandomState(seed=1234) def new_get_index_dtype(arrays=(), maxval=None, check_contents=False): return (np.int32, np.int64)[counter.randint(2)] else: def new_get_index_dtype(arrays=(), maxval=None, check_contents=False): dtype = np.int32 if maxval is not None: if maxval > maxval_limit: dtype = np.int64 for arr in arrays: arr = np.asarray(arr) if arr.dtype > np.int32: if check_contents: if arr.size == 0: # a bigger type not needed continue elif np.issubdtype(arr.dtype, np.integer): maxval = arr.max() minval = arr.min() if minval >= -maxval_limit and maxval <= maxval_limit: # a bigger type not needed continue dtype = np.int64 return dtype if downcast_maxval is not None: def new_downcast_intp_index(arr): if arr.max() > downcast_maxval: raise AssertionError("downcast limited") return arr.astype(np.intp) @decorator def deco(func, *a, **kw): 
backup = [] modules = [scipy.sparse._bsr, scipy.sparse._coo, scipy.sparse._csc, scipy.sparse._csr, scipy.sparse._dia, scipy.sparse._dok, scipy.sparse._lil, scipy.sparse._sputils, scipy.sparse._compressed, scipy.sparse._construct] try: for mod in modules: backup.append((mod, 'get_index_dtype', getattr(mod, 'get_index_dtype', None))) setattr(mod, 'get_index_dtype', new_get_index_dtype) if downcast_maxval is not None: backup.append((mod, 'downcast_intp_index', getattr(mod, 'downcast_intp_index', None))) setattr(mod, 'downcast_intp_index', new_downcast_intp_index) return func(*a, **kw) finally: for mod, name, oldfunc in backup: if oldfunc is not None: setattr(mod, name, oldfunc) return deco def toarray(a): if isinstance(a, np.ndarray) or isscalarlike(a): return a return a.toarray() class BinopTester: # Custom type to test binary operations on sparse matrices. def __add__(self, mat): return "matrix on the right" def __mul__(self, mat): return "matrix on the right" def __sub__(self, mat): return "matrix on the right" def __radd__(self, mat): return "matrix on the left" def __rmul__(self, mat): return "matrix on the left" def __rsub__(self, mat): return "matrix on the left" def __matmul__(self, mat): return "matrix on the right" def __rmatmul__(self, mat): return "matrix on the left" class BinopTester_with_shape: # Custom type to test binary operations on sparse matrices # with object which has shape attribute. 
def __init__(self,shape): self._shape = shape def shape(self): return self._shape def ndim(self): return len(self._shape) def __add__(self, mat): return "matrix on the right" def __mul__(self, mat): return "matrix on the right" def __sub__(self, mat): return "matrix on the right" def __radd__(self, mat): return "matrix on the left" def __rmul__(self, mat): return "matrix on the left" def __rsub__(self, mat): return "matrix on the left" def __matmul__(self, mat): return "matrix on the right" def __rmatmul__(self, mat): return "matrix on the left" #------------------------------------------------------------------------------ # Generic tests #------------------------------------------------------------------------------ # TODO test prune # TODO test has_sorted_indices class _TestCommon: """test common functionality shared by all sparse formats""" math_dtypes = supported_dtypes @classmethod def init_class(cls): # Canonical data. cls.dat = array([[1, 0, 0, 2], [3, 0, 1, 0], [0, 2, 0, 0]], 'd') cls.datsp = cls.spcreator(cls.dat) # set array/matrix testing mode for this class based on the class attribute # Could use spcreator._is_array except that some test classes (e.g. TextCSR) # use a method to filter warnings produced when creating the sparse object. cls._is_array = cls.datsp._is_array # Some sparse and dense matrices with data for every supported dtype. # This set union is a workaround for numpy#6295, which means that # two np.int64 dtypes don't hash to the same value. cls.checked_dtypes = set(supported_dtypes).union(cls.math_dtypes) cls.dat_dtypes = {} cls.datsp_dtypes = {} for dtype in cls.checked_dtypes: cls.dat_dtypes[dtype] = cls.dat.astype(dtype) cls.datsp_dtypes[dtype] = cls.spcreator(cls.dat.astype(dtype)) # Check that the original data is equivalent to the # corresponding dat_dtypes & datsp_dtypes. 
assert_equal(cls.dat, cls.dat_dtypes[np.float64]) assert_equal(cls.datsp.toarray(), cls.datsp_dtypes[np.float64].toarray()) def test_bool(self): def check(dtype): datsp = self.datsp_dtypes[dtype] assert_raises(ValueError, bool, datsp) assert_(self.spcreator([1])) assert_(not self.spcreator([0])) if isinstance(self, TestDOK): pytest.skip("Cannot create a rank <= 2 DOK matrix.") for dtype in self.checked_dtypes: check(dtype) def test_bool_rollover(self): # bool's underlying dtype is 1 byte, check that it does not # rollover True -> False at 256. dat = array([[True, False]]) datsp = self.spcreator(dat) for _ in range(10): datsp = datsp + datsp dat = dat + dat assert_array_equal(dat, datsp.toarray()) def test_eq(self): sup = suppress_warnings() sup.filter(SparseEfficiencyWarning) @sup @sup_complex def check(dtype): dat = self.dat_dtypes[dtype] datsp = self.datsp_dtypes[dtype] dat2 = dat.copy() dat2[:,0] = 0 datsp2 = self.spcreator(dat2) datbsr = bsr_matrix(dat) datcsr = csr_matrix(dat) datcsc = csc_matrix(dat) datlil = lil_matrix(dat) # sparse/sparse assert_array_equal_dtype(dat == dat2, (datsp == datsp2).toarray()) # mix sparse types assert_array_equal_dtype(dat == dat2, (datbsr == datsp2).toarray()) assert_array_equal_dtype(dat == dat2, (datcsr == datsp2).toarray()) assert_array_equal_dtype(dat == dat2, (datcsc == datsp2).toarray()) assert_array_equal_dtype(dat == dat2, (datlil == datsp2).toarray()) # sparse/dense assert_array_equal_dtype(dat == datsp2, datsp2 == dat) # sparse/scalar assert_array_equal_dtype(dat == 0, (datsp == 0).toarray()) assert_array_equal_dtype(dat == 1, (datsp == 1).toarray()) assert_array_equal_dtype(dat == np.nan, (datsp == np.nan).toarray()) if not isinstance(self, (TestBSR, TestCSC, TestCSR)): pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.") for dtype in self.checked_dtypes: check(dtype) def test_ne(self): sup = suppress_warnings() sup.filter(SparseEfficiencyWarning) @sup @sup_complex def check(dtype): dat = 
self.dat_dtypes[dtype] datsp = self.datsp_dtypes[dtype] dat2 = dat.copy() dat2[:,0] = 0 datsp2 = self.spcreator(dat2) datbsr = bsr_matrix(dat) datcsc = csc_matrix(dat) datcsr = csr_matrix(dat) datlil = lil_matrix(dat) # sparse/sparse assert_array_equal_dtype(dat != dat2, (datsp != datsp2).toarray()) # mix sparse types assert_array_equal_dtype(dat != dat2, (datbsr != datsp2).toarray()) assert_array_equal_dtype(dat != dat2, (datcsc != datsp2).toarray()) assert_array_equal_dtype(dat != dat2, (datcsr != datsp2).toarray()) assert_array_equal_dtype(dat != dat2, (datlil != datsp2).toarray()) # sparse/dense assert_array_equal_dtype(dat != datsp2, datsp2 != dat) # sparse/scalar assert_array_equal_dtype(dat != 0, (datsp != 0).toarray()) assert_array_equal_dtype(dat != 1, (datsp != 1).toarray()) assert_array_equal_dtype(0 != dat, (0 != datsp).toarray()) assert_array_equal_dtype(1 != dat, (1 != datsp).toarray()) assert_array_equal_dtype(dat != np.nan, (datsp != np.nan).toarray()) if not isinstance(self, (TestBSR, TestCSC, TestCSR)): pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.") for dtype in self.checked_dtypes: check(dtype) def test_lt(self): sup = suppress_warnings() sup.filter(SparseEfficiencyWarning) @sup @sup_complex def check(dtype): # data dat = self.dat_dtypes[dtype] datsp = self.datsp_dtypes[dtype] dat2 = dat.copy() dat2[:,0] = 0 datsp2 = self.spcreator(dat2) datcomplex = dat.astype(complex) datcomplex[:,0] = 1 + 1j datspcomplex = self.spcreator(datcomplex) datbsr = bsr_matrix(dat) datcsc = csc_matrix(dat) datcsr = csr_matrix(dat) datlil = lil_matrix(dat) # sparse/sparse assert_array_equal_dtype(dat < dat2, (datsp < datsp2).toarray()) assert_array_equal_dtype(datcomplex < dat2, (datspcomplex < datsp2).toarray()) # mix sparse types assert_array_equal_dtype(dat < dat2, (datbsr < datsp2).toarray()) assert_array_equal_dtype(dat < dat2, (datcsc < datsp2).toarray()) assert_array_equal_dtype(dat < dat2, (datcsr < datsp2).toarray()) 
assert_array_equal_dtype(dat < dat2, (datlil < datsp2).toarray()) assert_array_equal_dtype(dat2 < dat, (datsp2 < datbsr).toarray()) assert_array_equal_dtype(dat2 < dat, (datsp2 < datcsc).toarray()) assert_array_equal_dtype(dat2 < dat, (datsp2 < datcsr).toarray()) assert_array_equal_dtype(dat2 < dat, (datsp2 < datlil).toarray()) # sparse/dense assert_array_equal_dtype(dat < dat2, datsp < dat2) assert_array_equal_dtype(datcomplex < dat2, datspcomplex < dat2) # sparse/scalar for val in [2, 1, 0, -1, -2]: val = np.int64(val) # avoid Python scalar (due to NEP 50 changes) assert_array_equal_dtype((datsp < val).toarray(), dat < val) assert_array_equal_dtype((val < datsp).toarray(), val < dat) with np.errstate(invalid='ignore'): assert_array_equal_dtype((datsp < np.nan).toarray(), dat < np.nan) # data dat = self.dat_dtypes[dtype] datsp = self.datsp_dtypes[dtype] dat2 = dat.copy() dat2[:,0] = 0 datsp2 = self.spcreator(dat2) # dense rhs assert_array_equal_dtype(dat < datsp2, datsp < dat2) if not isinstance(self, (TestBSR, TestCSC, TestCSR)): pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.") for dtype in self.checked_dtypes: check(dtype) def test_gt(self): sup = suppress_warnings() sup.filter(SparseEfficiencyWarning) @sup @sup_complex def check(dtype): dat = self.dat_dtypes[dtype] datsp = self.datsp_dtypes[dtype] dat2 = dat.copy() dat2[:,0] = 0 datsp2 = self.spcreator(dat2) datcomplex = dat.astype(complex) datcomplex[:,0] = 1 + 1j datspcomplex = self.spcreator(datcomplex) datbsr = bsr_matrix(dat) datcsc = csc_matrix(dat) datcsr = csr_matrix(dat) datlil = lil_matrix(dat) # sparse/sparse assert_array_equal_dtype(dat > dat2, (datsp > datsp2).toarray()) assert_array_equal_dtype(datcomplex > dat2, (datspcomplex > datsp2).toarray()) # mix sparse types assert_array_equal_dtype(dat > dat2, (datbsr > datsp2).toarray()) assert_array_equal_dtype(dat > dat2, (datcsc > datsp2).toarray()) assert_array_equal_dtype(dat > dat2, (datcsr > datsp2).toarray()) 
assert_array_equal_dtype(dat > dat2, (datlil > datsp2).toarray()) assert_array_equal_dtype(dat2 > dat, (datsp2 > datbsr).toarray()) assert_array_equal_dtype(dat2 > dat, (datsp2 > datcsc).toarray()) assert_array_equal_dtype(dat2 > dat, (datsp2 > datcsr).toarray()) assert_array_equal_dtype(dat2 > dat, (datsp2 > datlil).toarray()) # sparse/dense assert_array_equal_dtype(dat > dat2, datsp > dat2) assert_array_equal_dtype(datcomplex > dat2, datspcomplex > dat2) # sparse/scalar for val in [2, 1, 0, -1, -2]: val = np.int64(val) # avoid Python scalar (due to NEP 50 changes) assert_array_equal_dtype((datsp > val).toarray(), dat > val) assert_array_equal_dtype((val > datsp).toarray(), val > dat) with np.errstate(invalid='ignore'): assert_array_equal_dtype((datsp > np.nan).toarray(), dat > np.nan) # data dat = self.dat_dtypes[dtype] datsp = self.datsp_dtypes[dtype] dat2 = dat.copy() dat2[:,0] = 0 datsp2 = self.spcreator(dat2) # dense rhs assert_array_equal_dtype(dat > datsp2, datsp > dat2) if not isinstance(self, (TestBSR, TestCSC, TestCSR)): pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.") for dtype in self.checked_dtypes: check(dtype) def test_le(self): sup = suppress_warnings() sup.filter(SparseEfficiencyWarning) @sup @sup_complex def check(dtype): dat = self.dat_dtypes[dtype] datsp = self.datsp_dtypes[dtype] dat2 = dat.copy() dat2[:,0] = 0 datsp2 = self.spcreator(dat2) datcomplex = dat.astype(complex) datcomplex[:,0] = 1 + 1j datspcomplex = self.spcreator(datcomplex) datbsr = bsr_matrix(dat) datcsc = csc_matrix(dat) datcsr = csr_matrix(dat) datlil = lil_matrix(dat) # sparse/sparse assert_array_equal_dtype(dat <= dat2, (datsp <= datsp2).toarray()) assert_array_equal_dtype(datcomplex <= dat2, (datspcomplex <= datsp2).toarray()) # mix sparse types assert_array_equal_dtype((datbsr <= datsp2).toarray(), dat <= dat2) assert_array_equal_dtype((datcsc <= datsp2).toarray(), dat <= dat2) assert_array_equal_dtype((datcsr <= datsp2).toarray(), dat <= dat2) 
assert_array_equal_dtype((datlil <= datsp2).toarray(), dat <= dat2) assert_array_equal_dtype((datsp2 <= datbsr).toarray(), dat2 <= dat) assert_array_equal_dtype((datsp2 <= datcsc).toarray(), dat2 <= dat) assert_array_equal_dtype((datsp2 <= datcsr).toarray(), dat2 <= dat) assert_array_equal_dtype((datsp2 <= datlil).toarray(), dat2 <= dat) # sparse/dense assert_array_equal_dtype(datsp <= dat2, dat <= dat2) assert_array_equal_dtype(datspcomplex <= dat2, datcomplex <= dat2) # sparse/scalar for val in [2, 1, -1, -2]: val = np.int64(val) # avoid Python scalar (due to NEP 50 changes) assert_array_equal_dtype((datsp <= val).toarray(), dat <= val) assert_array_equal_dtype((val <= datsp).toarray(), val <= dat) # data dat = self.dat_dtypes[dtype] datsp = self.datsp_dtypes[dtype] dat2 = dat.copy() dat2[:,0] = 0 datsp2 = self.spcreator(dat2) # dense rhs assert_array_equal_dtype(dat <= datsp2, datsp <= dat2) if not isinstance(self, (TestBSR, TestCSC, TestCSR)): pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.") for dtype in self.checked_dtypes: check(dtype) def test_ge(self): sup = suppress_warnings() sup.filter(SparseEfficiencyWarning) @sup @sup_complex def check(dtype): dat = self.dat_dtypes[dtype] datsp = self.datsp_dtypes[dtype] dat2 = dat.copy() dat2[:,0] = 0 datsp2 = self.spcreator(dat2) datcomplex = dat.astype(complex) datcomplex[:,0] = 1 + 1j datspcomplex = self.spcreator(datcomplex) datbsr = bsr_matrix(dat) datcsc = csc_matrix(dat) datcsr = csr_matrix(dat) datlil = lil_matrix(dat) # sparse/sparse assert_array_equal_dtype(dat >= dat2, (datsp >= datsp2).toarray()) assert_array_equal_dtype(datcomplex >= dat2, (datspcomplex >= datsp2).toarray()) # mix sparse types assert_array_equal_dtype((datbsr >= datsp2).toarray(), dat >= dat2) assert_array_equal_dtype((datcsc >= datsp2).toarray(), dat >= dat2) assert_array_equal_dtype((datcsr >= datsp2).toarray(), dat >= dat2) assert_array_equal_dtype((datlil >= datsp2).toarray(), dat >= dat2) 
assert_array_equal_dtype((datsp2 >= datbsr).toarray(), dat2 >= dat) assert_array_equal_dtype((datsp2 >= datcsc).toarray(), dat2 >= dat) assert_array_equal_dtype((datsp2 >= datcsr).toarray(), dat2 >= dat) assert_array_equal_dtype((datsp2 >= datlil).toarray(), dat2 >= dat) # sparse/dense assert_array_equal_dtype(datsp >= dat2, dat >= dat2) assert_array_equal_dtype(datspcomplex >= dat2, datcomplex >= dat2) # sparse/scalar for val in [2, 1, -1, -2]: val = np.int64(val) # avoid Python scalar (due to NEP 50 changes) assert_array_equal_dtype((datsp >= val).toarray(), dat >= val) assert_array_equal_dtype((val >= datsp).toarray(), val >= dat) # dense data dat = self.dat_dtypes[dtype] datsp = self.datsp_dtypes[dtype] dat2 = dat.copy() dat2[:,0] = 0 datsp2 = self.spcreator(dat2) # dense rhs assert_array_equal_dtype(dat >= datsp2, datsp >= dat2) if not isinstance(self, (TestBSR, TestCSC, TestCSR)): pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.") for dtype in self.checked_dtypes: check(dtype) def test_empty(self): # create empty matrices assert_equal(self.spcreator((3, 3)).toarray(), zeros((3, 3))) assert_equal(self.spcreator((3, 3)).nnz, 0) assert_equal(self.spcreator((3, 3)).count_nonzero(), 0) def test_count_nonzero(self): expected = np.count_nonzero(self.datsp.toarray()) assert_equal(self.datsp.count_nonzero(), expected) assert_equal(self.datsp.T.count_nonzero(), expected) def test_invalid_shapes(self): assert_raises(ValueError, self.spcreator, (-1,3)) assert_raises(ValueError, self.spcreator, (3,-1)) assert_raises(ValueError, self.spcreator, (-1,-1)) def test_repr(self): repr(self.datsp) def test_str(self): str(self.datsp) def test_empty_arithmetic(self): # Test manipulating empty matrices. 
Fails in SciPy SVN <= r1768 shape = (5, 5) for mytype in [np.dtype('int32'), np.dtype('float32'), np.dtype('float64'), np.dtype('complex64'), np.dtype('complex128')]: a = self.spcreator(shape, dtype=mytype) b = a + a c = 2 * a d = a @ a.tocsc() e = a @ a.tocsr() f = a @ a.tocoo() for m in [a,b,c,d,e,f]: assert_equal(m.toarray(), a.toarray()@a.toarray()) # These fail in all revisions <= r1768: assert_equal(m.dtype,mytype) assert_equal(m.toarray().dtype,mytype) def test_abs(self): A = array([[-1, 0, 17], [0, -5, 0], [1, -4, 0], [0, 0, 0]], 'd') assert_equal(abs(A), abs(self.spcreator(A)).toarray()) def test_round(self): decimal = 1 A = array([[-1.35, 0.56], [17.25, -5.98]], 'd') assert_equal(np.around(A, decimals=decimal), round(self.spcreator(A), ndigits=decimal).toarray()) def test_elementwise_power(self): A = array([[-4, -3, -2], [-1, 0, 1], [2, 3, 4]], 'd') assert_equal(np.power(A, 2), self.spcreator(A).power(2).toarray()) #it's element-wise power function, input has to be a scalar assert_raises(NotImplementedError, self.spcreator(A).power, A) def test_neg(self): A = array([[-1, 0, 17], [0, -5, 0], [1, -4, 0], [0, 0, 0]], 'd') assert_equal(-A, (-self.spcreator(A)).toarray()) # see gh-5843 A = array([[True, False, False], [False, False, True]]) assert_raises(NotImplementedError, self.spcreator(A).__neg__) def test_real(self): D = array([[1 + 3j, 2 - 4j]]) A = self.spcreator(D) assert_equal(A.real.toarray(), D.real) def test_imag(self): D = array([[1 + 3j, 2 - 4j]]) A = self.spcreator(D) assert_equal(A.imag.toarray(), D.imag) def test_diagonal(self): # Does the matrix's .diagonal() method work? 
mats = [] mats.append([[1,0,2]]) mats.append([[1],[0],[2]]) mats.append([[0,1],[0,2],[0,3]]) mats.append([[0,0,1],[0,0,2],[0,3,0]]) mats.append([[1,0],[0,0]]) mats.append(kron(mats[0],[[1,2]])) mats.append(kron(mats[0],[[1],[2]])) mats.append(kron(mats[1],[[1,2],[3,4]])) mats.append(kron(mats[2],[[1,2],[3,4]])) mats.append(kron(mats[3],[[1,2],[3,4]])) mats.append(kron(mats[3],[[1,2,3,4]])) for m in mats: rows, cols = array(m).shape sparse_mat = self.spcreator(m) for k in range(-rows-1, cols+2): assert_equal(sparse_mat.diagonal(k=k), diag(m, k=k)) # Test for k beyond boundaries(issue #11949) assert_equal(sparse_mat.diagonal(k=10), diag(m, k=10)) assert_equal(sparse_mat.diagonal(k=-99), diag(m, k=-99)) # Test all-zero matrix. assert_equal(self.spcreator((40, 16130)).diagonal(), np.zeros(40)) # Test empty matrix # https://github.com/scipy/scipy/issues/11949 assert_equal(self.spcreator((0, 0)).diagonal(), np.empty(0)) assert_equal(self.spcreator((15, 0)).diagonal(), np.empty(0)) assert_equal(self.spcreator((0, 5)).diagonal(10), np.empty(0)) def test_trace(self): # For square matrix A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) B = self.spcreator(A) for k in range(-2, 3): assert_equal(A.trace(offset=k), B.trace(offset=k)) # For rectangular matrix A = np.array([[1, 2, 3], [4, 5, 6]]) B = self.spcreator(A) for k in range(-1, 3): assert_equal(A.trace(offset=k), B.trace(offset=k)) def test_reshape(self): # This first example is taken from the lil_matrix reshaping test. 
x = self.spcreator([[1, 0, 7], [0, 0, 0], [0, 3, 0], [0, 0, 5]]) for order in ['C', 'F']: for s in [(12, 1), (1, 12)]: assert_array_equal(x.reshape(s, order=order).toarray(), x.toarray().reshape(s, order=order)) # This example is taken from the stackoverflow answer at # https://stackoverflow.com/q/16511879 x = self.spcreator([[0, 10, 0, 0], [0, 0, 0, 0], [0, 20, 30, 40]]) y = x.reshape((2, 6)) # Default order is 'C' desired = [[0, 10, 0, 0, 0, 0], [0, 0, 0, 20, 30, 40]] assert_array_equal(y.toarray(), desired) # Reshape with negative indexes y = x.reshape((2, -1)) assert_array_equal(y.toarray(), desired) y = x.reshape((-1, 6)) assert_array_equal(y.toarray(), desired) assert_raises(ValueError, x.reshape, (-1, -1)) # Reshape with star args y = x.reshape(2, 6) assert_array_equal(y.toarray(), desired) assert_raises(TypeError, x.reshape, 2, 6, not_an_arg=1) # Reshape with same size is noop unless copy=True y = x.reshape((3, 4)) assert_(y is x) y = x.reshape((3, 4), copy=True) assert_(y is not x) # Ensure reshape did not alter original size assert_array_equal(x.shape, (3, 4)) # Reshape in place x.shape = (2, 6) assert_array_equal(x.toarray(), desired) # Reshape to bad ndim assert_raises(ValueError, x.reshape, (x.size,)) assert_raises(ValueError, x.reshape, (1, x.size, 1)) @pytest.mark.slow def test_setdiag_comprehensive(self): def dense_setdiag(a, v, k): v = np.asarray(v) if k >= 0: n = min(a.shape[0], a.shape[1] - k) if v.ndim != 0: n = min(n, len(v)) v = v[:n] i = np.arange(0, n) j = np.arange(k, k + n) a[i,j] = v elif k < 0: dense_setdiag(a.T, v, -k) def check_setdiag(a, b, k): # Check setting diagonal using a scalar, a vector of # correct length, and too short or too long vectors for r in [-1, len(np.diag(a, k)), 2, 30]: if r < 0: v = np.random.choice(range(1, 20)) else: v = np.random.randint(1, 20, size=r) dense_setdiag(a, v, k) with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "Changing the sparsity structure of a cs[cr]_matrix is expensive") 
b.setdiag(v, k) # check that dense_setdiag worked d = np.diag(a, k) if np.asarray(v).ndim == 0: assert_array_equal(d, v, err_msg="%s %d" % (msg, r)) else: n = min(len(d), len(v)) assert_array_equal(d[:n], v[:n], err_msg="%s %d" % (msg, r)) # check that sparse setdiag worked assert_array_equal(b.A, a, err_msg="%s %d" % (msg, r)) # comprehensive test np.random.seed(1234) shapes = [(0,5), (5,0), (1,5), (5,1), (5,5)] for dtype in [np.int8, np.float64]: for m,n in shapes: ks = np.arange(-m+1, n-1) for k in ks: msg = repr((dtype, m, n, k)) a = np.zeros((m, n), dtype=dtype) b = self.spcreator((m, n), dtype=dtype) check_setdiag(a, b, k) # check overwriting etc for k2 in np.random.choice(ks, size=min(len(ks), 5)): check_setdiag(a, b, k2) def test_setdiag(self): # simple test cases m = self.spcreator(np.eye(3)) m2 = self.spcreator((4, 4)) values = [3, 2, 1] with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "Changing the sparsity structure of a cs[cr]_matrix is expensive") assert_raises(ValueError, m.setdiag, values, k=4) m.setdiag(values) assert_array_equal(m.diagonal(), values) m.setdiag(values, k=1) assert_array_equal(m.toarray(), np.array([[3, 3, 0], [0, 2, 2], [0, 0, 1]])) m.setdiag(values, k=-2) assert_array_equal(m.toarray(), np.array([[3, 3, 0], [0, 2, 2], [3, 0, 1]])) m.setdiag((9,), k=2) assert_array_equal(m.toarray()[0,2], 9) m.setdiag((9,), k=-2) assert_array_equal(m.toarray()[2,0], 9) # test short values on an empty matrix m2.setdiag([1], k=2) assert_array_equal(m2.toarray()[0], [0, 0, 1, 0]) # test overwriting that same diagonal m2.setdiag([1, 1], k=2) assert_array_equal(m2.toarray()[:2], [[0, 0, 1, 0], [0, 0, 0, 1]]) def test_nonzero(self): A = array([[1, 0, 1],[0, 1, 1],[0, 0, 1]]) Asp = self.spcreator(A) A_nz = {tuple(ij) for ij in transpose(A.nonzero())} Asp_nz = {tuple(ij) for ij in transpose(Asp.nonzero())} assert_equal(A_nz, Asp_nz) def test_numpy_nonzero(self): # See gh-5987 A = array([[1, 0, 1], [0, 1, 1], [0, 0, 1]]) Asp = 
self.spcreator(A) A_nz = {tuple(ij) for ij in transpose(np.nonzero(A))} Asp_nz = {tuple(ij) for ij in transpose(np.nonzero(Asp))} assert_equal(A_nz, Asp_nz) def test_getrow(self): assert_array_equal(self.datsp.getrow(1).toarray(), self.dat[[1], :]) assert_array_equal(self.datsp.getrow(-1).toarray(), self.dat[[-1], :]) def test_getcol(self): assert_array_equal(self.datsp.getcol(1).toarray(), self.dat[:, [1]]) assert_array_equal(self.datsp.getcol(-1).toarray(), self.dat[:, [-1]]) def test_sum(self): np.random.seed(1234) dat_1 = matrix([[0, 1, 2], [3, -4, 5], [-6, 7, 9]]) dat_2 = np.random.rand(5, 5) dat_3 = np.array([[]]) dat_4 = np.zeros((40, 40)) dat_5 = sparse.rand(5, 5, density=1e-2).toarray() matrices = [dat_1, dat_2, dat_3, dat_4, dat_5] def check(dtype, j): dat = matrix(matrices[j], dtype=dtype) datsp = self.spcreator(dat, dtype=dtype) with np.errstate(over='ignore'): assert_array_almost_equal(dat.sum(), datsp.sum()) assert_equal(dat.sum().dtype, datsp.sum().dtype) assert_(np.isscalar(datsp.sum(axis=None))) assert_array_almost_equal(dat.sum(axis=None), datsp.sum(axis=None)) assert_equal(dat.sum(axis=None).dtype, datsp.sum(axis=None).dtype) assert_array_almost_equal(dat.sum(axis=0), datsp.sum(axis=0)) assert_equal(dat.sum(axis=0).dtype, datsp.sum(axis=0).dtype) assert_array_almost_equal(dat.sum(axis=1), datsp.sum(axis=1)) assert_equal(dat.sum(axis=1).dtype, datsp.sum(axis=1).dtype) assert_array_almost_equal(dat.sum(axis=-2), datsp.sum(axis=-2)) assert_equal(dat.sum(axis=-2).dtype, datsp.sum(axis=-2).dtype) assert_array_almost_equal(dat.sum(axis=-1), datsp.sum(axis=-1)) assert_equal(dat.sum(axis=-1).dtype, datsp.sum(axis=-1).dtype) for dtype in self.checked_dtypes: for j in range(len(matrices)): check(dtype, j) def test_sum_invalid_params(self): out = np.zeros((1, 3)) dat = array([[0, 1, 2], [3, -4, 5], [-6, 7, 9]]) datsp = self.spcreator(dat) assert_raises(ValueError, datsp.sum, axis=3) assert_raises(TypeError, datsp.sum, axis=(0, 1)) assert_raises(TypeError, 
datsp.sum, axis=1.5) assert_raises(ValueError, datsp.sum, axis=1, out=out) def test_sum_dtype(self): dat = array([[0, 1, 2], [3, -4, 5], [-6, 7, 9]]) datsp = self.spcreator(dat) def check(dtype): dat_mean = dat.mean(dtype=dtype) datsp_mean = datsp.mean(dtype=dtype) assert_array_almost_equal(dat_mean, datsp_mean) assert_equal(dat_mean.dtype, datsp_mean.dtype) for dtype in self.checked_dtypes: check(dtype) def test_sum_out(self): dat = array([[0, 1, 2], [3, -4, 5], [-6, 7, 9]]) datsp = self.spcreator(dat) dat_out = array([[0]]) datsp_out = matrix([[0]]) dat.sum(out=dat_out, keepdims=True) datsp.sum(out=datsp_out) assert_array_almost_equal(dat_out, datsp_out) dat_out = np.zeros((3, 1)) datsp_out = asmatrix(np.zeros((3, 1))) dat.sum(axis=1, out=dat_out, keepdims=True) datsp.sum(axis=1, out=datsp_out) assert_array_almost_equal(dat_out, datsp_out) def test_numpy_sum(self): # See gh-5987 dat = array([[0, 1, 2], [3, -4, 5], [-6, 7, 9]]) datsp = self.spcreator(dat) dat_mean = np.sum(dat) datsp_mean = np.sum(datsp) assert_array_almost_equal(dat_mean, datsp_mean) assert_equal(dat_mean.dtype, datsp_mean.dtype) def test_mean(self): def check(dtype): dat = array([[0, 1, 2], [3, 4, 5], [6, 7, 9]], dtype=dtype) datsp = self.spcreator(dat, dtype=dtype) assert_array_almost_equal(dat.mean(), datsp.mean()) assert_equal(dat.mean().dtype, datsp.mean().dtype) assert_(np.isscalar(datsp.mean(axis=None))) assert_array_almost_equal( dat.mean(axis=None, keepdims=True), datsp.mean(axis=None) ) assert_equal(dat.mean(axis=None).dtype, datsp.mean(axis=None).dtype) assert_array_almost_equal( dat.mean(axis=0, keepdims=True), datsp.mean(axis=0) ) assert_equal(dat.mean(axis=0).dtype, datsp.mean(axis=0).dtype) assert_array_almost_equal( dat.mean(axis=1, keepdims=True), datsp.mean(axis=1) ) assert_equal(dat.mean(axis=1).dtype, datsp.mean(axis=1).dtype) assert_array_almost_equal( dat.mean(axis=-2, keepdims=True), datsp.mean(axis=-2) ) assert_equal(dat.mean(axis=-2).dtype, datsp.mean(axis=-2).dtype) 
assert_array_almost_equal( dat.mean(axis=-1, keepdims=True), datsp.mean(axis=-1) ) assert_equal(dat.mean(axis=-1).dtype, datsp.mean(axis=-1).dtype) for dtype in self.checked_dtypes: check(dtype) def test_mean_invalid_params(self): out = asmatrix(np.zeros((1, 3))) dat = array([[0, 1, 2], [3, -4, 5], [-6, 7, 9]]) datsp = self.spcreator(dat) assert_raises(ValueError, datsp.mean, axis=3) assert_raises(TypeError, datsp.mean, axis=(0, 1)) assert_raises(TypeError, datsp.mean, axis=1.5) assert_raises(ValueError, datsp.mean, axis=1, out=out) def test_mean_dtype(self): dat = array([[0, 1, 2], [3, -4, 5], [-6, 7, 9]]) datsp = self.spcreator(dat) def check(dtype): dat_mean = dat.mean(dtype=dtype) datsp_mean = datsp.mean(dtype=dtype) assert_array_almost_equal(dat_mean, datsp_mean) assert_equal(dat_mean.dtype, datsp_mean.dtype) for dtype in self.checked_dtypes: check(dtype) def test_mean_out(self): dat = array([[0, 1, 2], [3, -4, 5], [-6, 7, 9]]) datsp = self.spcreator(dat) dat_out = array([[0]]) datsp_out = matrix([[0]]) dat.mean(out=dat_out, keepdims=True) datsp.mean(out=datsp_out) assert_array_almost_equal(dat_out, datsp_out) dat_out = np.zeros((3, 1)) datsp_out = matrix(np.zeros((3, 1))) dat.mean(axis=1, out=dat_out, keepdims=True) datsp.mean(axis=1, out=datsp_out) assert_array_almost_equal(dat_out, datsp_out) def test_numpy_mean(self): # See gh-5987 dat = array([[0, 1, 2], [3, -4, 5], [-6, 7, 9]]) datsp = self.spcreator(dat) dat_mean = np.mean(dat) datsp_mean = np.mean(datsp) assert_array_almost_equal(dat_mean, datsp_mean) assert_equal(dat_mean.dtype, datsp_mean.dtype) def test_expm(self): M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float) sM = self.spcreator(M, shape=(3,3), dtype=float) Mexp = scipy.linalg.expm(M) N = array([[3., 0., 1.], [0., 2., 0.], [0., 0., 0.]]) sN = self.spcreator(N, shape=(3,3), dtype=float) Nexp = scipy.linalg.expm(N) with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "splu converted its input to CSC format") 
sup.filter(SparseEfficiencyWarning, "spsolve is more efficient when sparse b is in the CSC matrix format") sup.filter(SparseEfficiencyWarning, "spsolve requires A be CSC or CSR matrix format") sMexp = expm(sM).toarray() sNexp = expm(sN).toarray() assert_array_almost_equal((sMexp - Mexp), zeros((3, 3))) assert_array_almost_equal((sNexp - Nexp), zeros((3, 3))) def test_inv(self): def check(dtype): M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], dtype) with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "spsolve requires A be CSC or CSR matrix format") sup.filter(SparseEfficiencyWarning, "spsolve is more efficient when sparse b is in the CSC matrix format") sup.filter(SparseEfficiencyWarning, "splu converted its input to CSC format") sM = self.spcreator(M, shape=(3,3), dtype=dtype) sMinv = inv(sM) assert_array_almost_equal(sMinv.dot(sM).toarray(), np.eye(3)) assert_raises(TypeError, inv, M) for dtype in [float]: check(dtype) @sup_complex def test_from_array(self): A = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]]) assert_array_equal(self.spcreator(A).toarray(), A) A = array([[1.0 + 3j, 0, 0], [0, 2.0 + 5, 0], [0, 0, 0]]) assert_array_equal(self.spcreator(A).toarray(), A) assert_array_equal(self.spcreator(A, dtype='int16').toarray(),A.astype('int16')) @sup_complex def test_from_matrix(self): A = matrix([[1, 0, 0], [2, 3, 4], [0, 5, 0], [0, 0, 0]]) assert_array_equal(self.spcreator(A).todense(), A) A = matrix([[1.0 + 3j, 0, 0], [0, 2.0 + 5, 0], [0, 0, 0]]) assert_array_equal(self.spcreator(A).todense(), A) assert_array_equal( self.spcreator(A, dtype='int16').todense(), A.astype('int16') ) @sup_complex def test_from_list(self): A = [[1,0,0],[2,3,4],[0,5,0],[0,0,0]] assert_array_equal(self.spcreator(A).toarray(), A) A = [[1.0 + 3j, 0, 0], [0, 2.0 + 5, 0], [0, 0, 0]] assert_array_equal(self.spcreator(A).toarray(), array(A)) assert_array_equal( self.spcreator(A, dtype='int16').toarray(), array(A).astype('int16') ) @sup_complex def test_from_sparse(self): D = 
array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]]) S = csr_matrix(D) assert_array_equal(self.spcreator(S).toarray(), D) S = self.spcreator(D) assert_array_equal(self.spcreator(S).toarray(), D) D = array([[1.0 + 3j, 0, 0], [0, 2.0 + 5, 0], [0, 0, 0]]) S = csr_matrix(D) assert_array_equal(self.spcreator(S).toarray(), D) assert_array_equal(self.spcreator(S, dtype='int16').toarray(), D.astype('int16')) S = self.spcreator(D) assert_array_equal(self.spcreator(S).toarray(), D) assert_array_equal(self.spcreator(S, dtype='int16').toarray(), D.astype('int16')) # def test_array(self): # """test array(A) where A is in sparse format""" # assert_equal( array(self.datsp), self.dat ) def test_todense(self): # Check C- or F-contiguous (default). chk = self.datsp.todense() assert isinstance(chk, np.matrix) assert_array_equal(chk, self.dat) assert_(chk.flags.c_contiguous != chk.flags.f_contiguous) # Check C-contiguous (with arg). chk = self.datsp.todense(order='C') assert_array_equal(chk, self.dat) assert_(chk.flags.c_contiguous) assert_(not chk.flags.f_contiguous) # Check F-contiguous (with arg). chk = self.datsp.todense(order='F') assert_array_equal(chk, self.dat) assert_(not chk.flags.c_contiguous) assert_(chk.flags.f_contiguous) # Check with out argument (array). out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype) chk = self.datsp.todense(out=out) assert_array_equal(self.dat, out) assert_array_equal(self.dat, chk) assert_(chk.base is out) # Check with out array (matrix). out = asmatrix(np.zeros(self.datsp.shape, dtype=self.datsp.dtype)) chk = self.datsp.todense(out=out) assert_array_equal(self.dat, out) assert_array_equal(self.dat, chk) assert_(chk is out) a = array([[1.,2.,3.]]) dense_dot_dense = a @ self.dat check = a @ self.datsp.todense() assert_array_equal(dense_dot_dense, check) b = array([[1.,2.,3.,4.]]).T dense_dot_dense = self.dat @ b check2 = self.datsp.todense() @ b assert_array_equal(dense_dot_dense, check2) # Check bool data works. 
        # Tail of the preceding bool-dtype check (method header is above this
        # chunk): round-trip through the sparse constructor with dtype=bool.
        spbool = self.spcreator(self.dat, dtype=bool)
        matbool = self.dat.astype(bool)
        assert_array_equal(spbool.todense(), matbool)

    def test_toarray(self):
        """Dense round-trip: contiguity flags, ``order=``, ``out=``, bool."""
        # Check C- or F-contiguous (default).
        dat = asarray(self.dat)
        chk = self.datsp.toarray()
        assert_array_equal(chk, dat)
        assert_(chk.flags.c_contiguous != chk.flags.f_contiguous)

        # Check C-contiguous (with arg).
        chk = self.datsp.toarray(order='C')
        assert_array_equal(chk, dat)
        assert_(chk.flags.c_contiguous)
        assert_(not chk.flags.f_contiguous)

        # Check F-contiguous (with arg).
        chk = self.datsp.toarray(order='F')
        assert_array_equal(chk, dat)
        assert_(not chk.flags.c_contiguous)
        assert_(chk.flags.f_contiguous)

        # Check with output arg.
        out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype)
        self.datsp.toarray(out=out)
        assert_array_equal(chk, dat)

        # Check that things are fine when we don't initialize with zeros.
        out[...] = 1.
        self.datsp.toarray(out=out)
        assert_array_equal(chk, dat)

        a = array([1., 2., 3.])
        dense_dot_dense = dot(a, dat)
        check = dot(a, self.datsp.toarray())
        assert_array_equal(dense_dot_dense, check)

        b = array([1., 2., 3., 4.])
        dense_dot_dense = dot(dat, b)
        check2 = dot(self.datsp.toarray(), b)
        assert_array_equal(dense_dot_dense, check2)

        # Check bool data works.
        spbool = self.spcreator(self.dat, dtype=bool)
        arrbool = dat.astype(bool)
        assert_array_equal(spbool.toarray(), arrbool)

    @sup_complex
    def test_astype(self):
        """astype: dtype/value/format preservation and copy semantics."""
        D = array([[2.0 + 3j, 0, 0],
                   [0, 4.0 + 5j, 0],
                   [0, 0, 0]])
        S = self.spcreator(D)

        for x in supported_dtypes:
            # Check correctly casted
            D_casted = D.astype(x)
            for copy in (True, False):
                S_casted = S.astype(x, copy=copy)
                assert_equal(S_casted.dtype, D_casted.dtype)  # correct type
                assert_equal(S_casted.toarray(), D_casted)    # correct values
                assert_equal(S_casted.format, S.format)       # format preserved
            # Check correctly copied
            assert_(S_casted.astype(x, copy=False) is S_casted)
            S_copied = S_casted.astype(x, copy=True)
            assert_(S_copied is not S_casted)

            # A true copy must be equal but must not alias the backing arrays.
            def check_equal_but_not_same_array_attribute(attribute):
                a = getattr(S_casted, attribute)
                b = getattr(S_copied, attribute)
                assert_array_equal(a, b)
                assert_(a is not b)
                i = (0,) * b.ndim
                b_i = b[i]
                b[i] = not b[i]
                assert_(a[i] != b[i])
                b[i] = b_i

            if S_casted.format in ('csr', 'csc', 'bsr'):
                for attribute in ('indices', 'indptr', 'data'):
                    check_equal_but_not_same_array_attribute(attribute)
            elif S_casted.format == 'coo':
                for attribute in ('row', 'col', 'data'):
                    check_equal_but_not_same_array_attribute(attribute)
            elif S_casted.format == 'dia':
                for attribute in ('offsets', 'data'):
                    check_equal_but_not_same_array_attribute(attribute)

    @sup_complex
    def test_astype_immutable(self):
        """astype must work even when the backing buffers are read-only."""
        D = array([[2.0 + 3j, 0, 0],
                   [0, 4.0 + 5j, 0],
                   [0, 0, 0]])
        S = self.spcreator(D)
        if hasattr(S, 'data'):
            S.data.flags.writeable = False
        if hasattr(S, 'indptr'):
            S.indptr.flags.writeable = False
        if hasattr(S, 'indices'):
            S.indices.flags.writeable = False

        for x in supported_dtypes:
            D_casted = D.astype(x)
            S_casted = S.astype(x)
            assert_equal(S_casted.dtype, D_casted.dtype)

    def test_asfptype(self):
        """asfptype upcasts integer matrices to a float dtype, idempotently."""
        A = self.spcreator(arange(6, dtype='int32').reshape(2, 3))

        assert_equal(A.dtype, np.dtype('int32'))
        assert_equal(A.asfptype().dtype, np.dtype('float64'))
        assert_equal(A.asfptype().format, A.format)
        assert_equal(A.astype('int16').asfptype().dtype, np.dtype('float32'))
        assert_equal(A.astype('complex128').asfptype().dtype,
                     np.dtype('complex128'))

        B = A.asfptype()
        C = B.asfptype()
        # Already-float input is returned as-is (no copy).
        assert_(B is C)

    def test_mul_scalar(self):
        def check(dtype):
            dat = self.dat_dtypes[dtype]
            datsp = self.datsp_dtypes[dtype]

            assert_array_equal(dat*2, (datsp*2).toarray())
            assert_array_equal(dat*17.3, (datsp*17.3).toarray())

        for dtype in self.math_dtypes:
            check(dtype)

    def test_rmul_scalar(self):
        def check(dtype):
            dat = self.dat_dtypes[dtype]
            datsp = self.datsp_dtypes[dtype]

            assert_array_equal(2*dat, (2*datsp).toarray())
            assert_array_equal(17.3*dat, (17.3*datsp).toarray())

        for dtype in self.math_dtypes:
            check(dtype)

    # github issue #15210
    def test_rmul_scalar_type_error(self):
        datsp = self.datsp_dtypes[np.float64]
        with assert_raises(TypeError):
            None * datsp

    def test_add(self):
        def check(dtype):
            dat = self.dat_dtypes[dtype]
            datsp = self.datsp_dtypes[dtype]

            a = dat.copy()
            a[0, 2] = 2.0
            b = datsp
            c = b + a
            assert_array_equal(c, b.toarray() + a)

            c = b + b.tocsr()
            assert_array_equal(c.toarray(),
                               b.toarray() + b.toarray())

            # test broadcasting
            c = b + a[0]
            assert_array_equal(c, b.toarray() + a[0])

        for dtype in self.math_dtypes:
            check(dtype)

    def test_radd(self):
        def check(dtype):
            dat = self.dat_dtypes[dtype]
            datsp = self.datsp_dtypes[dtype]

            a = dat.copy()
            a[0, 2] = 2.0
            b = datsp
            c = a + b
            assert_array_equal(c, a + b.toarray())

        for dtype in self.math_dtypes:
            check(dtype)

    def test_sub(self):
        def check(dtype):
            dat = self.dat_dtypes[dtype]
            datsp = self.datsp_dtypes[dtype]

            assert_array_equal((datsp - datsp).toarray(), np.zeros((3, 4)))
            assert_array_equal((datsp - 0).toarray(), dat)

            A = self.spcreator(
                np.array([[1, 0, 0, 4], [-1, 0, 0, 0], [0, 8, 0, -5]], 'd')
            )
            assert_array_equal((datsp - A).toarray(), dat - A.toarray())
            assert_array_equal((A - datsp).toarray(), A.toarray() - dat)

            # test broadcasting
            assert_array_equal(datsp - dat[0], dat - dat[0])

        for dtype in self.math_dtypes:
            if dtype == np.dtype('bool'):
                # boolean array subtraction deprecated in 1.9.0
                continue
            check(dtype)

    def test_rsub(self):
        def check(dtype):
            dat = self.dat_dtypes[dtype]
            datsp = self.datsp_dtypes[dtype]

            assert_array_equal((dat - datsp), [[0,0,0,0],[0,0,0,0],[0,0,0,0]])
            assert_array_equal((datsp - dat), [[0,0,0,0],[0,0,0,0],[0,0,0,0]])
            assert_array_equal((0 - datsp).toarray(), -dat)

            A = self.spcreator(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
            assert_array_equal((dat - A), dat - A.toarray())
            assert_array_equal((A - dat), A.toarray() - dat)
            assert_array_equal(A.toarray() - datsp, A.toarray() - dat)
            assert_array_equal(datsp - A.toarray(), dat - A.toarray())

            # test broadcasting
            assert_array_equal(dat[0] - datsp, dat[0] - dat)

        for dtype in self.math_dtypes:
            if dtype == np.dtype('bool'):
                # boolean array subtraction deprecated in 1.9.0
                continue
            check(dtype)

    def test_add0(self):
        def check(dtype):
            dat = self.dat_dtypes[dtype]
            datsp = self.datsp_dtypes[dtype]

            # Adding 0 to a sparse matrix
            assert_array_equal((datsp + 0).toarray(), dat)
            # use sum (which takes 0 as a starting value)
            sumS = sum([k * datsp for k in range(1, 3)])
            sumD = sum([k * dat for k in range(1, 3)])
            assert_almost_equal(sumS.toarray(), sumD)

        for dtype in self.math_dtypes:
            check(dtype)

    def test_elementwise_multiply(self):
        """Hadamard product for real/complex, sparse/sparse and sparse/dense."""
        # real/real
        A = array([[4,0,9],[2,-3,5]])
        B = array([[0,7,0],[0,-4,0]])
        Asp = self.spcreator(A)
        Bsp = self.spcreator(B)
        assert_almost_equal(Asp.multiply(Bsp).toarray(), A*B)  # sparse/sparse
        assert_almost_equal(Asp.multiply(B).toarray(), A*B)  # sparse/dense

        # complex/complex
        C = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]])
        D = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]])
        Csp = self.spcreator(C)
        Dsp = self.spcreator(D)
        assert_almost_equal(Csp.multiply(Dsp).toarray(), C*D)  # sparse/sparse
        assert_almost_equal(Csp.multiply(D).toarray(), C*D)  # sparse/dense

        # real/complex
        assert_almost_equal(Asp.multiply(Dsp).toarray(), A*D)  # sparse/sparse
        assert_almost_equal(Asp.multiply(D).toarray(), A*D)  # sparse/dense

    def test_elementwise_multiply_broadcast(self):
        """multiply() must broadcast exactly like dense ``*`` (or raise)."""
        A = array([4])
        B = array([[-9]])
        C = array([1,-1,0])
        D = array([[7,9,-9]])
        E = array([[3],[2],[1]])
        F = array([[8,6,3],[-4,3,2],[6,6,6]])
        G = [1, 2, 3]
        H = np.ones((3, 4))
        J = H.T
        K = array([[0]])
        L = array([[[1,2],[0,1]]])

        # Some arrays can't be cast as spmatrices (A,C,L) so leave
        # them out.
        Bsp = self.spcreator(B)
        Dsp = self.spcreator(D)
        Esp = self.spcreator(E)
        Fsp = self.spcreator(F)
        Hsp = self.spcreator(H)
        Hspp = self.spcreator(H[0,None])
        Jsp = self.spcreator(J)
        Jspp = self.spcreator(J[:,0,None])
        Ksp = self.spcreator(K)

        matrices = [A, B, C, D, E, F, G, H, J, K, L]
        spmatrices = [Bsp, Dsp, Esp, Fsp, Hsp, Hspp, Jsp, Jspp, Ksp]

        # sparse/sparse
        for i in spmatrices:
            for j in spmatrices:
                try:
                    dense_mult = i.toarray() * j.toarray()
                except ValueError:
                    assert_raises(ValueError, i.multiply, j)
                    continue
                sp_mult = i.multiply(j)
                assert_almost_equal(sp_mult.toarray(), dense_mult)

        # sparse/dense
        for i in spmatrices:
            for j in matrices:
                try:
                    dense_mult = i.toarray() * j
                except TypeError:
                    continue
                except ValueError:
                    assert_raises(ValueError, i.multiply, j)
                    continue
                sp_mult = i.multiply(j)
                if issparse(sp_mult):
                    assert_almost_equal(sp_mult.toarray(), dense_mult)
                else:
                    assert_almost_equal(sp_mult, dense_mult)

    def test_elementwise_divide(self):
        """Element-wise division; zeros yield nan/inf as in dense division."""
        expected = [[1,np.nan,np.nan,1],
                    [1,np.nan,1,np.nan],
                    [np.nan,1,np.nan,np.nan]]
        assert_array_equal(toarray(self.datsp / self.datsp), expected)

        denom = self.spcreator(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
        expected = [[1,np.nan,np.nan,0.5],
                    [-3,np.nan,inf,np.nan],
                    [np.nan,0.25,np.nan,0]]
        assert_array_equal(toarray(self.datsp / denom), expected)

        # complex
        A = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]])
        B = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]])
        Asp = self.spcreator(A)
        Bsp = self.spcreator(B)
        assert_almost_equal(toarray(Asp / Bsp), A/B)

        # integer
        A = array([[1,2,3],[-3,2,1]])
        B = array([[0,1,2],[0,-2,3]])
        Asp = self.spcreator(A)
        Bsp = self.spcreator(B)
        with np.errstate(divide='ignore'):
            assert_array_equal(toarray(Asp / Bsp), A / B)

        # mismatching sparsity patterns
        A = array([[0,1],[1,0]])
        B = array([[1,0],[1,0]])
        Asp = self.spcreator(A)
        Bsp = self.spcreator(B)
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_array_equal(np.array(toarray(Asp / Bsp)), A / B)

    def test_pow(self):
        """``**`` is matrix power; non-integer/negative exponents raise."""
        A = array([[1, 0, 2, 0], [0, 3, 4, 0], [0, 5, 0, 0], [0, 6, 7, 8]])
        B = self.spcreator(A)

        for exponent in [0, 1, 2, 3]:
            ret_sp = B**exponent
            ret_np = np.linalg.matrix_power(A, exponent)
            assert_array_equal(ret_sp.toarray(), ret_np)
            assert_equal(ret_sp.dtype, ret_np.dtype)

        # invalid exponents
        for exponent in [-1, 2.2, 1 + 3j]:
            assert_raises(Exception, B.__pow__, exponent)

        # nonsquare matrix
        B = self.spcreator(A[:3, :])
        assert_raises(Exception, B.__pow__, 1)

    def test_rmatvec(self):
        M = self.spcreator(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
        assert_array_almost_equal([1,2,3,4] @ M, dot([1,2,3,4], M.toarray()))
        row = array([[1,2,3,4]])
        assert_array_almost_equal(row @ M, row @ M.toarray())

    def test_small_multiplication(self):
        # test that A*x works for x with shape () (1,) (1,1) and (1,0)
        A = self.spcreator([[1],[2],[3]])

        assert_(issparse(A * array(1)))
        assert_equal((A * array(1)).toarray(), [[1], [2], [3]])

        assert_equal(A @ array([1]), array([1, 2, 3]))
        assert_equal(A @ array([[1]]), array([[1], [2], [3]]))
        assert_equal(A @ np.ones((1, 1)), array([[1], [2], [3]]))
        assert_equal(A @ np.ones((1, 0)), np.ones((3, 0)))

    def test_start_vs_at_sign_for_sparray_and_spmatrix(self):
        # test that * is matmul for spmatrix and mul for sparray
        A = self.spcreator([[1],[2],[3]])

        if A._is_array:
            assert_array_almost_equal(A * np.ones((3,1)), A)
            assert_array_almost_equal(A * array([[1]]), A)
            # NOTE(review): this repeats the first assertion above.
            assert_array_almost_equal(A * np.ones((3,1)), A)
        else:
            assert_equal(A * array([1]), array([1, 2, 3]))
            assert_equal(A * array([[1]]), array([[1], [2], [3]]))
            assert_equal(A * np.ones((1, 0)), np.ones((3, 0)))

    def test_binop_custom_type(self):
        # Non-regression test: previously, binary operations would raise
        # NotImplementedError instead of returning NotImplemented
        # (https://docs.python.org/library/constants.html#NotImplemented)
        # so overloading Custom + matrix etc. didn't work.
        A = self.spcreator([[1], [2], [3]])
        B = BinopTester()
        assert_equal(A + B, "matrix on the left")
        assert_equal(A - B, "matrix on the left")
        assert_equal(A * B, "matrix on the left")
        assert_equal(B + A, "matrix on the right")
        assert_equal(B - A, "matrix on the right")
        assert_equal(B * A, "matrix on the right")

        assert_equal(A @ B, "matrix on the left")
        assert_equal(B @ A, "matrix on the right")

    def test_binop_custom_type_with_shape(self):
        # Same as above, but the custom type advertises a shape.
        A = self.spcreator([[1], [2], [3]])
        B = BinopTester_with_shape((3,1))
        assert_equal(A + B, "matrix on the left")
        assert_equal(A - B, "matrix on the left")
        assert_equal(A * B, "matrix on the left")
        assert_equal(B + A, "matrix on the right")
        assert_equal(B - A, "matrix on the right")
        assert_equal(B * A, "matrix on the right")

        assert_equal(A @ B, "matrix on the left")
        assert_equal(B @ A, "matrix on the right")

    def test_dot_scalar(self):
        M = self.spcreator(array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
        scalar = 10
        actual = M.dot(scalar)
        expected = M * scalar

        assert_allclose(actual.toarray(), expected.toarray())

    def test_matmul(self):
        M = self.spcreator(array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
        B = self.spcreator(array([[0,1],[1,0],[0,2]],'d'))
        col = array([[1,2,3]]).T

        matmul = operator.matmul
        # check matrix-vector
        assert_array_almost_equal(matmul(M, col), M.toarray() @ col)

        # check matrix-matrix
        assert_array_almost_equal(matmul(M, B).toarray(), (M @ B).toarray())
        assert_array_almost_equal(matmul(M.toarray(), B), (M @ B).toarray())
        assert_array_almost_equal(matmul(M, B.toarray()), (M @ B).toarray())
        if not M._is_array:
            assert_array_almost_equal(matmul(M, B).toarray(), (M * B).toarray())
            assert_array_almost_equal(matmul(M.toarray(), B),
                                      (M * B).toarray())
            assert_array_almost_equal(matmul(M, B.toarray()),
                                      (M * B).toarray())

        # check error on matrix-scalar
        assert_raises(ValueError, matmul, M, 1)
        assert_raises(ValueError, matmul, 1, M)

    def test_matvec(self):
        M = self.spcreator(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
        col = array([[1,2,3]]).T

        assert_array_almost_equal(M @ col, M.toarray() @ col)

        # check result dimensions (ticket #514)
        assert_equal((M @ array([1,2,3])).shape, (4,))
        assert_equal((M @ array([[1],[2],[3]])).shape, (4,1))
        assert_equal((M @ matrix([[1],[2],[3]])).shape, (4,1))

        # check result type
        assert_(isinstance(M @ array([1,2,3]), ndarray))
        assert_(isinstance(M @ matrix([1,2,3]).T, np.matrix))

        # ensure exception is raised for improper dimensions
        bad_vecs = [array([1,2]), array([1,2,3,4]), array([[1],[2]]),
                    matrix([1,2,3]), matrix([[1],[2]])]
        for x in bad_vecs:
            assert_raises(ValueError, M.__mul__, x)

        # The current relationship between sparse matrix products and array
        # products is as follows:
        assert_array_almost_equal(M@array([1,2,3]), dot(M.toarray(),[1,2,3]))
        assert_array_almost_equal(M@[[1],[2],[3]],
                                  asmatrix(dot(M.toarray(),[1,2,3])).T)
        # Note that the result of M * x is dense if x has a singleton dimension.

        # Currently M.matvec(asarray(col)) is rank-1, whereas M.matvec(col)
        # is rank-2.  Is this desirable?
def test_matmat_sparse(self): a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]) a2 = array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]) b = matrix([[0,1],[1,0],[0,2]],'d') asp = self.spcreator(a) bsp = self.spcreator(b) assert_array_almost_equal((asp @ bsp).toarray(), a @ b) assert_array_almost_equal(asp @ b, a @ b) assert_array_almost_equal(a @ bsp, a @ b) assert_array_almost_equal(a2 @ bsp, a @ b) # Now try performing cross-type multplication: csp = bsp.tocsc() c = b want = a @ c assert_array_almost_equal((asp @ csp).toarray(), want) assert_array_almost_equal(asp @ c, want) assert_array_almost_equal(a @ csp, want) assert_array_almost_equal(a2 @ csp, want) csp = bsp.tocsr() assert_array_almost_equal((asp @ csp).toarray(), want) assert_array_almost_equal(asp @ c, want) assert_array_almost_equal(a @ csp, want) assert_array_almost_equal(a2 @ csp, want) csp = bsp.tocoo() assert_array_almost_equal((asp @ csp).toarray(), want) assert_array_almost_equal(asp @ c, want) assert_array_almost_equal(a @ csp, want) assert_array_almost_equal(a2 @ csp, want) # Test provided by Andy Fraser, 2006-03-26 L = 30 frac = .3 random.seed(0) # make runs repeatable A = zeros((L,2)) for i in range(L): for j in range(2): r = random.random() if r < frac: A[i,j] = r/frac A = self.spcreator(A) B = A @ A.T assert_array_almost_equal(B.toarray(), A.toarray() @ A.T.toarray()) assert_array_almost_equal(B.toarray(), A.toarray() @ A.toarray().T) # check dimension mismatch 2x2 times 3x2 A = self.spcreator([[1,2],[3,4]]) B = self.spcreator([[1,2],[3,4],[5,6]]) assert_raises(ValueError, A.__matmul__, B) if A._is_array: assert_raises(ValueError, A.__mul__, B) def test_matmat_dense(self): a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]) asp = self.spcreator(a) # check both array and matrix types bs = [array([[1,2],[3,4],[5,6]]), matrix([[1,2],[3,4],[5,6]])] for b in bs: result = asp @ b assert_(isinstance(result, type(b))) assert_equal(result.shape, (4,2)) assert_equal(result, dot(a,b)) def 
test_sparse_format_conversions(self): A = sparse.kron([[1,0,2],[0,3,4],[5,0,0]], [[1,2],[0,3]]) D = A.toarray() A = self.spcreator(A) for format in ['bsr','coo','csc','csr','dia','dok','lil']: a = A.asformat(format) assert_equal(a.format,format) assert_array_equal(a.toarray(), D) b = self.spcreator(D+3j).asformat(format) assert_equal(b.format,format) assert_array_equal(b.toarray(), D+3j) c = eval(format + '_matrix')(A) assert_equal(c.format,format) assert_array_equal(c.toarray(), D) for format in ['array', 'dense']: a = A.asformat(format) assert_array_equal(a, D) b = self.spcreator(D+3j).asformat(format) assert_array_equal(b, D+3j) def test_tobsr(self): x = array([[1,0,2,0],[0,0,0,0],[0,0,4,5]]) y = array([[0,1,2],[3,0,5]]) A = kron(x,y) Asp = self.spcreator(A) for format in ['bsr']: fn = getattr(Asp, 'to' + format) for X in [1, 2, 3, 6]: for Y in [1, 2, 3, 4, 6, 12]: assert_equal(fn(blocksize=(X, Y)).toarray(), A) def test_transpose(self): dat_1 = self.dat dat_2 = np.array([[]]) matrices = [dat_1, dat_2] def check(dtype, j): dat = array(matrices[j], dtype=dtype) datsp = self.spcreator(dat) a = datsp.transpose() b = dat.transpose() assert_array_equal(a.toarray(), b) assert_array_equal(a.transpose().toarray(), dat) assert_equal(a.dtype, b.dtype) # See gh-5987 empty = self.spcreator((3, 4)) assert_array_equal(np.transpose(empty).toarray(), np.transpose(zeros((3, 4)))) assert_array_equal(empty.T.toarray(), zeros((4, 3))) assert_raises(ValueError, empty.transpose, axes=0) for dtype in self.checked_dtypes: for j in range(len(matrices)): check(dtype, j) def test_add_dense(self): def check(dtype): dat = self.dat_dtypes[dtype] datsp = self.datsp_dtypes[dtype] # adding a dense matrix to a sparse matrix sum1 = dat + datsp assert_array_equal(sum1, dat + dat) sum2 = datsp + dat assert_array_equal(sum2, dat + dat) for dtype in self.math_dtypes: check(dtype) def test_sub_dense(self): # subtracting a dense matrix to/from a sparse matrix def check(dtype): dat = 
self.dat_dtypes[dtype] datsp = self.datsp_dtypes[dtype] # Behavior is different for bool. if dat.dtype == bool: sum1 = dat - datsp assert_array_equal(sum1, dat - dat) sum2 = datsp - dat assert_array_equal(sum2, dat - dat) else: # Manually add to avoid upcasting from scalar # multiplication. sum1 = (dat + dat + dat) - datsp assert_array_equal(sum1, dat + dat) sum2 = (datsp + datsp + datsp) - dat assert_array_equal(sum2, dat + dat) for dtype in self.math_dtypes: if dtype == np.dtype('bool'): # boolean array subtraction deprecated in 1.9.0 continue check(dtype) def test_maximum_minimum(self): A_dense = np.array([[1, 0, 3], [0, 4, 5], [0, 0, 0]]) B_dense = np.array([[1, 1, 2], [0, 3, 6], [1, -1, 0]]) A_dense_cpx = np.array([[1, 0, 3], [0, 4+2j, 5], [0, 1j, -1j]]) def check(dtype, dtype2, btype): if np.issubdtype(dtype, np.complexfloating): A = self.spcreator(A_dense_cpx.astype(dtype)) else: A = self.spcreator(A_dense.astype(dtype)) if btype == 'scalar': B = dtype2.type(1) elif btype == 'scalar2': B = dtype2.type(-1) elif btype == 'dense': B = B_dense.astype(dtype2) elif btype == 'sparse': B = self.spcreator(B_dense.astype(dtype2)) else: raise ValueError() with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "Taking maximum .minimum. with > 0 .< 0. 
number results to a dense matrix") max_s = A.maximum(B) min_s = A.minimum(B) max_d = np.maximum(toarray(A), toarray(B)) assert_array_equal(toarray(max_s), max_d) assert_equal(max_s.dtype, max_d.dtype) min_d = np.minimum(toarray(A), toarray(B)) assert_array_equal(toarray(min_s), min_d) assert_equal(min_s.dtype, min_d.dtype) for dtype in self.math_dtypes: for dtype2 in [np.int8, np.float_, np.complex_]: for btype in ['scalar', 'scalar2', 'dense', 'sparse']: check(np.dtype(dtype), np.dtype(dtype2), btype) def test_copy(self): # Check whether the copy=True and copy=False keywords work A = self.datsp # check that copy preserves format assert_equal(A.copy().format, A.format) assert_equal(A.__class__(A,copy=True).format, A.format) assert_equal(A.__class__(A,copy=False).format, A.format) assert_equal(A.copy().toarray(), A.toarray()) assert_equal(A.__class__(A, copy=True).toarray(), A.toarray()) assert_equal(A.__class__(A, copy=False).toarray(), A.toarray()) # check that XXX_matrix.toXXX() works toself = getattr(A,'to' + A.format) assert_(toself() is A) assert_(toself(copy=False) is A) assert_equal(toself(copy=True).format, A.format) assert_equal(toself(copy=True).toarray(), A.toarray()) # check whether the data is copied? assert_(not sparse_may_share_memory(A.copy(), A)) # test that __iter__ is compatible with NumPy matrix def test_iterator(self): B = matrix(np.arange(50).reshape(5, 10)) A = self.spcreator(B) for x, y in zip(A, B): assert_equal(x.toarray(), y) def test_size_zero_matrix_arithmetic(self): # Test basic matrix arithmetic with shapes like (0,0), (10,0), # (0, 3), etc. mat = array([]) a = mat.reshape((0, 0)) b = mat.reshape((0, 1)) c = mat.reshape((0, 5)) d = mat.reshape((1, 0)) e = mat.reshape((5, 0)) f = np.ones([5, 5]) asp = self.spcreator(a) bsp = self.spcreator(b) csp = self.spcreator(c) dsp = self.spcreator(d) esp = self.spcreator(e) fsp = self.spcreator(f) # matrix product. 
assert_array_equal(asp.dot(asp).toarray(), np.dot(a, a)) assert_array_equal(bsp.dot(dsp).toarray(), np.dot(b, d)) assert_array_equal(dsp.dot(bsp).toarray(), np.dot(d, b)) assert_array_equal(csp.dot(esp).toarray(), np.dot(c, e)) assert_array_equal(csp.dot(fsp).toarray(), np.dot(c, f)) assert_array_equal(esp.dot(csp).toarray(), np.dot(e, c)) assert_array_equal(dsp.dot(csp).toarray(), np.dot(d, c)) assert_array_equal(fsp.dot(esp).toarray(), np.dot(f, e)) # bad matrix products assert_raises(ValueError, dsp.dot, e) assert_raises(ValueError, asp.dot, d) # elemente-wise multiplication assert_array_equal(asp.multiply(asp).toarray(), np.multiply(a, a)) assert_array_equal(bsp.multiply(bsp).toarray(), np.multiply(b, b)) assert_array_equal(dsp.multiply(dsp).toarray(), np.multiply(d, d)) assert_array_equal(asp.multiply(a).toarray(), np.multiply(a, a)) assert_array_equal(bsp.multiply(b).toarray(), np.multiply(b, b)) assert_array_equal(dsp.multiply(d).toarray(), np.multiply(d, d)) assert_array_equal(asp.multiply(6).toarray(), np.multiply(a, 6)) assert_array_equal(bsp.multiply(6).toarray(), np.multiply(b, 6)) assert_array_equal(dsp.multiply(6).toarray(), np.multiply(d, 6)) # bad element-wise multiplication assert_raises(ValueError, asp.multiply, c) assert_raises(ValueError, esp.multiply, c) # Addition assert_array_equal(asp.__add__(asp).toarray(), a.__add__(a)) assert_array_equal(bsp.__add__(bsp).toarray(), b.__add__(b)) assert_array_equal(dsp.__add__(dsp).toarray(), d.__add__(d)) # bad addition assert_raises(ValueError, asp.__add__, dsp) assert_raises(ValueError, bsp.__add__, asp) def test_size_zero_conversions(self): mat = array([]) a = mat.reshape((0, 0)) b = mat.reshape((0, 5)) c = mat.reshape((5, 0)) for m in [a, b, c]: spm = self.spcreator(m) assert_array_equal(spm.tocoo().toarray(), m) assert_array_equal(spm.tocsr().toarray(), m) assert_array_equal(spm.tocsc().toarray(), m) assert_array_equal(spm.tolil().toarray(), m) assert_array_equal(spm.todok().toarray(), m) 
assert_array_equal(spm.tobsr().toarray(), m) def test_pickle(self): import pickle sup = suppress_warnings() sup.filter(SparseEfficiencyWarning) @sup def check(): datsp = self.datsp.copy() for protocol in range(pickle.HIGHEST_PROTOCOL): sploaded = pickle.loads(pickle.dumps(datsp, protocol=protocol)) assert_equal(datsp.shape, sploaded.shape) assert_array_equal(datsp.toarray(), sploaded.toarray()) assert_equal(datsp.format, sploaded.format) for key, val in datsp.__dict__.items(): if isinstance(val, np.ndarray): assert_array_equal(val, sploaded.__dict__[key]) else: assert_(val == sploaded.__dict__[key]) check() def test_unary_ufunc_overrides(self): def check(name): if name == "sign": pytest.skip("sign conflicts with comparison op " "support on Numpy") if self.spcreator in (dok_matrix, lil_matrix): pytest.skip("Unary ops not implemented for dok/lil") ufunc = getattr(np, name) X = self.spcreator(np.arange(20).reshape(4, 5) / 20.) X0 = ufunc(X.toarray()) X2 = ufunc(X) assert_array_equal(X2.toarray(), X0) for name in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh", "arcsinh", "arctanh", "rint", "sign", "expm1", "log1p", "deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt", "abs"]: check(name) def test_resize(self): # resize(shape) resizes the matrix in-place D = np.array([[1, 0, 3, 4], [2, 0, 0, 0], [3, 0, 0, 0]]) S = self.spcreator(D) assert_(S.resize((3, 2)) is None) assert_array_equal(S.toarray(), [[1, 0], [2, 0], [3, 0]]) S.resize((2, 2)) assert_array_equal(S.toarray(), [[1, 0], [2, 0]]) S.resize((3, 2)) assert_array_equal(S.toarray(), [[1, 0], [2, 0], [0, 0]]) S.resize((3, 3)) assert_array_equal(S.toarray(), [[1, 0, 0], [2, 0, 0], [0, 0, 0]]) # test no-op S.resize((3, 3)) assert_array_equal(S.toarray(), [[1, 0, 0], [2, 0, 0], [0, 0, 0]]) # test *args S.resize(3, 2) assert_array_equal(S.toarray(), [[1, 0], [2, 0], [0, 0]]) for bad_shape in [1, (-1, 2), (2, -1), (1, 2, 3)]: assert_raises(ValueError, S.resize, bad_shape) def test_constructor1_base(self): A = 
self.datsp self_format = A.format C = A.__class__(A, copy=False) assert_array_equal_dtype(A.toarray(), C.toarray()) if self_format not in NON_ARRAY_BACKED_FORMATS: assert_(sparse_may_share_memory(A, C)) C = A.__class__(A, dtype=A.dtype, copy=False) assert_array_equal_dtype(A.toarray(), C.toarray()) if self_format not in NON_ARRAY_BACKED_FORMATS: assert_(sparse_may_share_memory(A, C)) C = A.__class__(A, dtype=np.float32, copy=False) assert_array_equal(A.toarray(), C.toarray()) C = A.__class__(A, copy=True) assert_array_equal_dtype(A.toarray(), C.toarray()) assert_(not sparse_may_share_memory(A, C)) for other_format in ['csr', 'csc', 'coo', 'dia', 'dok', 'lil']: if other_format == self_format: continue B = A.asformat(other_format) C = A.__class__(B, copy=False) assert_array_equal_dtype(A.toarray(), C.toarray()) C = A.__class__(B, copy=True) assert_array_equal_dtype(A.toarray(), C.toarray()) assert_(not sparse_may_share_memory(B, C)) class _TestInplaceArithmetic: def test_inplace_dense(self): a = np.ones((3, 4)) b = self.spcreator(a) x = a.copy() y = a.copy() x += a y += b assert_array_equal(x, y) x = a.copy() y = a.copy() x -= a y -= b assert_array_equal(x, y) x = a.copy() y = a.copy() if b._is_array: assert_raises(ValueError, operator.imul, x, b.T) x = x * a y *= b else: # This is matrix product, from __rmul__ assert_raises(ValueError, operator.imul, x, b) x = x.dot(a.T) y *= b.T assert_array_equal(x, y) # Matrix (non-elementwise) floor division is not defined assert_raises(TypeError, operator.ifloordiv, x, b) def test_imul_scalar(self): def check(dtype): dat = self.dat_dtypes[dtype] datsp = self.datsp_dtypes[dtype] # Avoid implicit casting. 
if np.can_cast(int, dtype, casting='same_kind'): a = datsp.copy() a *= 2 b = dat.copy() b *= 2 assert_array_equal(b, a.toarray()) if np.can_cast(float, dtype, casting='same_kind'): a = datsp.copy() a *= 17.3 b = dat.copy() b *= 17.3 assert_array_equal(b, a.toarray()) for dtype in self.math_dtypes: check(dtype) def test_idiv_scalar(self): def check(dtype): dat = self.dat_dtypes[dtype] datsp = self.datsp_dtypes[dtype] if np.can_cast(int, dtype, casting='same_kind'): a = datsp.copy() a /= 2 b = dat.copy() b /= 2 assert_array_equal(b, a.toarray()) if np.can_cast(float, dtype, casting='same_kind'): a = datsp.copy() a /= 17.3 b = dat.copy() b /= 17.3 assert_array_equal(b, a.toarray()) for dtype in self.math_dtypes: # /= should only be used with float dtypes to avoid implicit # casting. if not np.can_cast(dtype, np.int_): check(dtype) def test_inplace_success(self): # Inplace ops should work even if a specialized version is not # implemented, falling back to x = x <op> y a = self.spcreator(np.eye(5)) b = self.spcreator(np.eye(5)) bp = self.spcreator(np.eye(5)) b += a bp = bp + a assert_allclose(b.toarray(), bp.toarray()) b *= a bp = bp * a assert_allclose(b.toarray(), bp.toarray()) b -= a bp = bp - a assert_allclose(b.toarray(), bp.toarray()) assert_raises(TypeError, operator.ifloordiv, a, b) class _TestGetSet: def test_getelement(self): def check(dtype): D = array([[1,0,0], [4,3,0], [0,2,0], [0,0,0]], dtype=dtype) A = self.spcreator(D) M,N = D.shape for i in range(-M, M): for j in range(-N, N): assert_equal(A[i,j], D[i,j]) assert_equal(type(A[1,1]), dtype) for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1), (1, 2, 3)]: assert_raises((IndexError, TypeError), A.__getitem__, ij) for dtype in supported_dtypes: check(np.dtype(dtype)) def test_setelement(self): def check(dtype): A = self.spcreator((3,4), dtype=dtype) with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "Changing the sparsity structure of a cs[cr]_matrix is expensive") A[0, 0] = dtype.type(0) # bug 870 
A[1, 2] = dtype.type(4.0) A[0, 1] = dtype.type(3) A[2, 0] = dtype.type(2.0) A[0,-1] = dtype.type(8) A[-1,-2] = dtype.type(7) A[0, 1] = dtype.type(5) if dtype != np.bool_: assert_array_equal( A.toarray(), [ [0, 5, 0, 8], [0, 0, 4, 0], [2, 0, 7, 0] ] ) for ij in [(0,4),(-1,4),(3,0),(3,4),(3,-1)]: assert_raises(IndexError, A.__setitem__, ij, 123.0) for v in [[1,2,3], array([1,2,3])]: assert_raises(ValueError, A.__setitem__, (0,0), v) if (not np.issubdtype(dtype, np.complexfloating) and dtype != np.bool_): for v in [3j]: assert_raises(TypeError, A.__setitem__, (0,0), v) for dtype in supported_dtypes: check(np.dtype(dtype)) def test_negative_index_assignment(self): # Regression test for github issue 4428. def check(dtype): A = self.spcreator((3, 10), dtype=dtype) with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "Changing the sparsity structure of a cs[cr]_matrix is expensive") A[0, -4] = 1 assert_equal(A[0, -4], 1) for dtype in self.math_dtypes: check(np.dtype(dtype)) def test_scalar_assign_2(self): n, m = (5, 10) def _test_set(i, j, nitems): msg = f"{i!r} ; {j!r} ; {nitems!r}" A = self.spcreator((n, m)) with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "Changing the sparsity structure of a cs[cr]_matrix is expensive") A[i, j] = 1 assert_almost_equal(A.sum(), nitems, err_msg=msg) assert_almost_equal(A[i, j], 1, err_msg=msg) # [i,j] for i, j in [(2, 3), (-1, 8), (-1, -2), (array(-1), -2), (-1, array(-2)), (array(-1), array(-2))]: _test_set(i, j, 1) def test_index_scalar_assign(self): A = self.spcreator((5, 5)) B = np.zeros((5, 5)) with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "Changing the sparsity structure of a cs[cr]_matrix is expensive") for C in [A, B]: C[0,1] = 1 C[3,0] = 4 C[3,0] = 9 assert_array_equal(A.toarray(), B) class _TestSolve: def test_solve(self): # Test whether the lu_solve command segfaults, as reported by Nils # Wagner for a 64-bit machine, 02 March 2005 (EJS) n = 20 np.random.seed(0) # 
make tests repeatable A = zeros((n,n), dtype=complex) x = np.random.rand(n) y = np.random.rand(n-1)+1j*np.random.rand(n-1) r = np.random.rand(n) for i in range(len(x)): A[i,i] = x[i] for i in range(len(y)): A[i,i+1] = y[i] A[i+1,i] = conjugate(y[i]) A = self.spcreator(A) with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "splu converted its input to CSC format") x = splu(A).solve(r) assert_almost_equal(A @ x,r) class _TestSlicing: def test_dtype_preservation(self): assert_equal(self.spcreator((1,10), dtype=np.int16)[0,1:5].dtype, np.int16) assert_equal(self.spcreator((1,10), dtype=np.int32)[0,1:5].dtype, np.int32) assert_equal(self.spcreator((1,10), dtype=np.float32)[0,1:5].dtype, np.float32) assert_equal(self.spcreator((1,10), dtype=np.float64)[0,1:5].dtype, np.float64) def test_dtype_preservation_empty_slice(self): # This should be parametrized with pytest, but something in the parent # class creation used in this file breaks pytest.mark.parametrize. for dt in [np.int16, np.int32, np.float32, np.float64]: A = self.spcreator((3, 2), dtype=dt) assert_equal(A[:, 0:0:2].dtype, dt) assert_equal(A[0:0:2, :].dtype, dt) assert_equal(A[0, 0:0:2].dtype, dt) assert_equal(A[0:0:2, 0].dtype, dt) def test_get_horiz_slice(self): B = asmatrix(arange(50.).reshape(5,10)) A = self.spcreator(B) assert_array_equal(B[1, :], A[1, :].toarray()) assert_array_equal(B[1, 2:5], A[1, 2:5].toarray()) C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]]) D = self.spcreator(C) assert_array_equal(C[1, 1:3], D[1, 1:3].toarray()) # Now test slicing when a row contains only zeros E = matrix([[1, 2, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]]) F = self.spcreator(E) assert_array_equal(E[1, 1:3], F[1, 1:3].toarray()) assert_array_equal(E[2, -2:], F[2, -2:].A) # The following should raise exceptions: assert_raises(IndexError, A.__getitem__, (slice(None), 11)) assert_raises(IndexError, A.__getitem__, (6, slice(3, 7))) def test_get_vert_slice(self): B = arange(50.).reshape(5, 10) A = 
# NOTE(review): reformatted from whitespace-mangled source; tokens preserved.
# The first statement below is a fragment — its assignment target ("A = ")
# is cut off at the chunk boundary; TODO confirm against the full file.
        self.spcreator(B)
        assert_array_equal(B[2:5, [0]], A[2:5, 0].toarray())
        assert_array_equal(B[:, [1]], A[:, 1].toarray())
        C = array([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]])
        D = self.spcreator(C)
        assert_array_equal(C[1:3, [1]], D[1:3, 1].toarray())
        assert_array_equal(C[:, [2]], D[:, 2].toarray())
        # Now test slicing when a column contains only zeros
        E = array([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
        F = self.spcreator(E)
        assert_array_equal(E[:, [1]], F[:, 1].toarray())
        assert_array_equal(E[-2:, [2]], F[-2:, 2].toarray())
        # The following should raise exceptions:
        assert_raises(IndexError, A.__getitem__, (slice(None), 11))
        assert_raises(IndexError, A.__getitem__, (6, slice(3, 7)))

    def test_get_slices(self):
        # 2-D slice / 2-D slice indexing must match dense NumPy semantics.
        B = arange(50.).reshape(5, 10)
        A = self.spcreator(B)
        assert_array_equal(A[2:5, 0:3].toarray(), B[2:5, 0:3])
        assert_array_equal(A[1:, :-1].toarray(), B[1:, :-1])
        assert_array_equal(A[:-1, 1:].toarray(), B[:-1, 1:])
        # Now test slicing when a column contains only zeros
        E = array([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
        F = self.spcreator(E)
        assert_array_equal(E[1:2, 1:2], F[1:2, 1:2].toarray())
        assert_array_equal(E[:, 1:], F[:, 1:].toarray())

    def test_non_unit_stride_2d_indexing(self):
        # Regression test -- used to silently ignore the stride.
        v0 = np.random.rand(50, 50)
        try:
            v = self.spcreator(v0)[0:25:2, 2:30:3]
        except ValueError:
            # if unsupported
            raise pytest.skip("feature not implemented")
        assert_array_equal(v.toarray(), v0[0:25:2, 2:30:3])

    def test_slicing_2(self):
        # Scalar, slice and 0-d-array indexing, compared against np.matrix.
        B = asmatrix(arange(50).reshape(5, 10))
        A = self.spcreator(B)

        # [i,j]
        assert_equal(A[2, 3], B[2, 3])
        assert_equal(A[-1, 8], B[-1, 8])
        assert_equal(A[-1, -2], B[-1, -2])
        assert_equal(A[array(-1), -2], B[-1, -2])
        assert_equal(A[-1, array(-2)], B[-1, -2])
        assert_equal(A[array(-1), array(-2)], B[-1, -2])

        # [i,1:2]
        assert_equal(A[2, :].toarray(), B[2, :])
        assert_equal(A[2, 5:-2].toarray(), B[2, 5:-2])
        assert_equal(A[array(2), 5:-2].toarray(), B[2, 5:-2])

        # [1:2,j]
        assert_equal(A[:, 2].toarray(), B[:, 2])
        assert_equal(A[3:4, 9].toarray(), B[3:4, 9])
        assert_equal(A[1:4, -5].toarray(), B[1:4, -5])
        assert_equal(A[2:-1, 3].toarray(), B[2:-1, 3])
        assert_equal(A[2:-1, array(3)].toarray(), B[2:-1, 3])

        # [1:2,1:2]
        assert_equal(A[1:2, 1:2].toarray(), B[1:2, 1:2])
        assert_equal(A[4:, 3:].toarray(), B[4:, 3:])
        assert_equal(A[:4, :5].toarray(), B[:4, :5])
        assert_equal(A[2:-1, :5].toarray(), B[2:-1, :5])

        # [i]
        assert_equal(A[1, :].toarray(), B[1, :])
        assert_equal(A[-2, :].toarray(), B[-2, :])
        assert_equal(A[array(-2), :].toarray(), B[-2, :])

        # [1:2]
        assert_equal(A[1:4].toarray(), B[1:4])
        assert_equal(A[1:-2].toarray(), B[1:-2])

        # Check bug reported by Robert Cimrman:
        # http://thread.gmane.org/gmane.comp.python.scientific.devel/7986 (dead link)
        s = slice(int8(2), int8(4), None)
        assert_equal(A[s, :].toarray(), B[2:4, :])
        assert_equal(A[:, s].toarray(), B[:, 2:4])

    def test_slicing_3(self):
        # Exhaustive cross-product of slice/scalar index combinations,
        # including out-of-range slices, compared against np.matrix.
        B = asmatrix(arange(50).reshape(5, 10))
        A = self.spcreator(B)

        s_ = np.s_
        slices = [s_[:2], s_[1:2], s_[3:], s_[3::2], s_[15:20], s_[3:2],
                  s_[8:3:-1], s_[4::-2], s_[:5:-1],
                  0, 1, s_[:], s_[1:5], -1, -2, -5,
                  array(-1), np.int8(-3)]

        def check_1(a):
            # Single-axis indexing must agree with the dense result.
            x = A[a]
            y = B[a]
            if y.shape == ():
                assert_equal(x, y, repr(a))
            else:
                if x.size == 0 and y.size == 0:
                    pass
                else:
                    assert_array_equal(x.toarray(), y, repr(a))

        for j, a in enumerate(slices):
            check_1(a)

        def check_2(a, b):
            # Indexing np.matrix with 0-d arrays seems to be broken,
            # as they seem not to be treated as scalars.
            # https://github.com/numpy/numpy/issues/3110
            if isinstance(a, np.ndarray):
                ai = int(a)
            else:
                ai = a
            if isinstance(b, np.ndarray):
                bi = int(b)
            else:
                bi = b

            x = A[a, b]
            y = B[ai, bi]

            if y.shape == ():
                assert_equal(x, y, repr((a, b)))
            else:
                if x.size == 0 and y.size == 0:
                    pass
                else:
                    assert_array_equal(x.toarray(), y, repr((a, b)))

        for i, a in enumerate(slices):
            for j, b in enumerate(slices):
                check_2(a, b)

        # Check out of bounds etc. systematically
        extra_slices = []
        for a, b, c in itertools.product(*([(None, 0, 1, 2, 5, 15,
                                             -1, -2, 5, -15)] * 3)):
            if c == 0:
                continue
            extra_slices.append(slice(a, b, c))
        for a in extra_slices:
            check_2(a, a)
            check_2(a, -2)
            check_2(-2, a)

    def test_ellipsis_slicing(self):
        # A single Ellipsis behaves as in dense NumPy indexing.
        b = asmatrix(arange(50).reshape(5, 10))
        a = self.spcreator(b)

        assert_array_equal(a[...].toarray(), b[...].A)
        assert_array_equal(a[...,].toarray(), b[...,].A)

        assert_array_equal(a[1, ...].toarray(), b[1, ...].A)
        assert_array_equal(a[..., 1].toarray(), b[..., 1].A)
        assert_array_equal(a[1:, ...].toarray(), b[1:, ...].A)
        assert_array_equal(a[..., 1:].toarray(), b[..., 1:].A)

        assert_array_equal(a[1:, 1, ...].toarray(), b[1:, 1, ...].A)
        assert_array_equal(a[1, ..., 1:].toarray(), b[1, ..., 1:].A)
        # These return ints
        assert_equal(a[1, 1, ...], b[1, 1, ...])
        assert_equal(a[1, ..., 1], b[1, ..., 1])

    def test_multiple_ellipsis_slicing(self):
        # Multiple Ellipsis indices are deprecated (removal slated for v1.13)
        # but must still produce the equivalent full-slice result.
        b = asmatrix(arange(50).reshape(5, 10))
        a = self.spcreator(b)

        with pytest.deprecated_call(match='removed in v1.13'):
            assert_array_equal(a[..., ...].toarray(), b[:, :].A)
        with pytest.deprecated_call(match='removed in v1.13'):
            assert_array_equal(a[..., ..., ...].toarray(), b[:, :].A)
        with pytest.deprecated_call(match='removed in v1.13'):
            assert_array_equal(a[1, ..., ...].toarray(), b[1, :].A)
        with pytest.deprecated_call(match='removed in v1.13'):
            assert_array_equal(a[1:, ..., ...].toarray(), b[1:, :].A)
        with pytest.deprecated_call(match='removed in v1.13'):
            assert_array_equal(a[..., ..., 1:].toarray(), b[:, 1:].A)
        with pytest.deprecated_call(match='removed in v1.13'):
            assert_array_equal(a[..., ..., 1].toarray(), b[:, 1].A)


class _TestSlicingAssign:
    def test_slice_scalar_assign(self):
        # Scalar assignment through slices behaves like dense assignment,
        # including empty slices such as 3:0.
        A = self.spcreator((5, 5))
        B = np.zeros((5, 5))
        with suppress_warnings() as sup:
            sup.filter(SparseEfficiencyWarning,
                       "Changing the sparsity structure of a cs[cr]_matrix is expensive")
            for C in [A, B]:
                C[0:1, 1] = 1
                C[3:0, 0] = 4
                C[3:4, 0] = 9
                C[0, 4:] = 1
                C[3::-1, 4:] = 9
        assert_array_equal(A.toarray(), B)

    def test_slice_assign_2(self):
        # Row-index/column-slice assignment matches the dense equivalent.
        n, m = (5, 10)

        def _test_set(i, j):
            msg = f"i={i!r}; j={j!r}"
            A = self.spcreator((n, m))
            with suppress_warnings() as sup:
                sup.filter(SparseEfficiencyWarning,
                           "Changing the sparsity structure of a cs[cr]_matrix is expensive")
                A[i, j] = 1
            B = np.zeros((n, m))
            B[i, j] = 1
            assert_array_almost_equal(A.toarray(), B, err_msg=msg)

        # [i,1:2]
        for i, j in [(2, slice(3)), (2, slice(None, 10, 4)),
                     (2, slice(5, -2)), (array(2), slice(5, -2))]:
            _test_set(i, j)

    def test_self_self_assignment(self):
        # Tests whether a row of one lil_matrix can be assigned to
        # another.
# NOTE(review): reformatted from whitespace-mangled source; tokens preserved.
# The first lines below are the body of test_self_self_assignment, whose
# "def" line sits on the previous source line.
        B = self.spcreator((4, 3))
        with suppress_warnings() as sup:
            sup.filter(SparseEfficiencyWarning,
                       "Changing the sparsity structure of a cs[cr]_matrix is expensive")
            B[0, 0] = 2
            B[1, 2] = 7
            B[2, 1] = 3
            B[3, 0] = 10
            A = B / 10
            B[0, :] = A[0, :]
            assert_array_equal(A[0, :].A, B[0, :].A)
            A = B / 10
            B[:, :] = A[:1, :1]
            assert_array_equal(np.zeros((4, 3)) + A[0, 0], B.A)
            A = B / 10
            B[:-1, 0] = A[0, :].T
            assert_array_equal(A[0, :].A.T, B[:-1, 0].A)

    def test_slice_assignment(self):
        # Assigning a sparse block (here csc) into a slice of another
        # sparse matrix, plus whole-matrix self-assignment B[:,:] = B+B.
        B = self.spcreator((4, 3))
        expected = array([[10, 0, 0],
                          [0, 0, 6],
                          [0, 14, 0],
                          [0, 0, 0]])
        block = [[1, 0], [0, 4]]
        with suppress_warnings() as sup:
            sup.filter(SparseEfficiencyWarning,
                       "Changing the sparsity structure of a cs[cr]_matrix is expensive")
            B[0, 0] = 5
            B[1, 2] = 3
            B[2, 1] = 7
            B[:, :] = B + B
            assert_array_equal(B.toarray(), expected)
            B[:2, :2] = csc_matrix(array(block))
            assert_array_equal(B.toarray()[:2, :2], block)

    def test_sparsity_modifying_assignment(self):
        # Row-range assignment from a csr identity overwrites structure.
        B = self.spcreator((4, 3))
        with suppress_warnings() as sup:
            sup.filter(SparseEfficiencyWarning,
                       "Changing the sparsity structure of a cs[cr]_matrix is expensive")
            B[0, 0] = 5
            B[1, 2] = 3
            B[2, 1] = 7
            B[3, 0] = 10
            B[:3] = csr_matrix(np.eye(3))
        expected = array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [10, 0, 0]])
        assert_array_equal(B.toarray(), expected)

    def test_set_slice(self):
        # Systematic slice/scalar assignment sweep against a dense mirror,
        # then the error cases for shape-mismatched values.
        A = self.spcreator((5, 10))
        B = array(zeros((5, 10), float))
        s_ = np.s_
        slices = [s_[:2], s_[1:2], s_[3:], s_[3::2], s_[8:3:-1],
                  s_[4::-2], s_[:5:-1],
                  0, 1, s_[:], s_[1:5], -1, -2, -5,
                  array(-1), np.int8(-3)]

        with suppress_warnings() as sup:
            sup.filter(SparseEfficiencyWarning,
                       "Changing the sparsity structure of a cs[cr]_matrix is expensive")
            for j, a in enumerate(slices):
                A[a] = j
                B[a] = j
                assert_array_equal(A.toarray(), B, repr(a))

            for i, a in enumerate(slices):
                for j, b in enumerate(slices):
                    A[a, b] = 10 * i + 1000 * (j + 1)
                    B[a, b] = 10 * i + 1000 * (j + 1)
                    assert_array_equal(A.toarray(), B, repr((a, b)))

            A[0, 1:10:2] = range(1, 10, 2)
            B[0, 1:10:2] = range(1, 10, 2)
            assert_array_equal(A.toarray(), B)
            A[1:5:2, 0] = np.arange(1, 5, 2)[:, None]
            B[1:5:2, 0] = np.arange(1, 5, 2)[:]
            assert_array_equal(A.toarray(), B)

        # The next commands should raise exceptions
        assert_raises(ValueError, A.__setitem__, (0, 0), list(range(100)))
        assert_raises(ValueError, A.__setitem__, (0, 0), arange(100))
        assert_raises(ValueError, A.__setitem__, (0, slice(None)),
                      list(range(100)))
        assert_raises(ValueError, A.__setitem__, (slice(None), 1),
                      list(range(100)))
        assert_raises(ValueError, A.__setitem__, (slice(None), 1), A.copy())
        assert_raises(ValueError, A.__setitem__,
                      ([[1, 2, 3], [0, 3, 4]], [1, 2, 3]), [1, 2, 3, 4])
        assert_raises(ValueError, A.__setitem__,
                      ([[1, 2, 3], [0, 3, 4], [4, 1, 3]],
                       [[1, 2, 4], [0, 1, 3]]), [2, 3, 4])
        assert_raises(ValueError, A.__setitem__, (slice(4), 0),
                      [[1, 2], [3, 4]])

    def test_assign_empty(self):
        # Assigning an all-zero sparse block into a slice clears it.
        A = self.spcreator(np.ones((2, 3)))
        B = self.spcreator((1, 2))
        A[1, :2] = B
        assert_array_equal(A.toarray(), [[1, 1, 1], [0, 0, 1]])

    def test_assign_1d_slice(self):
        # 1-D dense vectors assign into row/column slices.
        A = self.spcreator(np.ones((3, 3)))
        x = np.zeros(3)
        A[:, 0] = x
        A[1, :] = x
        assert_array_equal(A.toarray(),
                           [[0, 1, 1], [0, 0, 0], [0, 1, 1]])


class _TestFancyIndexing:
    """Tests fancy indexing features.  The tests for any matrix formats
    that implement these features should derive from this class.
    """
    def test_dtype_preservation_empty_index(self):
        # This should be parametrized with pytest, but something in the parent
        # class creation used in this file breaks pytest.mark.parametrize.
# NOTE(review): reformatted from whitespace-mangled source; tokens preserved.
# The first lines below are the body of test_dtype_preservation_empty_index,
# whose "def" line sits on the previous source line.
        for dt in [np.int16, np.int32, np.float32, np.float64]:
            A = self.spcreator((3, 2), dtype=dt)
            # Empty fancy-index results keep the original dtype.
            assert_equal(A[:, [False, False]].dtype, dt)
            assert_equal(A[[False, False, False], :].dtype, dt)
            assert_equal(A[:, []].dtype, dt)
            assert_equal(A[[], :].dtype, dt)

    def test_bad_index(self):
        # Non-index objects and mismatched index lengths must raise.
        A = self.spcreator(np.zeros([5, 5]))
        assert_raises((IndexError, ValueError, TypeError), A.__getitem__, "foo")
        assert_raises((IndexError, ValueError, TypeError), A.__getitem__, (2, "foo"))
        assert_raises((IndexError, ValueError), A.__getitem__,
                      ([1, 2, 3], [1, 2, 3, 4]))

    def test_fancy_indexing(self):
        # Integer-array (fancy) indexing in every shape combination,
        # compared against np.matrix behaviour.
        B = asmatrix(arange(50).reshape(5, 10))
        A = self.spcreator(B)

        # [i]
        assert_equal(A[[1, 3]].toarray(), B[[1, 3]])

        # [i,[1,2]]
        assert_equal(A[3, [1, 3]].toarray(), B[3, [1, 3]])
        assert_equal(A[-1, [2, -5]].toarray(), B[-1, [2, -5]])
        assert_equal(A[array(-1), [2, -5]].toarray(), B[-1, [2, -5]])
        assert_equal(A[-1, array([2, -5])].toarray(), B[-1, [2, -5]])
        assert_equal(A[array(-1), array([2, -5])].toarray(), B[-1, [2, -5]])

        # [1:2,[1,2]]
        assert_equal(A[:, [2, 8, 3, -1]].toarray(), B[:, [2, 8, 3, -1]])
        assert_equal(A[3:4, [9]].toarray(), B[3:4, [9]])
        assert_equal(A[1:4, [-1, -5]].toarray(), B[1:4, [-1, -5]])
        assert_equal(A[1:4, array([-1, -5])].toarray(), B[1:4, [-1, -5]])

        # [[1,2],j]
        assert_equal(A[[1, 3], 3].toarray(), B[[1, 3], 3])
        assert_equal(A[[2, -5], -4].toarray(), B[[2, -5], -4])
        assert_equal(A[array([2, -5]), -4].toarray(), B[[2, -5], -4])
        assert_equal(A[[2, -5], array(-4)].toarray(), B[[2, -5], -4])
        assert_equal(A[array([2, -5]), array(-4)].toarray(), B[[2, -5], -4])

        # [[1,2],1:2]
        assert_equal(A[[1, 3], :].toarray(), B[[1, 3], :])
        assert_equal(A[[2, -5], 8:-1].toarray(), B[[2, -5], 8:-1])
        assert_equal(A[array([2, -5]), 8:-1].toarray(), B[[2, -5], 8:-1])

        # [[1,2],[1,2]]
        assert_equal(toarray(A[[1, 3], [2, 4]]), B[[1, 3], [2, 4]])
        assert_equal(toarray(A[[-1, -3], [2, -4]]), B[[-1, -3], [2, -4]])
        assert_equal(
            toarray(A[array([-1, -3]), [2, -4]]), B[[-1, -3], [2, -4]]
        )
        assert_equal(
            toarray(A[[-1, -3], array([2, -4])]), B[[-1, -3], [2, -4]]
        )
        assert_equal(
            toarray(A[array([-1, -3]), array([2, -4])]), B[[-1, -3], [2, -4]]
        )

        # [[[1],[2]],[1,2]]
        assert_equal(A[[[1], [3]], [2, 4]].toarray(), B[[[1], [3]], [2, 4]])
        assert_equal(
            A[[[-1], [-3], [-2]], [2, -4]].toarray(),
            B[[[-1], [-3], [-2]], [2, -4]]
        )
        assert_equal(
            A[array([[-1], [-3], [-2]]), [2, -4]].toarray(),
            B[[[-1], [-3], [-2]], [2, -4]]
        )
        assert_equal(
            A[[[-1], [-3], [-2]], array([2, -4])].toarray(),
            B[[[-1], [-3], [-2]], [2, -4]]
        )
        assert_equal(
            A[array([[-1], [-3], [-2]]), array([2, -4])].toarray(),
            B[[[-1], [-3], [-2]], [2, -4]]
        )

        # [[1,2]]
        assert_equal(A[[1, 3]].toarray(), B[[1, 3]])
        assert_equal(A[[-1, -3]].toarray(), B[[-1, -3]])
        assert_equal(A[array([-1, -3])].toarray(), B[[-1, -3]])

        # [[1,2],:][:,[1,2]]
        assert_equal(
            A[[1, 3], :][:, [2, 4]].toarray(), B[[1, 3], :][:, [2, 4]]
        )
        assert_equal(
            A[[-1, -3], :][:, [2, -4]].toarray(), B[[-1, -3], :][:, [2, -4]]
        )
        assert_equal(
            A[array([-1, -3]), :][:, array([2, -4])].toarray(),
            B[[-1, -3], :][:, [2, -4]]
        )

        # [:,[1,2]][[1,2],:]
        assert_equal(
            A[:, [1, 3]][[2, 4], :].toarray(), B[:, [1, 3]][[2, 4], :]
        )
        assert_equal(
            A[:, [-1, -3]][[2, -4], :].toarray(), B[:, [-1, -3]][[2, -4], :]
        )
        assert_equal(
            A[:, array([-1, -3])][array([2, -4]), :].toarray(),
            B[:, [-1, -3]][[2, -4], :]
        )

        # Check bug reported by Robert Cimrman:
        # http://thread.gmane.org/gmane.comp.python.scientific.devel/7986 (dead link)
        s = slice(int8(2), int8(4), None)
        assert_equal(A[s, :].toarray(), B[2:4, :])
        assert_equal(A[:, s].toarray(), B[:, 2:4])

        # Regression for gh-4917: index with tuple of 2D arrays
        i = np.array([[1]], dtype=int)
        assert_equal(A[i, i].toarray(), B[i, i])

        # Regression for gh-4917: index with tuple of empty nested lists
        assert_equal(A[[[]], [[]]].toarray(), B[[[]], [[]]])

    def test_fancy_indexing_randomized(self):
        np.random.seed(1234)  # make runs repeatable

        NUM_SAMPLES = 50
        M = 6
        N = 4

        D = asmatrix(np.random.rand(M, N))
        D = np.multiply(D, D > 0.5)

        # Random (possibly negative) in-range index pairs agree with dense.
        I = np.random.randint(-M + 1, M, size=NUM_SAMPLES)
        J = np.random.randint(-N + 1, N, size=NUM_SAMPLES)

        S = self.spcreator(D)
        SIJ = S[I, J]
        if issparse(SIJ):
            SIJ = SIJ.toarray()
        assert_equal(SIJ, D[I, J])

        I_bad = I + M
        J_bad = J - N

        assert_raises(IndexError, S.__getitem__, (I_bad, J))
        assert_raises(IndexError, S.__getitem__, (I, J_bad))

    def test_fancy_indexing_boolean(self):
        np.random.seed(1234)  # make runs repeatable

        B = asmatrix(arange(50).reshape(5, 10))
        A = self.spcreator(B)

        I = np.array(np.random.randint(0, 2, size=5), dtype=bool)
        J = np.array(np.random.randint(0, 2, size=10), dtype=bool)
        X = np.array(np.random.randint(0, 2, size=(5, 10)), dtype=bool)

        assert_equal(toarray(A[I]), B[I])
        assert_equal(toarray(A[:, J]), B[:, J])
        assert_equal(toarray(A[X]), B[X])
        assert_equal(toarray(A[B > 9]), B[B > 9])

        I = np.array([True, False, True, True, False])
        J = np.array([False, True, True, False, True,
                      False, False, False, False, False])

        assert_equal(toarray(A[I, J]), B[I, J])

        # Wrong-shaped boolean masks: all-False is allowed (empty result),
        # any True entry out of bounds must raise.
        Z1 = np.zeros((6, 11), dtype=bool)
        Z2 = np.zeros((6, 11), dtype=bool)
        Z2[0, -1] = True
        Z3 = np.zeros((6, 11), dtype=bool)
        Z3[-1, 0] = True

        assert_equal(A[Z1], np.array([]))
        assert_raises(IndexError, A.__getitem__, Z2)
        assert_raises(IndexError, A.__getitem__, Z3)
        assert_raises((IndexError, ValueError), A.__getitem__, (X, 1))

    def test_fancy_indexing_sparse_boolean(self):
        np.random.seed(1234)  # make runs repeatable

        B = asmatrix(arange(50).reshape(5, 10))
        A = self.spcreator(B)

        X = np.array(np.random.randint(0, 2, size=(5, 10)), dtype=bool)
        Xsp = csr_matrix(X)

        # A sparse boolean mask behaves like the equivalent dense mask.
        assert_equal(toarray(A[Xsp]), B[X])
        assert_equal(toarray(A[A > 9]), B[B > 9])

        Z = np.array(np.random.randint(0, 2, size=(5, 11)), dtype=bool)
        Y = np.array(np.random.randint(0, 2, size=(6, 10)), dtype=bool)

        Zsp = csr_matrix(Z)
        Ysp = csr_matrix(Y)

        assert_raises(IndexError, A.__getitem__, Zsp)
        assert_raises(IndexError, A.__getitem__, Ysp)
        assert_raises((IndexError, ValueError), A.__getitem__, (Xsp, 1))

    def test_fancy_indexing_regression_3087(self):
        # NOTE(review): the right-hand side of this assignment continues on
        # the next source line (statement cut by the line-collapse).
        mat =
# NOTE(review): reformatted from whitespace-mangled source; tokens preserved.
# The first expression below is the cut-off right-hand side of
# "mat = " from the previous source line (test_fancy_indexing_regression_3087).
        self.spcreator(array([[1, 0, 0], [0, 1, 0], [1, 0, 0]]))
        desired_cols = np.ravel(mat.sum(0)) > 0
        assert_equal(mat[:, desired_cols].toarray(),
                     [[1, 0], [0, 1], [1, 0]])

    def test_fancy_indexing_seq_assign(self):
        # Assigning a length-2 array to a single element must raise.
        mat = self.spcreator(array([[1, 0], [0, 1]]))
        assert_raises(ValueError, mat.__setitem__, (0, 0), np.array([1, 2]))

    def test_fancy_indexing_2d_assign(self):
        # regression test for gh-10695
        mat = self.spcreator(array([[1, 0], [2, 3]]))
        with suppress_warnings() as sup:
            sup.filter(SparseEfficiencyWarning,
                       "Changing the sparsity structure")
            mat[[0, 1], [1, 1]] = mat[[1, 0], [0, 0]]
        assert_equal(toarray(mat), array([[1, 2], [2, 1]]))

    def test_fancy_indexing_empty(self):
        # Empty index arrays produce empty results, same as dense.
        B = asmatrix(arange(50).reshape(5, 10))
        B[1, :] = 0
        B[:, 2] = 0
        B[3, 6] = 0
        A = self.spcreator(B)

        K = np.array([False, False, False, False, False])
        assert_equal(toarray(A[K]), B[K])
        K = np.array([], dtype=int)
        assert_equal(toarray(A[K]), B[K])
        assert_equal(toarray(A[K, K]), B[K, K])
        J = np.array([0, 1, 2, 3, 4], dtype=int)[:, None]
        assert_equal(toarray(A[K, J]), B[K, J])
        assert_equal(toarray(A[J, K]), B[J, K])


@contextlib.contextmanager
def check_remains_sorted(X):
    """Checks that sorted indices property is retained through an operation
    """
    # Formats without the has_sorted_indices attribute (or already unsorted)
    # are not checked.
    if not hasattr(X, 'has_sorted_indices') or not X.has_sorted_indices:
        yield
        return
    yield
    # After the wrapped operation: re-sorting must not change the indices,
    # i.e. they were still sorted.
    indices = X.indices.copy()
    X.has_sorted_indices = False
    X.sort_indices()
    assert_array_equal(indices, X.indices,
                       'Expected sorted indices, found unsorted')


class _TestFancyIndexingAssign:
    def test_bad_index_assign(self):
        # Non-index objects as assignment targets must raise.
        A = self.spcreator(np.zeros([5, 5]))
        assert_raises((IndexError, ValueError, TypeError), A.__setitem__, "foo", 2)
        assert_raises((IndexError, ValueError, TypeError), A.__setitem__, (2, "foo"), 5)

    def test_fancy_indexing_set(self):
        # Fancy-index assignment matches dense and keeps indices sorted.
        n, m = (5, 10)

        def _test_set_slice(i, j):
            A = self.spcreator((n, m))
            B = asmatrix(np.zeros((n, m)))
            with suppress_warnings() as sup:
                sup.filter(SparseEfficiencyWarning,
                           "Changing the sparsity structure of a cs[cr]_matrix is expensive")
                B[i, j] = 1
                with check_remains_sorted(A):
                    A[i, j] = 1
            assert_array_almost_equal(A.toarray(), B)

        # [1:2,1:2]
        for i, j in [((2, 3, 4), slice(None, 10, 4)),
                     (np.arange(3), slice(5, -2)),
                     (slice(2, 5), slice(5, -2))]:
            _test_set_slice(i, j)
        for i, j in [(np.arange(3), np.arange(3)), ((0, 3, 4), (1, 2, 4))]:
            _test_set_slice(i, j)

    def test_fancy_assignment_dtypes(self):
        # Scalar assignment preserves the matrix dtype for every
        # supported dtype.
        def check(dtype):
            A = self.spcreator((5, 5), dtype=dtype)
            with suppress_warnings() as sup:
                sup.filter(SparseEfficiencyWarning,
                           "Changing the sparsity structure of a cs[cr]_matrix is expensive")
                A[[0, 1], [0, 1]] = dtype.type(1)
                assert_equal(A.sum(), dtype.type(1) * 2)
                A[0:2, 0:2] = dtype.type(1.0)
                assert_equal(A.sum(), dtype.type(1) * 4)
                A[2, 2] = dtype.type(1.0)
                assert_equal(A.sum(), dtype.type(1) * 4 + dtype.type(1))

        for dtype in supported_dtypes:
            check(np.dtype(dtype))

    def test_sequence_assignment(self):
        # Lists, tuples and arrays all work as index sequences in
        # assignment, and indices stay sorted throughout.
        A = self.spcreator((4, 3))
        B = self.spcreator(eye(3, 4))

        i0 = [0, 1, 2]
        i1 = (0, 1, 2)
        i2 = array(i0)

        with suppress_warnings() as sup:
            sup.filter(SparseEfficiencyWarning,
                       "Changing the sparsity structure of a cs[cr]_matrix is expensive")
            with check_remains_sorted(A):
                A[0, i0] = B[i0, 0].T
                A[1, i1] = B[i1, 1].T
                A[2, i2] = B[i2, 2].T
            assert_array_equal(A.toarray(), B.T.toarray())

            # column slice
            A = self.spcreator((2, 3))
            with check_remains_sorted(A):
                A[1, 1:3] = [10, 20]
            assert_array_equal(A.toarray(), [[0, 0, 0], [0, 10, 20]])

            # row slice
            A = self.spcreator((3, 2))
            with check_remains_sorted(A):
                A[1:3, 1] = [[10], [20]]
            assert_array_equal(A.toarray(), [[0, 0], [0, 10], [0, 20]])

            # both slices
            A = self.spcreator((3, 3))
            B = asmatrix(np.zeros((3, 3)))
            with check_remains_sorted(A):
                for C in [A, B]:
                    C[[0, 1, 2], [0, 1, 2]] = [4, 5, 6]
            assert_array_equal(A.toarray(), B)

            # both slices (2)
            A = self.spcreator((4, 3))
            with check_remains_sorted(A):
                A[(1, 2, 3), (0, 1, 2)] = [1, 2, 3]
            assert_almost_equal(A.sum(), 6)
            B = asmatrix(np.zeros((4, 3)))
            B[(1, 2, 3), (0, 1, 2)] = [1, 2, 3]
            assert_array_equal(A.toarray(), B)

    def test_fancy_assign_empty(self):
        # Assignment through empty index arrays is a no-op, as in dense.
        B = asmatrix(arange(50).reshape(5, 10))
        B[1, :] = 0
        B[:, 2] = 0
        B[3, 6] = 0
        A = self.spcreator(B)

        K = np.array([False, False, False, False, False])
        A[K] = 42
        assert_equal(toarray(A), B)

        K = np.array([], dtype=int)
        A[K] = 42
        assert_equal(toarray(A), B)
        A[K, K] = 42
        assert_equal(toarray(A), B)

        J = np.array([0, 1, 2, 3, 4], dtype=int)[:, None]
        A[K, J] = 42
        assert_equal(toarray(A), B)
        A[J, K] = 42
        assert_equal(toarray(A), B)


class _TestFancyMultidim:
    def test_fancy_indexing_ndarray(self):
        # Broadcasting index-array pairs of mixed 1-D/2-D shapes.
        sets = [
            (np.array([[1], [2], [3]]), np.array([3, 4, 2])),
            (np.array([[1], [2], [3]]), np.array([[3, 4, 2]])),
            (np.array([[1, 2, 3]]), np.array([[3], [4], [2]])),
            (np.array([1, 2, 3]), np.array([[3], [4], [2]])),
            (np.array([[1, 2, 3], [3, 4, 2]]),
             np.array([[5, 6, 3], [2, 3, 1]]))
        ]
        # These inputs generate 3-D outputs
        #     (np.array([[[1], [2], [3]], [[3], [4], [2]]]),
        #      np.array([[[5], [6], [3]], [[2], [3], [1]]])),

        for I, J in sets:
            np.random.seed(1234)
            D = asmatrix(np.random.rand(5, 7))
            S = self.spcreator(D)

            SIJ = S[I, J]
            if issparse(SIJ):
                SIJ = SIJ.toarray()
            assert_equal(SIJ, D[I, J])

            I_bad = I + 5
            J_bad = J + 7

            assert_raises(IndexError, S.__getitem__, (I_bad, J))
            assert_raises(IndexError, S.__getitem__, (I, J_bad))

        # This would generate 3-D arrays -- not supported
        assert_raises(IndexError, S.__getitem__, ([I, I], slice(None)))
        assert_raises(IndexError, S.__getitem__, (slice(None), [J, J]))


class _TestFancyMultidimAssign:
    def test_fancy_assign_ndarray(self):
        # 2-D index-array assignment from arrays, lists and scalars,
        # keeping sorted indices; out-of-range indices must raise.
        np.random.seed(1234)

        D = asmatrix(np.random.rand(5, 7))
        S = self.spcreator(D)
        X = np.random.rand(2, 3)

        I = np.array([[1, 2, 3], [3, 4, 2]])
        J = np.array([[5, 6, 3], [2, 3, 1]])

        with check_remains_sorted(S):
            S[I, J] = X
        D[I, J] = X
        assert_equal(S.toarray(), D)

        I_bad = I + 5
        J_bad = J + 7

        C = [1, 2, 3]

        with check_remains_sorted(S):
            S[I, J] = C
        D[I, J] = C
        assert_equal(S.toarray(), D)

        with check_remains_sorted(S):
            S[I, J] = 3
        D[I, J] = 3
        assert_equal(S.toarray(), D)

        assert_raises(IndexError, S.__setitem__, (I_bad, J), C)
        # NOTE(review): the next assert_raises call continues on the
        # following source line (statement cut by the line-collapse).
        assert_raises(IndexError,
# NOTE(review): reformatted from whitespace-mangled source; tokens preserved.
# The first line below completes an assert_raises(...) call started on the
# previous source line.
                      S.__setitem__, (I, J_bad), C)

    def test_fancy_indexing_multidim_set(self):
        # 2-D fancy-index assignment against the dense mirror, with the
        # sorted-indices invariant checked around the mutation.
        n, m = (5, 10)

        def _test_set_slice(i, j):
            A = self.spcreator((n, m))
            with check_remains_sorted(A), suppress_warnings() as sup:
                sup.filter(SparseEfficiencyWarning,
                           "Changing the sparsity structure of a cs[cr]_matrix is expensive")
                A[i, j] = 1
            B = asmatrix(np.zeros((n, m)))
            B[i, j] = 1
            assert_array_almost_equal(A.toarray(), B)

        # [[[1, 2], [1, 2]], [1, 2]]
        for i, j in [(np.array([[1, 2], [1, 3]]), [1, 3]),
                     (np.array([0, 4]), [[0, 3], [1, 2]]),
                     ([[1, 2, 3], [0, 2, 4]], [[0, 4, 3], [4, 1, 2]])]:
            _test_set_slice(i, j)

    def test_fancy_assign_list(self):
        # Same as the ndarray variant, but with plain nested lists.
        np.random.seed(1234)

        D = asmatrix(np.random.rand(5, 7))
        S = self.spcreator(D)
        X = np.random.rand(2, 3)

        I = [[1, 2, 3], [3, 4, 2]]
        J = [[5, 6, 3], [2, 3, 1]]

        S[I, J] = X
        D[I, J] = X
        assert_equal(S.toarray(), D)

        I_bad = [[ii + 5 for ii in i] for i in I]
        J_bad = [[jj + 7 for jj in j] for j in J]
        C = [1, 2, 3]

        S[I, J] = C
        D[I, J] = C
        assert_equal(S.toarray(), D)

        S[I, J] = 3
        D[I, J] = 3
        assert_equal(S.toarray(), D)

        assert_raises(IndexError, S.__setitem__, (I_bad, J), C)
        assert_raises(IndexError, S.__setitem__, (I, J_bad), C)

    def test_fancy_assign_slice(self):
        # Out-of-range list indices combined with a slice must raise.
        np.random.seed(1234)

        D = asmatrix(np.random.rand(5, 7))
        S = self.spcreator(D)

        I = [1, 2, 3, 3, 4, 2]
        J = [5, 6, 3, 2, 3, 1]

        I_bad = [ii + 5 for ii in I]
        J_bad = [jj + 7 for jj in J]

        C1 = [1, 2, 3, 4, 5, 6, 7]
        C2 = np.arange(5)[:, None]
        assert_raises(IndexError, S.__setitem__, (I_bad, slice(None)), C1)
        assert_raises(IndexError, S.__setitem__, (slice(None), J_bad), C2)


class _TestArithmetic:
    """
    Test real/complex arithmetic
    """
    def __arith_init(self):
        # these can be represented exactly in FP (so arithmetic should be exact)
        self.__A = array([[-1.5, 6.5, 0, 2.25, 0, 0],
                          [3.125, -7.875, 0.625, 0, 0, 0],
                          [0, 0, -0.125, 1.0, 0, 0],
                          [0, 0, 8.375, 0, 0, 0]], 'float64')
        self.__B = array([[0.375, 0, 0, 0, -5, 2.5],
                          [14.25, -3.75, 0, 0, -0.125, 0],
                          [0, 7.25, 0, 0, 0, 0],
                          [18.5, -0.0625, 0, 0, 0, 0]], 'complex128')
        self.__B.imag = array([[1.25, 0, 0, 0, 6, -3.875],
                               [2.25, 4.125, 0, 0, 0, 2.75],
                               [0, 4.125, 0, 0, 0, 0],
                               [-0.0625, 0, 0, 0, 0, 0]], 'float64')

        # fractions are all x/16ths
        assert_array_equal((self.__A * 16).astype('int32'), 16 * self.__A)
        assert_array_equal((self.__B.real * 16).astype('int32'), 16 * self.__B.real)
        assert_array_equal((self.__B.imag * 16).astype('int32'), 16 * self.__B.imag)

        self.__Asp = self.spcreator(self.__A)
        self.__Bsp = self.spcreator(self.__B)

    def test_add_sub(self):
        # Addition/subtraction across every supported dtype pairing, in
        # sparse+sparse, sparse+dense and dense+sparse combinations.
        self.__arith_init()

        # basic tests
        assert_array_equal(
            (self.__Asp + self.__Bsp).toarray(), self.__A + self.__B
        )

        # check conversions
        for x in supported_dtypes:
            with np.errstate(invalid="ignore"):
                A = self.__A.astype(x)
            Asp = self.spcreator(A)
            for y in supported_dtypes:
                if not np.issubdtype(y, np.complexfloating):
                    with np.errstate(invalid="ignore"):
                        B = self.__B.real.astype(y)
                else:
                    B = self.__B.astype(y)
                Bsp = self.spcreator(B)

                # addition
                D1 = A + B
                S1 = Asp + Bsp
                assert_equal(S1.dtype, D1.dtype)
                assert_array_equal(S1.toarray(), D1)
                assert_array_equal(Asp + B, D1)    # check sparse + dense
                assert_array_equal(A + Bsp, D1)    # check dense + sparse

                # subtraction
                if np.dtype('bool') in [x, y]:
                    # boolean array subtraction deprecated in 1.9.0
                    continue

                D1 = A - B
                S1 = Asp - Bsp
                assert_equal(S1.dtype, D1.dtype)
                assert_array_equal(S1.toarray(), D1)
                assert_array_equal(Asp - B, D1)    # check sparse - dense
                assert_array_equal(A - Bsp, D1)    # check dense - sparse

    def test_mu(self):
        # Matrix multiplication (@) across dtype pairings, with a small
        # absolute tolerance for float round-off.
        self.__arith_init()

        # basic tests
        assert_array_equal((self.__Asp @ self.__Bsp.T).toarray(),
                           self.__A @ self.__B.T)

        for x in supported_dtypes:
            with np.errstate(invalid="ignore"):
                A = self.__A.astype(x)
            Asp = self.spcreator(A)
            for y in supported_dtypes:
                if np.issubdtype(y, np.complexfloating):
                    B = self.__B.astype(y)
                else:
                    with np.errstate(invalid="ignore"):
                        B = self.__B.real.astype(y)
                Bsp = self.spcreator(B)

                D1 = A @ B.T
                S1 = Asp @ Bsp.T

                assert_allclose(S1.toarray(), D1,
                                atol=1e-14 * abs(D1).max())
                assert_equal(S1.dtype, D1.dtype)


class _TestMinMax:
    def test_minmax(self):
        """Whole-matrix min()/max() must match dense results and keep dtype.

        Exercised across float/int/complex dtypes, after sign flips and
        offsets, on fully dense, fully sparse, partially-zero, and
        zero-size matrices (the last must raise ValueError).
        """
        for dtype in [np.float32, np.float64, np.int32, np.int64, np.complex128]:
            D = np.arange(20, dtype=dtype).reshape(5,4)
            X = self.spcreator(D)
            assert_equal(X.min(), 0)
            assert_equal(X.max(), 19)
            # reductions must preserve the matrix dtype
            assert_equal(X.min().dtype, dtype)
            assert_equal(X.max().dtype, dtype)
            D *= -1
            X = self.spcreator(D)
            assert_equal(X.min(), -19)
            assert_equal(X.max(), 0)
            D += 5
            X = self.spcreator(D)
            assert_equal(X.min(), -14)
            assert_equal(X.max(), 5)
        # try a fully dense matrix
        X = self.spcreator(np.arange(1, 10).reshape(3, 3))
        assert_equal(X.min(), 1)
        assert_equal(X.min().dtype, X.dtype)
        X = -X
        assert_equal(X.max(), -1)
        # and a fully sparse matrix
        Z = self.spcreator(np.zeros(1))
        assert_equal(Z.min(), 0)
        assert_equal(Z.max(), 0)
        assert_equal(Z.max().dtype, Z.dtype)
        # another test
        D = np.arange(20, dtype=float).reshape(5,4)
        D[0:2, :] = 0
        X = self.spcreator(D)
        assert_equal(X.min(), 0)
        assert_equal(X.max(), 19)
        # zero-size matrices
        for D in [np.zeros((0, 0)), np.zeros((0, 10)), np.zeros((10, 0))]:
            X = self.spcreator(D)
            assert_raises(ValueError, X.min)
            assert_raises(ValueError, X.max)

    def test_minmax_axis(self):
        """Axis-wise min()/max() must agree with dense keepdims results.

        Covers positive and negative axes on matrices with empty rows,
        an all-zero trailing column, and sign changes, plus a full and
        an all-zero matrix.
        """
        D = np.arange(50).reshape(5, 10)
        # completely empty rows, leaving some completely full:
        D[1, :] = 0
        # empty at end for reduceat:
        D[:, 9] = 0
        # partial rows/cols:
        D[3, 3] = 0
        # entries on either side of 0:
        D[2, 2] = -1
        X = self.spcreator(D)
        axes = [-2, -1, 0, 1]
        for axis in axes:
            assert_array_equal(
                X.max(axis=axis).toarray(), D.max(axis=axis, keepdims=True)
            )
            assert_array_equal(
                X.min(axis=axis).toarray(), D.min(axis=axis, keepdims=True)
            )
        # full matrix
        D = np.arange(1, 51).reshape(10, 5)
        X = self.spcreator(D)
        for axis in axes:
            assert_array_equal(
                X.max(axis=axis).toarray(), D.max(axis=axis, keepdims=True)
            )
            assert_array_equal(
                X.min(axis=axis).toarray(), D.min(axis=axis, keepdims=True)
            )
        # empty matrix
        D = np.zeros((10, 5))
        X = self.spcreator(D)
        for axis in axes:
            assert_array_equal(
                X.max(axis=axis).toarray(), D.max(axis=axis, keepdims=True)
            )
            assert_array_equal(
X.min(axis=axis).toarray(), D.min(axis=axis, keepdims=True) ) axes_even = [0, -2] axes_odd = [1, -1] # zero-size matrices D = np.zeros((0, 10)) X = self.spcreator(D) for axis in axes_even: assert_raises(ValueError, X.min, axis=axis) assert_raises(ValueError, X.max, axis=axis) for axis in axes_odd: assert_array_equal(np.zeros((0, 1)), X.min(axis=axis).toarray()) assert_array_equal(np.zeros((0, 1)), X.max(axis=axis).toarray()) D = np.zeros((10, 0)) X = self.spcreator(D) for axis in axes_odd: assert_raises(ValueError, X.min, axis=axis) assert_raises(ValueError, X.max, axis=axis) for axis in axes_even: assert_array_equal(np.zeros((1, 0)), X.min(axis=axis).toarray()) assert_array_equal(np.zeros((1, 0)), X.max(axis=axis).toarray()) def test_nanminmax(self): D = matrix(np.arange(50).reshape(5,10), dtype=float) D[1, :] = 0 D[:, 9] = 0 D[3, 3] = 0 D[2, 2] = -1 D[4, 2] = np.nan D[1, 4] = np.nan X = self.spcreator(D) X_nan_maximum = X.nanmax() assert np.isscalar(X_nan_maximum) assert X_nan_maximum == np.nanmax(D) X_nan_minimum = X.nanmin() assert np.isscalar(X_nan_minimum) assert X_nan_minimum == np.nanmin(D) axes = [-2, -1, 0, 1] for axis in axes: X_nan_maxima = X.nanmax(axis=axis) assert isinstance(X_nan_maxima, coo_matrix) assert_allclose(X_nan_maxima.toarray(), np.nanmax(D, axis=axis)) X_nan_minima = X.nanmin(axis=axis) assert isinstance(X_nan_minima, coo_matrix) assert_allclose(X_nan_minima.toarray(), np.nanmin(D, axis=axis)) def test_minmax_invalid_params(self): dat = array([[0, 1, 2], [3, -4, 5], [-6, 7, 9]]) datsp = self.spcreator(dat) for fname in ('min', 'max'): func = getattr(datsp, fname) assert_raises(ValueError, func, axis=3) assert_raises(TypeError, func, axis=(0, 1)) assert_raises(TypeError, func, axis=1.5) assert_raises(ValueError, func, axis=1, out=1) def test_numpy_minmax(self): # See gh-5987 # xref gh-7460 in 'numpy' from scipy.sparse import _data dat = array([[0, 1, 2], [3, -4, 5], [-6, 7, 9]]) datsp = self.spcreator(dat) # We are only testing sparse 
matrices who have # implemented 'min' and 'max' because they are # the ones with the compatibility issues with # the 'numpy' implementation. if isinstance(datsp, _data._minmax_mixin): assert_array_equal(np.min(datsp), np.min(dat)) assert_array_equal(np.max(datsp), np.max(dat)) def test_argmax(self): from scipy.sparse import _data D1 = np.array([ [-1, 5, 2, 3], [0, 0, -1, -2], [-1, -2, -3, -4], [1, 2, 3, 4], [1, 2, 0, 0], ]) D2 = D1.transpose() # Non-regression test cases for gh-16929. D3 = np.array([[4, 3], [7, 5]]) D4 = np.array([[4, 3], [7, 0]]) D5 = np.array([[5, 5, 3], [4, 9, 10], [3, 4, 9]]) for D in [D1, D2, D3, D4, D5]: mat = self.spcreator(D) if not isinstance(mat, _data._minmax_mixin): continue assert_equal(mat.argmax(), np.argmax(D)) assert_equal(mat.argmin(), np.argmin(D)) assert_equal(mat.argmax(axis=0), asmatrix(np.argmax(D, axis=0))) assert_equal(mat.argmin(axis=0), asmatrix(np.argmin(D, axis=0))) assert_equal(mat.argmax(axis=1), asmatrix(np.argmax(D, axis=1).reshape(-1, 1))) assert_equal(mat.argmin(axis=1), asmatrix(np.argmin(D, axis=1).reshape(-1, 1))) D1 = np.empty((0, 5)) D2 = np.empty((5, 0)) for axis in [None, 0]: mat = self.spcreator(D1) assert_raises(ValueError, mat.argmax, axis=axis) assert_raises(ValueError, mat.argmin, axis=axis) for axis in [None, 1]: mat = self.spcreator(D2) assert_raises(ValueError, mat.argmax, axis=axis) assert_raises(ValueError, mat.argmin, axis=axis) class _TestGetNnzAxis: def test_getnnz_axis(self): dat = array([[0, 2], [3, 5], [-6, 9]]) bool_dat = dat.astype(bool) datsp = self.spcreator(dat) accepted_return_dtypes = (np.int32, np.int64) assert_array_equal(bool_dat.sum(axis=None), datsp.getnnz(axis=None)) assert_array_equal(bool_dat.sum(), datsp.getnnz()) assert_array_equal(bool_dat.sum(axis=0), datsp.getnnz(axis=0)) assert_in(datsp.getnnz(axis=0).dtype, accepted_return_dtypes) assert_array_equal(bool_dat.sum(axis=1), datsp.getnnz(axis=1)) assert_in(datsp.getnnz(axis=1).dtype, accepted_return_dtypes) 
        # negative axes must behave like their positive counterparts
        assert_array_equal(bool_dat.sum(axis=-2), datsp.getnnz(axis=-2))
        assert_in(datsp.getnnz(axis=-2).dtype, accepted_return_dtypes)
        assert_array_equal(bool_dat.sum(axis=-1), datsp.getnnz(axis=-1))
        assert_in(datsp.getnnz(axis=-1).dtype, accepted_return_dtypes)
        # out-of-range axis must be rejected
        assert_raises(ValueError, datsp.getnnz, axis=2)


#------------------------------------------------------------------------------
# Tailored base class for generic tests
#------------------------------------------------------------------------------

def _possibly_unimplemented(cls, require=True):
    """
    Construct a class that either runs tests as usual (require=True),
    or each method skips if it encounters a common error.
    """
    if require:
        # feature is mandatory: run the test class unchanged
        return cls
    else:
        def wrap(fc):
            # Wrap a test method so that the errors commonly raised by
            # an unimplemented feature turn into a pytest skip instead
            # of a failure.
            @functools.wraps(fc)
            def wrapper(*a, **kw):
                try:
                    return fc(*a, **kw)
                except (NotImplementedError, TypeError, ValueError,
                        IndexError, AttributeError):
                    # NOTE: pytest.skip() already raises; the `raise` is
                    # redundant but harmless.
                    raise pytest.skip("feature not implemented")
            return wrapper

        # Rebuild the class with every test_* method wrapped.
        new_dict = dict(cls.__dict__)
        for name, func in cls.__dict__.items():
            if name.startswith('test_'):
                new_dict[name] = wrap(func)
        return type(cls.__name__ + "NotImplemented",
                    cls.__bases__,
                    new_dict)


def sparse_test_class(getset=True, slicing=True, slicing_assign=True,
                      fancy_indexing=True, fancy_assign=True,
                      fancy_multidim_indexing=True, fancy_multidim_assign=True,
                      minmax=True, nnz_axis=True):
    """
    Construct a base class, optionally converting some of the tests in
    the suite to check that the feature is not implemented.
""" bases = (_TestCommon, _possibly_unimplemented(_TestGetSet, getset), _TestSolve, _TestInplaceArithmetic, _TestArithmetic, _possibly_unimplemented(_TestSlicing, slicing), _possibly_unimplemented(_TestSlicingAssign, slicing_assign), _possibly_unimplemented(_TestFancyIndexing, fancy_indexing), _possibly_unimplemented(_TestFancyIndexingAssign, fancy_assign), _possibly_unimplemented(_TestFancyMultidim, fancy_indexing and fancy_multidim_indexing), _possibly_unimplemented(_TestFancyMultidimAssign, fancy_multidim_assign and fancy_assign), _possibly_unimplemented(_TestMinMax, minmax), _possibly_unimplemented(_TestGetNnzAxis, nnz_axis)) # check that test names do not clash names = {} for cls in bases: for name in cls.__dict__: if not name.startswith('test_'): continue old_cls = names.get(name) if old_cls is not None: raise ValueError("Test class {} overloads test {} defined in {}".format( cls.__name__, name, old_cls.__name__)) names[name] = cls return type("TestBase", bases, {}) #------------------------------------------------------------------------------ # Matrix class based tests #------------------------------------------------------------------------------ class TestCSR(sparse_test_class()): @classmethod def spcreator(cls, *args, **kwargs): with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "Changing the sparsity structure of a csr_matrix is expensive") return csr_matrix(*args, **kwargs) math_dtypes = [np.bool_, np.int_, np.float_, np.complex_] def test_constructor1(self): b = array([[0, 4, 0], [3, 0, 0], [0, 2, 0]], 'd') bsp = csr_matrix(b) assert_array_almost_equal(bsp.data,[4,3,2]) assert_array_equal(bsp.indices,[1,0,1]) assert_array_equal(bsp.indptr,[0,1,2,3]) assert_equal(bsp.getnnz(),3) assert_equal(bsp.getformat(),'csr') assert_array_equal(bsp.toarray(), b) def test_constructor2(self): b = zeros((6,6),'d') b[3,4] = 5 bsp = csr_matrix(b) assert_array_almost_equal(bsp.data,[5]) assert_array_equal(bsp.indices,[4]) 
assert_array_equal(bsp.indptr,[0,0,0,0,1,1,1]) assert_array_almost_equal(bsp.toarray(), b) def test_constructor3(self): b = array([[1, 0], [0, 2], [3, 0]], 'd') bsp = csr_matrix(b) assert_array_almost_equal(bsp.data,[1,2,3]) assert_array_equal(bsp.indices,[0,1,0]) assert_array_equal(bsp.indptr,[0,1,2,3]) assert_array_almost_equal(bsp.toarray(), b) def test_constructor4(self): # using (data, ij) format row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2]) col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1]) data = array([6., 10., 3., 9., 1., 4., 11., 2., 8., 5., 7.]) ij = vstack((row,col)) csr = csr_matrix((data,ij),(4,3)) assert_array_equal(arange(12).reshape(4, 3), csr.toarray()) # using Python lists and a specified dtype csr = csr_matrix(([2**63 + 1, 1], ([0, 1], [0, 1])), dtype=np.uint64) dense = array([[2**63 + 1, 0], [0, 1]], dtype=np.uint64) assert_array_equal(dense, csr.toarray()) def test_constructor5(self): # infer dimensions from arrays indptr = array([0,1,3,3]) indices = array([0,5,1,2]) data = array([1,2,3,4]) csr = csr_matrix((data, indices, indptr)) assert_array_equal(csr.shape,(3,6)) def test_constructor6(self): # infer dimensions and dtype from lists indptr = [0, 1, 3, 3] indices = [0, 5, 1, 2] data = [1, 2, 3, 4] csr = csr_matrix((data, indices, indptr)) assert_array_equal(csr.shape, (3,6)) assert_(np.issubdtype(csr.dtype, np.signedinteger)) def test_constructor_smallcol(self): # int64 indices not required data = arange(6) + 1 col = array([1, 2, 1, 0, 0, 2], dtype=np.int64) ptr = array([0, 2, 4, 6], dtype=np.int64) a = csr_matrix((data, col, ptr), shape=(3, 3)) b = array([[0, 1, 2], [4, 3, 0], [5, 0, 6]], 'd') assert_equal(a.indptr.dtype, np.dtype(np.int32)) assert_equal(a.indices.dtype, np.dtype(np.int32)) assert_array_equal(a.toarray(), b) def test_constructor_largecol(self): # int64 indices required data = arange(6) + 1 large = np.iinfo(np.int32).max + 100 col = array([0, 1, 2, large, large+1, large+2], dtype=np.int64) ptr = array([0, 2, 4, 6], 
dtype=np.int64) a = csr_matrix((data, col, ptr)) assert_equal(a.indptr.dtype, np.dtype(np.int64)) assert_equal(a.indices.dtype, np.dtype(np.int64)) assert_array_equal(a.shape, (3, max(col)+1)) def test_sort_indices(self): data = arange(5) indices = array([7, 2, 1, 5, 4]) indptr = array([0, 3, 5]) asp = csr_matrix((data, indices, indptr), shape=(2,10)) bsp = asp.copy() asp.sort_indices() assert_array_equal(asp.indices,[1, 2, 7, 4, 5]) assert_array_equal(asp.toarray(), bsp.toarray()) def test_eliminate_zeros(self): data = array([1, 0, 0, 0, 2, 0, 3, 0]) indices = array([1, 2, 3, 4, 5, 6, 7, 8]) indptr = array([0, 3, 8]) asp = csr_matrix((data, indices, indptr), shape=(2,10)) bsp = asp.copy() asp.eliminate_zeros() assert_array_equal(asp.nnz, 3) assert_array_equal(asp.data,[1, 2, 3]) assert_array_equal(asp.toarray(), bsp.toarray()) def test_ufuncs(self): X = csr_matrix(np.arange(20).reshape(4, 5) / 20.) for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh", "arcsinh", "arctanh", "rint", "sign", "expm1", "log1p", "deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt"]: assert_equal(hasattr(csr_matrix, f), True) X2 = getattr(X, f)() assert_equal(X.shape, X2.shape) assert_array_equal(X.indices, X2.indices) assert_array_equal(X.indptr, X2.indptr) assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray())) def test_unsorted_arithmetic(self): data = arange(5) indices = array([7, 2, 1, 5, 4]) indptr = array([0, 3, 5]) asp = csr_matrix((data, indices, indptr), shape=(2,10)) data = arange(6) indices = array([8, 1, 5, 7, 2, 4]) indptr = array([0, 2, 6]) bsp = csr_matrix((data, indices, indptr), shape=(2,10)) assert_equal((asp + bsp).toarray(), asp.toarray() + bsp.toarray()) def test_fancy_indexing_broadcast(self): # broadcasting indexing mode is supported I = np.array([[1], [2], [3]]) J = np.array([3, 4, 2]) np.random.seed(1234) D = asmatrix(np.random.rand(5, 7)) S = self.spcreator(D) SIJ = S[I,J] if issparse(SIJ): SIJ = SIJ.toarray() assert_equal(SIJ, D[I,J]) def 
test_has_sorted_indices(self): "Ensure has_sorted_indices memoizes sorted state for sort_indices" sorted_inds = np.array([0, 1]) unsorted_inds = np.array([1, 0]) data = np.array([1, 1]) indptr = np.array([0, 2]) M = csr_matrix((data, sorted_inds, indptr)).copy() assert_equal(True, M.has_sorted_indices) assert type(M.has_sorted_indices) == bool M = csr_matrix((data, unsorted_inds, indptr)).copy() assert_equal(False, M.has_sorted_indices) # set by sorting M.sort_indices() assert_equal(True, M.has_sorted_indices) assert_array_equal(M.indices, sorted_inds) M = csr_matrix((data, unsorted_inds, indptr)).copy() # set manually (although underlyingly unsorted) M.has_sorted_indices = True assert_equal(True, M.has_sorted_indices) assert_array_equal(M.indices, unsorted_inds) # ensure sort bypassed when has_sorted_indices == True M.sort_indices() assert_array_equal(M.indices, unsorted_inds) def test_has_canonical_format(self): "Ensure has_canonical_format memoizes state for sum_duplicates" M = csr_matrix((np.array([2]), np.array([0]), np.array([0, 1]))) assert_equal(True, M.has_canonical_format) indices = np.array([0, 0]) # contains duplicate data = np.array([1, 1]) indptr = np.array([0, 2]) M = csr_matrix((data, indices, indptr)).copy() assert_equal(False, M.has_canonical_format) assert type(M.has_canonical_format) == bool # set by deduplicating M.sum_duplicates() assert_equal(True, M.has_canonical_format) assert_equal(1, len(M.indices)) M = csr_matrix((data, indices, indptr)).copy() # set manually (although underlyingly duplicated) M.has_canonical_format = True assert_equal(True, M.has_canonical_format) assert_equal(2, len(M.indices)) # unaffected content # ensure deduplication bypassed when has_canonical_format == True M.sum_duplicates() assert_equal(2, len(M.indices)) # unaffected content def test_scalar_idx_dtype(self): # Check that index dtype takes into account all parameters # passed to sparsetools, including the scalar ones indptr = np.zeros(2, dtype=np.int32) indices 
= np.zeros(0, dtype=np.int32) vals = np.zeros(0) a = csr_matrix((vals, indices, indptr), shape=(1, 2**31-1)) b = csr_matrix((vals, indices, indptr), shape=(1, 2**31)) ij = np.zeros((2, 0), dtype=np.int32) c = csr_matrix((vals, ij), shape=(1, 2**31-1)) d = csr_matrix((vals, ij), shape=(1, 2**31)) e = csr_matrix((1, 2**31-1)) f = csr_matrix((1, 2**31)) assert_equal(a.indptr.dtype, np.int32) assert_equal(b.indptr.dtype, np.int64) assert_equal(c.indptr.dtype, np.int32) assert_equal(d.indptr.dtype, np.int64) assert_equal(e.indptr.dtype, np.int32) assert_equal(f.indptr.dtype, np.int64) # These shouldn't fail for x in [a, b, c, d, e, f]: x + x def test_binop_explicit_zeros(self): # Check that binary ops don't introduce spurious explicit zeros. # See gh-9619 for context. a = csr_matrix([0, 1, 0]) b = csr_matrix([1, 1, 0]) assert (a + b).nnz == 2 assert a.multiply(b).nnz == 1 TestCSR.init_class() class TestCSC(sparse_test_class()): @classmethod def spcreator(cls, *args, **kwargs): with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "Changing the sparsity structure of a csc_matrix is expensive") return csc_matrix(*args, **kwargs) math_dtypes = [np.bool_, np.int_, np.float_, np.complex_] def test_constructor1(self): b = array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 2, 0, 3]], 'd') bsp = csc_matrix(b) assert_array_almost_equal(bsp.data,[1,2,1,3]) assert_array_equal(bsp.indices,[0,2,1,2]) assert_array_equal(bsp.indptr,[0,1,2,3,4]) assert_equal(bsp.getnnz(),4) assert_equal(bsp.shape,b.shape) assert_equal(bsp.getformat(),'csc') def test_constructor2(self): b = zeros((6,6),'d') b[2,4] = 5 bsp = csc_matrix(b) assert_array_almost_equal(bsp.data,[5]) assert_array_equal(bsp.indices,[2]) assert_array_equal(bsp.indptr,[0,0,0,0,0,1,1]) def test_constructor3(self): b = array([[1, 0], [0, 0], [0, 2]], 'd') bsp = csc_matrix(b) assert_array_almost_equal(bsp.data,[1,2]) assert_array_equal(bsp.indices,[0,2]) assert_array_equal(bsp.indptr,[0,1,2]) def test_constructor4(self): # using 
(data, ij) format row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2]) col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1]) data = array([6., 10., 3., 9., 1., 4., 11., 2., 8., 5., 7.]) ij = vstack((row,col)) csc = csc_matrix((data,ij),(4,3)) assert_array_equal(arange(12).reshape(4, 3), csc.toarray()) def test_constructor5(self): # infer dimensions from arrays indptr = array([0,1,3,3]) indices = array([0,5,1,2]) data = array([1,2,3,4]) csc = csc_matrix((data, indices, indptr)) assert_array_equal(csc.shape,(6,3)) def test_constructor6(self): # infer dimensions and dtype from lists indptr = [0, 1, 3, 3] indices = [0, 5, 1, 2] data = [1, 2, 3, 4] csc = csc_matrix((data, indices, indptr)) assert_array_equal(csc.shape,(6,3)) assert_(np.issubdtype(csc.dtype, np.signedinteger)) def test_eliminate_zeros(self): data = array([1, 0, 0, 0, 2, 0, 3, 0]) indices = array([1, 2, 3, 4, 5, 6, 7, 8]) indptr = array([0, 3, 8]) asp = csc_matrix((data, indices, indptr), shape=(10,2)) bsp = asp.copy() asp.eliminate_zeros() assert_array_equal(asp.nnz, 3) assert_array_equal(asp.data,[1, 2, 3]) assert_array_equal(asp.toarray(), bsp.toarray()) def test_sort_indices(self): data = arange(5) row = array([7, 2, 1, 5, 4]) ptr = [0, 3, 5] asp = csc_matrix((data, row, ptr), shape=(10,2)) bsp = asp.copy() asp.sort_indices() assert_array_equal(asp.indices,[1, 2, 7, 4, 5]) assert_array_equal(asp.toarray(), bsp.toarray()) def test_ufuncs(self): X = csc_matrix(np.arange(21).reshape(7, 3) / 21.) 
for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh", "arcsinh", "arctanh", "rint", "sign", "expm1", "log1p", "deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt"]: assert_equal(hasattr(csr_matrix, f), True) X2 = getattr(X, f)() assert_equal(X.shape, X2.shape) assert_array_equal(X.indices, X2.indices) assert_array_equal(X.indptr, X2.indptr) assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray())) def test_unsorted_arithmetic(self): data = arange(5) indices = array([7, 2, 1, 5, 4]) indptr = array([0, 3, 5]) asp = csc_matrix((data, indices, indptr), shape=(10,2)) data = arange(6) indices = array([8, 1, 5, 7, 2, 4]) indptr = array([0, 2, 6]) bsp = csc_matrix((data, indices, indptr), shape=(10,2)) assert_equal((asp + bsp).toarray(), asp.toarray() + bsp.toarray()) def test_fancy_indexing_broadcast(self): # broadcasting indexing mode is supported I = np.array([[1], [2], [3]]) J = np.array([3, 4, 2]) np.random.seed(1234) D = asmatrix(np.random.rand(5, 7)) S = self.spcreator(D) SIJ = S[I,J] if issparse(SIJ): SIJ = SIJ.toarray() assert_equal(SIJ, D[I,J]) def test_scalar_idx_dtype(self): # Check that index dtype takes into account all parameters # passed to sparsetools, including the scalar ones indptr = np.zeros(2, dtype=np.int32) indices = np.zeros(0, dtype=np.int32) vals = np.zeros(0) a = csc_matrix((vals, indices, indptr), shape=(2**31-1, 1)) b = csc_matrix((vals, indices, indptr), shape=(2**31, 1)) ij = np.zeros((2, 0), dtype=np.int32) c = csc_matrix((vals, ij), shape=(2**31-1, 1)) d = csc_matrix((vals, ij), shape=(2**31, 1)) e = csr_matrix((1, 2**31-1)) f = csr_matrix((1, 2**31)) assert_equal(a.indptr.dtype, np.int32) assert_equal(b.indptr.dtype, np.int64) assert_equal(c.indptr.dtype, np.int32) assert_equal(d.indptr.dtype, np.int64) assert_equal(e.indptr.dtype, np.int32) assert_equal(f.indptr.dtype, np.int64) # These shouldn't fail for x in [a, b, c, d, e, f]: x + x TestCSC.init_class() class TestDOK(sparse_test_class(minmax=False, nnz_axis=False)): 
    # factory used by the shared base-class tests to build a DOK matrix
    spcreator = dok_matrix
    # dtypes exercised by the generic arithmetic tests
    math_dtypes = [np.int_, np.float_, np.complex_]

    def test_mult(self):
        """A*A.T and A*A.H must agree for a real-valued DOK matrix."""
        A = dok_matrix((10,10))
        A[0,3] = 10
        A[5,6] = 20
        D = A*A.T
        E = A*A.H
        assert_array_equal(D.A, E.A)

    def test_add_nonzero(self):
        """Adding a scalar densifies correctly (real, then complex)."""
        A = self.spcreator((3,2))
        A[0,1] = -10
        A[2,0] = 20
        A = A + 10
        B = array([[10, 0], [10, 10], [30, 10]])
        assert_array_equal(A.toarray(), B)

        A = A + 1j
        B = B + 1j
        assert_array_equal(A.toarray(), B)

    def test_dok_divide_scalar(self):
        """Scalar division (complex and real) must match dense division."""
        A = self.spcreator((3,2))
        A[0,1] = -10
        A[2,0] = 20

        assert_array_equal((A/1j).toarray(), A.toarray()/1j)
        assert_array_equal((A/9).toarray(), A.toarray()/9)

    def test_convert(self):
        """Conversion to CSC/CSR keeps trailing all-zero columns/rows."""
        # Test provided by Andrew Straw.  Fails in SciPy <= r1477.
        (m, n) = (6, 7)
        a = dok_matrix((m, n))

        # set a few elements, but none in the last column
        a[2,1] = 1
        a[0,2] = 2
        a[3,1] = 3
        a[1,5] = 4
        a[4,3] = 5
        a[4,2] = 6

        # assert that the last column is all zeros
        assert_array_equal(a.toarray()[:,n-1], zeros(m,))

        # make sure it still works for CSC format
        csc = a.tocsc()
        assert_array_equal(csc.toarray()[:,n-1], zeros(m,))

        # now test CSR
        (m, n) = (n, m)
        b = a.transpose()
        assert_equal(b.shape, (m, n))
        # assert that the last row is all zeros
        assert_array_equal(b.toarray()[m-1,:], zeros(n,))

        # make sure it still works for CSR format
        csr = b.tocsr()
        assert_array_equal(csr.toarray()[m-1,:], zeros(n,))

    def test_ctor(self):
        """Constructor accepts dense arrays and lists; rejects no-arg call."""
        # Empty ctor
        assert_raises(TypeError, dok_matrix)

        # Dense ctor
        b = array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 2, 0, 3]], 'd')
        A = dok_matrix(b)
        assert_equal(b.dtype, A.dtype)
        assert_equal(A.toarray(), b)

        # Sparse ctor
        c = csr_matrix(b)
        assert_equal(A.toarray(), c.toarray())

        data = [[0, 1, 2], [3, 0, 0]]
        d = dok_matrix(data, dtype=np.float32)
        assert_equal(d.dtype, np.float32)
        da = d.toarray()
        assert_equal(da.dtype, np.float32)
        assert_array_equal(da, data)

    def test_ticket1160(self):
        # Regression test for ticket #1160.
a = dok_matrix((3,3)) a[0,0] = 0 # This assert would fail, because the above assignment would # incorrectly call __set_item__ even though the value was 0. assert_((0,0) not in a.keys(), "Unexpected entry (0,0) in keys") # Slice assignments were also affected. b = dok_matrix((3,3)) b[:,0] = 0 assert_(len(b.keys()) == 0, "Unexpected entries in keys") TestDOK.init_class() class TestLIL(sparse_test_class(minmax=False)): spcreator = lil_matrix math_dtypes = [np.int_, np.float_, np.complex_] def test_dot(self): A = zeros((10, 10), np.complex128) A[0, 3] = 10 A[5, 6] = 20j B = lil_matrix((10, 10), dtype=np.complex128) B[0, 3] = 10 B[5, 6] = 20j # TODO: properly handle this assertion on ppc64le if platform.machine() != 'ppc64le': assert_array_equal(A @ A.T, (B * B.T).toarray()) assert_array_equal(A @ A.conjugate().T, (B * B.H).toarray()) def test_scalar_mul(self): x = lil_matrix((3, 3)) x[0, 0] = 2 x = x*2 assert_equal(x[0, 0], 4) x = x*0 assert_equal(x[0, 0], 0) def test_inplace_ops(self): A = lil_matrix([[0, 2, 3], [4, 0, 6]]) B = lil_matrix([[0, 1, 0], [0, 2, 3]]) data = {'add': (B, A + B), 'sub': (B, A - B), 'mul': (3, A * 3)} for op, (other, expected) in data.items(): result = A.copy() getattr(result, '__i%s__' % op)(other) assert_array_equal(result.toarray(), expected.toarray()) # Ticket 1604. A = lil_matrix((1, 3), dtype=np.dtype('float64')) B = array([0.1, 0.1, 0.1]) A[0, :] += B assert_array_equal(A[0, :].toarray().squeeze(), B) def test_lil_iteration(self): row_data = [[1, 2, 3], [4, 5, 6]] B = lil_matrix(array(row_data)) for r, row in enumerate(B): assert_array_equal(row.toarray(), array(row_data[r], ndmin=2)) def test_lil_from_csr(self): # Tests whether a lil_matrix can be constructed from a # csr_matrix. 
B = lil_matrix((10, 10)) B[0, 3] = 10 B[5, 6] = 20 B[8, 3] = 30 B[3, 8] = 40 B[8, 9] = 50 C = B.tocsr() D = lil_matrix(C) assert_array_equal(C.A, D.A) def test_fancy_indexing_lil(self): M = asmatrix(arange(25).reshape(5, 5)) A = lil_matrix(M) assert_equal(A[array([1, 2, 3]), 2:3].toarray(), M[array([1, 2, 3]), 2:3]) def test_point_wise_multiply(self): l = lil_matrix((4, 3)) l[0, 0] = 1 l[1, 1] = 2 l[2, 2] = 3 l[3, 1] = 4 m = lil_matrix((4, 3)) m[0, 0] = 1 m[0, 1] = 2 m[2, 2] = 3 m[3, 1] = 4 m[3, 2] = 4 assert_array_equal(l.multiply(m).toarray(), m.multiply(l).toarray()) assert_array_equal(l.multiply(m).toarray(), [[1, 0, 0], [0, 0, 0], [0, 0, 9], [0, 16, 0]]) def test_lil_multiply_removal(self): # Ticket #1427. a = lil_matrix(np.ones((3, 3))) a *= 2. a[0, :] = 0 TestLIL.init_class() class TestCOO(sparse_test_class(getset=False, slicing=False, slicing_assign=False, fancy_indexing=False, fancy_assign=False)): spcreator = coo_matrix math_dtypes = [np.int_, np.float_, np.complex_] def test_constructor1(self): # unsorted triplet format row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2]) col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1]) data = array([6., 10., 3., 9., 1., 4., 11., 2., 8., 5., 7.]) coo = coo_matrix((data,(row,col)),(4,3)) assert_array_equal(arange(12).reshape(4, 3), coo.toarray()) # using Python lists and a specified dtype coo = coo_matrix(([2**63 + 1, 1], ([0, 1], [0, 1])), dtype=np.uint64) dense = array([[2**63 + 1, 0], [0, 1]], dtype=np.uint64) assert_array_equal(dense, coo.toarray()) def test_constructor2(self): # unsorted triplet format with duplicates (which are summed) row = array([0,1,2,2,2,2,0,0,2,2]) col = array([0,2,0,2,1,1,1,0,0,2]) data = array([2,9,-4,5,7,0,-1,2,1,-5]) coo = coo_matrix((data,(row,col)),(3,3)) mat = array([[4, -1, 0], [0, 0, 9], [-3, 7, 0]]) assert_array_equal(mat, coo.toarray()) def test_constructor3(self): # empty matrix coo = coo_matrix((4,3)) assert_array_equal(coo.shape,(4,3)) assert_array_equal(coo.row,[]) 
assert_array_equal(coo.col,[]) assert_array_equal(coo.data,[]) assert_array_equal(coo.toarray(), zeros((4, 3))) def test_constructor4(self): # from dense matrix mat = array([[0,1,0,0], [7,0,3,0], [0,4,0,0]]) coo = coo_matrix(mat) assert_array_equal(coo.toarray(), mat) # upgrade rank 1 arrays to row matrix mat = array([0,1,0,0]) coo = coo_matrix(mat) assert_array_equal(coo.toarray(), mat.reshape(1, -1)) # error if second arg interpreted as shape (gh-9919) with pytest.raises(TypeError, match=r'object cannot be interpreted'): coo_matrix([0, 11, 22, 33], ([0, 1, 2, 3], [0, 0, 0, 0])) # error if explicit shape arg doesn't match the dense matrix with pytest.raises(ValueError, match=r'inconsistent shapes'): coo_matrix([0, 11, 22, 33], shape=(4, 4)) def test_constructor_data_ij_dtypeNone(self): data = [1] coo = coo_matrix((data, ([0], [0])), dtype=None) assert coo.dtype == np.array(data).dtype @pytest.mark.xfail(run=False, reason='COO does not have a __getitem__') def test_iterator(self): pass def test_todia_all_zeros(self): zeros = [[0, 0]] dia = coo_matrix(zeros).todia() assert_array_equal(dia.A, zeros) def test_sum_duplicates(self): coo = coo_matrix((4,3)) coo.sum_duplicates() coo = coo_matrix(([1,2], ([1,0], [1,0]))) coo.sum_duplicates() assert_array_equal(coo.A, [[2,0],[0,1]]) coo = coo_matrix(([1,2], ([1,1], [1,1]))) coo.sum_duplicates() assert_array_equal(coo.A, [[0,0],[0,3]]) assert_array_equal(coo.row, [1]) assert_array_equal(coo.col, [1]) assert_array_equal(coo.data, [3]) def test_todok_duplicates(self): coo = coo_matrix(([1,1,1,1], ([0,2,2,0], [0,1,1,0]))) dok = coo.todok() assert_array_equal(dok.A, coo.A) def test_eliminate_zeros(self): data = array([1, 0, 0, 0, 2, 0, 3, 0]) row = array([0, 0, 0, 1, 1, 1, 1, 1]) col = array([1, 2, 3, 4, 5, 6, 7, 8]) asp = coo_matrix((data, (row, col)), shape=(2,10)) bsp = asp.copy() asp.eliminate_zeros() assert_((asp.data != 0).all()) assert_array_equal(asp.A, bsp.A) def test_reshape_copy(self): arr = [[0, 10, 0, 0], [0, 0, 0, 
0], [0, 20, 30, 40]] new_shape = (2, 6) x = coo_matrix(arr) y = x.reshape(new_shape) assert_(y.data is x.data) y = x.reshape(new_shape, copy=False) assert_(y.data is x.data) y = x.reshape(new_shape, copy=True) assert_(not np.may_share_memory(y.data, x.data)) def test_large_dimensions_reshape(self): # Test that reshape is immune to integer overflow when number of elements # exceeds 2^31-1 mat1 = coo_matrix(([1], ([3000000], [1000])), (3000001, 1001)) mat2 = coo_matrix(([1], ([1000], [3000000])), (1001, 3000001)) # assert_array_equal is slow for big matrices because it expects dense # Using __ne__ and nnz instead assert_((mat1.reshape((1001, 3000001), order='C') != mat2).nnz == 0) assert_((mat2.reshape((3000001, 1001), order='F') != mat1).nnz == 0) TestCOO.init_class() class TestDIA(sparse_test_class(getset=False, slicing=False, slicing_assign=False, fancy_indexing=False, fancy_assign=False, minmax=False, nnz_axis=False)): spcreator = dia_matrix math_dtypes = [np.int_, np.float_, np.complex_] def test_constructor1(self): D = array([[1, 0, 3, 0], [1, 2, 0, 4], [0, 2, 3, 0], [0, 0, 3, 4]]) data = np.array([[1,2,3,4]]).repeat(3,axis=0) offsets = np.array([0,-1,2]) assert_equal(dia_matrix((data, offsets), shape=(4, 4)).toarray(), D) @pytest.mark.xfail(run=False, reason='DIA does not have a __getitem__') def test_iterator(self): pass @with_64bit_maxval_limit(3) def test_setdiag_dtype(self): m = dia_matrix(np.eye(3)) assert_equal(m.offsets.dtype, np.int32) m.setdiag((3,), k=2) assert_equal(m.offsets.dtype, np.int32) m = dia_matrix(np.eye(4)) assert_equal(m.offsets.dtype, np.int64) m.setdiag((3,), k=3) assert_equal(m.offsets.dtype, np.int64) @pytest.mark.skip(reason='DIA stores extra zeros') def test_getnnz_axis(self): pass def test_convert_gh14555(self): # regression test for gh-14555 m = dia_matrix(([[1, 1, 0]], [-1]), shape=(4, 2)) expected = m.toarray() assert_array_equal(m.tocsc().toarray(), expected) assert_array_equal(m.tocsr().toarray(), expected) def 
test_tocoo_gh10050(self): # regression test for gh-10050 m = dia_matrix([[1, 2], [3, 4]]).tocoo() flat_inds = np.ravel_multi_index((m.row, m.col), m.shape) inds_are_sorted = np.all(np.diff(flat_inds) > 0) assert m.has_canonical_format == inds_are_sorted TestDIA.init_class() class TestBSR(sparse_test_class(getset=False, slicing=False, slicing_assign=False, fancy_indexing=False, fancy_assign=False, nnz_axis=False)): spcreator = bsr_matrix math_dtypes = [np.int_, np.float_, np.complex_] def test_constructor1(self): # check native BSR format constructor indptr = array([0,2,2,4]) indices = array([0,2,2,3]) data = zeros((4,2,3)) data[0] = array([[0, 1, 2], [3, 0, 5]]) data[1] = array([[0, 2, 4], [6, 0, 10]]) data[2] = array([[0, 4, 8], [12, 0, 20]]) data[3] = array([[0, 5, 10], [15, 0, 25]]) A = kron([[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]]) Asp = bsr_matrix((data,indices,indptr),shape=(6,12)) assert_equal(Asp.toarray(), A) # infer shape from arrays Asp = bsr_matrix((data,indices,indptr)) assert_equal(Asp.toarray(), A) def test_constructor2(self): # construct from dense # test zero mats for shape in [(1,1), (5,1), (1,10), (10,4), (3,7), (2,1)]: A = zeros(shape) assert_equal(bsr_matrix(A).toarray(), A) A = zeros((4,6)) assert_equal(bsr_matrix(A, blocksize=(2, 2)).toarray(), A) assert_equal(bsr_matrix(A, blocksize=(2, 3)).toarray(), A) A = kron([[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]]) assert_equal(bsr_matrix(A).toarray(), A) assert_equal(bsr_matrix(A, shape=(6, 12)).toarray(), A) assert_equal(bsr_matrix(A, blocksize=(1, 1)).toarray(), A) assert_equal(bsr_matrix(A, blocksize=(2, 3)).toarray(), A) assert_equal(bsr_matrix(A, blocksize=(2, 6)).toarray(), A) assert_equal(bsr_matrix(A, blocksize=(2, 12)).toarray(), A) assert_equal(bsr_matrix(A, blocksize=(3, 12)).toarray(), A) assert_equal(bsr_matrix(A, blocksize=(6, 12)).toarray(), A) A = kron([[1,0,2,0],[0,1,0,0],[0,0,0,0]], [[0,1,2],[3,0,5]]) assert_equal(bsr_matrix(A, blocksize=(2, 3)).toarray(), A) def 
test_constructor3(self): # construct from coo-like (data,(row,col)) format arg = ([1,2,3], ([0,1,1], [0,0,1])) A = array([[1,0],[2,3]]) assert_equal(bsr_matrix(arg, blocksize=(2, 2)).toarray(), A) def test_constructor4(self): # regression test for gh-6292: bsr_matrix((data, indices, indptr)) was # trying to compare an int to a None n = 8 data = np.ones((n, n, 1), dtype=np.int8) indptr = np.array([0, n], dtype=np.int32) indices = np.arange(n, dtype=np.int32) bsr_matrix((data, indices, indptr), blocksize=(n, 1), copy=False) def test_constructor5(self): # check for validations introduced in gh-13400 n = 8 data_1dim = np.ones(n) data = np.ones((n, n, n)) indptr = np.array([0, n]) indices = np.arange(n) with assert_raises(ValueError): # data ndim check bsr_matrix((data_1dim, indices, indptr)) with assert_raises(ValueError): # invalid blocksize bsr_matrix((data, indices, indptr), blocksize=(1, 1, 1)) with assert_raises(ValueError): # mismatching blocksize bsr_matrix((data, indices, indptr), blocksize=(1, 1)) def test_default_dtype(self): # As a numpy array, `values` has shape (2, 2, 1). 
values = [[[1], [1]], [[1], [1]]] indptr = np.array([0, 2], dtype=np.int32) indices = np.array([0, 1], dtype=np.int32) b = bsr_matrix((values, indices, indptr), blocksize=(2, 1)) assert b.dtype == np.array(values).dtype def test_bsr_tocsr(self): # check native conversion from BSR to CSR indptr = array([0, 2, 2, 4]) indices = array([0, 2, 2, 3]) data = zeros((4, 2, 3)) data[0] = array([[0, 1, 2], [3, 0, 5]]) data[1] = array([[0, 2, 4], [6, 0, 10]]) data[2] = array([[0, 4, 8], [12, 0, 20]]) data[3] = array([[0, 5, 10], [15, 0, 25]]) A = kron([[1, 0, 2, 0], [0, 0, 0, 0], [0, 0, 4, 5]], [[0, 1, 2], [3, 0, 5]]) Absr = bsr_matrix((data, indices, indptr), shape=(6, 12)) Acsr = Absr.tocsr() Acsr_via_coo = Absr.tocoo().tocsr() assert_equal(Acsr.toarray(), A) assert_equal(Acsr.toarray(), Acsr_via_coo.toarray()) def test_eliminate_zeros(self): data = kron([1, 0, 0, 0, 2, 0, 3, 0], [[1,1],[1,1]]).T data = data.reshape(-1,2,2) indices = array([1, 2, 3, 4, 5, 6, 7, 8]) indptr = array([0, 3, 8]) asp = bsr_matrix((data, indices, indptr), shape=(4,20)) bsp = asp.copy() asp.eliminate_zeros() assert_array_equal(asp.nnz, 3*4) assert_array_equal(asp.toarray(), bsp.toarray()) # github issue #9687 def test_eliminate_zeros_all_zero(self): np.random.seed(0) m = bsr_matrix(np.random.random((12, 12)), blocksize=(2, 3)) # eliminate some blocks, but not all m.data[m.data <= 0.9] = 0 m.eliminate_zeros() assert_equal(m.nnz, 66) assert_array_equal(m.data.shape, (11, 2, 3)) # eliminate all remaining blocks m.data[m.data <= 1.0] = 0 m.eliminate_zeros() assert_equal(m.nnz, 0) assert_array_equal(m.data.shape, (0, 2, 3)) assert_array_equal(m.toarray(), np.zeros((12, 12))) # test fast path m.eliminate_zeros() assert_equal(m.nnz, 0) assert_array_equal(m.data.shape, (0, 2, 3)) assert_array_equal(m.toarray(), np.zeros((12, 12))) def test_bsr_matvec(self): A = bsr_matrix(arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5)) x = arange(A.shape[1]).reshape(-1,1) assert_equal(A*x, A.toarray() @ x) def 
test_bsr_matvecs(self): A = bsr_matrix(arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5)) x = arange(A.shape[1]*6).reshape(-1,6) assert_equal(A*x, A.toarray() @ x) @pytest.mark.xfail(run=False, reason='BSR does not have a __getitem__') def test_iterator(self): pass @pytest.mark.xfail(run=False, reason='BSR does not have a __setitem__') def test_setdiag(self): pass def test_resize_blocked(self): # test resize() with non-(1,1) blocksize D = np.array([[1, 0, 3, 4], [2, 0, 0, 0], [3, 0, 0, 0]]) S = self.spcreator(D, blocksize=(1, 2)) assert_(S.resize((3, 2)) is None) assert_array_equal(S.A, [[1, 0], [2, 0], [3, 0]]) S.resize((2, 2)) assert_array_equal(S.A, [[1, 0], [2, 0]]) S.resize((3, 2)) assert_array_equal(S.A, [[1, 0], [2, 0], [0, 0]]) S.resize((3, 4)) assert_array_equal(S.A, [[1, 0, 0, 0], [2, 0, 0, 0], [0, 0, 0, 0]]) assert_raises(ValueError, S.resize, (2, 3)) @pytest.mark.xfail(run=False, reason='BSR does not have a __setitem__') def test_setdiag_comprehensive(self): pass @pytest.mark.skipif(IS_COLAB, reason="exceeds memory limit") def test_scalar_idx_dtype(self): # Check that index dtype takes into account all parameters # passed to sparsetools, including the scalar ones indptr = np.zeros(2, dtype=np.int32) indices = np.zeros(0, dtype=np.int32) vals = np.zeros((0, 1, 1)) a = bsr_matrix((vals, indices, indptr), shape=(1, 2**31-1)) b = bsr_matrix((vals, indices, indptr), shape=(1, 2**31)) c = bsr_matrix((1, 2**31-1)) d = bsr_matrix((1, 2**31)) assert_equal(a.indptr.dtype, np.int32) assert_equal(b.indptr.dtype, np.int64) assert_equal(c.indptr.dtype, np.int32) assert_equal(d.indptr.dtype, np.int64) try: vals2 = np.zeros((0, 1, 2**31-1)) vals3 = np.zeros((0, 1, 2**31)) e = bsr_matrix((vals2, indices, indptr), shape=(1, 2**31-1)) f = bsr_matrix((vals3, indices, indptr), shape=(1, 2**31)) assert_equal(e.indptr.dtype, np.int32) assert_equal(f.indptr.dtype, np.int64) except (MemoryError, ValueError): # May fail on 32-bit Python e = 0 f = 0 # These shouldn't fail for x 
in [a, b, c, d, e, f]: x + x TestBSR.init_class() #------------------------------------------------------------------------------ # Tests for non-canonical representations (with duplicates, unsorted indices) #------------------------------------------------------------------------------ def _same_sum_duplicate(data, *inds, **kwargs): """Duplicates entries to produce the same matrix""" indptr = kwargs.pop('indptr', None) if np.issubdtype(data.dtype, np.bool_) or \ np.issubdtype(data.dtype, np.unsignedinteger): if indptr is None: return (data,) + inds else: return (data,) + inds + (indptr,) zeros_pos = (data == 0).nonzero() # duplicate data data = data.repeat(2, axis=0) data[::2] -= 1 data[1::2] = 1 # don't spoil all explicit zeros if zeros_pos[0].size > 0: pos = tuple(p[0] for p in zeros_pos) pos1 = (2*pos[0],) + pos[1:] pos2 = (2*pos[0]+1,) + pos[1:] data[pos1] = 0 data[pos2] = 0 inds = tuple(indices.repeat(2) for indices in inds) if indptr is None: return (data,) + inds else: return (data,) + inds + (indptr * 2,) class _NonCanonicalMixin: def spcreator(self, D, sorted_indices=False, **kwargs): """Replace D with a non-canonical equivalent: containing duplicate elements and explicit zeros""" construct = super().spcreator M = construct(D, **kwargs) zero_pos = (M.A == 0).nonzero() has_zeros = (zero_pos[0].size > 0) if has_zeros: k = zero_pos[0].size//2 with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "Changing the sparsity structure of a cs[cr]_matrix is expensive") M = self._insert_explicit_zero(M, zero_pos[0][k], zero_pos[1][k]) arg1 = self._arg1_for_noncanonical(M, sorted_indices) if 'shape' not in kwargs: kwargs['shape'] = M.shape NC = construct(arg1, **kwargs) # check that result is valid if NC.dtype in [np.float32, np.complex64]: # For single-precision floats, the differences between M and NC # that are introduced by the extra operations involved in the # construction of NC necessitate a more lenient tolerance level # than the default. 
rtol = 1e-05 else: rtol = 1e-07 assert_allclose(NC.A, M.A, rtol=rtol) # check that at least one explicit zero if has_zeros: assert_((NC.data == 0).any()) # TODO check that NC has duplicates (which are not explicit zeros) return NC @pytest.mark.skip(reason='bool(matrix) counts explicit zeros') def test_bool(self): pass @pytest.mark.skip(reason='getnnz-axis counts explicit zeros') def test_getnnz_axis(self): pass @pytest.mark.skip(reason='nnz counts explicit zeros') def test_empty(self): pass class _NonCanonicalCompressedMixin(_NonCanonicalMixin): def _arg1_for_noncanonical(self, M, sorted_indices=False): """Return non-canonical constructor arg1 equivalent to M""" data, indices, indptr = _same_sum_duplicate(M.data, M.indices, indptr=M.indptr) if not sorted_indices: for start, stop in zip(indptr, indptr[1:]): indices[start:stop] = indices[start:stop][::-1].copy() data[start:stop] = data[start:stop][::-1].copy() return data, indices, indptr def _insert_explicit_zero(self, M, i, j): M[i,j] = 0 return M class _NonCanonicalCSMixin(_NonCanonicalCompressedMixin): def test_getelement(self): def check(dtype, sorted_indices): D = array([[1,0,0], [4,3,0], [0,2,0], [0,0,0]], dtype=dtype) A = self.spcreator(D, sorted_indices=sorted_indices) M,N = D.shape for i in range(-M, M): for j in range(-N, N): assert_equal(A[i,j], D[i,j]) for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1), (1, 2, 3)]: assert_raises((IndexError, TypeError), A.__getitem__, ij) for dtype in supported_dtypes: for sorted_indices in [False, True]: check(np.dtype(dtype), sorted_indices) def test_setitem_sparse(self): D = np.eye(3) A = self.spcreator(D) B = self.spcreator([[1,2,3]]) D[1,:] = B.toarray() with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "Changing the sparsity structure of a cs[cr]_matrix is expensive") A[1,:] = B assert_array_equal(A.toarray(), D) D[:,2] = B.toarray().ravel() with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "Changing the sparsity structure of a 
cs[cr]_matrix is expensive") A[:,2] = B.T assert_array_equal(A.toarray(), D) @pytest.mark.xfail(run=False, reason='inverse broken with non-canonical matrix') def test_inv(self): pass @pytest.mark.xfail(run=False, reason='solve broken with non-canonical matrix') def test_solve(self): pass class TestCSRNonCanonical(_NonCanonicalCSMixin, TestCSR): pass class TestCSCNonCanonical(_NonCanonicalCSMixin, TestCSC): pass class TestBSRNonCanonical(_NonCanonicalCompressedMixin, TestBSR): def _insert_explicit_zero(self, M, i, j): x = M.tocsr() x[i,j] = 0 return x.tobsr(blocksize=M.blocksize) @pytest.mark.xfail(run=False, reason='diagonal broken with non-canonical BSR') def test_diagonal(self): pass @pytest.mark.xfail(run=False, reason='expm broken with non-canonical BSR') def test_expm(self): pass class TestCOONonCanonical(_NonCanonicalMixin, TestCOO): def _arg1_for_noncanonical(self, M, sorted_indices=None): """Return non-canonical constructor arg1 equivalent to M""" data, row, col = _same_sum_duplicate(M.data, M.row, M.col) return data, (row, col) def _insert_explicit_zero(self, M, i, j): M.data = np.r_[M.data.dtype.type(0), M.data] M.row = np.r_[M.row.dtype.type(i), M.row] M.col = np.r_[M.col.dtype.type(j), M.col] return M def test_setdiag_noncanonical(self): m = self.spcreator(np.eye(3)) m.sum_duplicates() m.setdiag([3, 2], k=1) m.sum_duplicates() assert_(np.all(np.diff(m.col) >= 0)) def cases_64bit(): TEST_CLASSES = [TestBSR, TestCOO, TestCSC, TestCSR, TestDIA, # lil/dok->other conversion operations have get_index_dtype TestDOK, TestLIL ] # The following features are missing, so skip the tests: SKIP_TESTS = { 'test_expm': 'expm for 64-bit indices not available', 'test_inv': 'linsolve for 64-bit indices not available', 'test_solve': 'linsolve for 64-bit indices not available', 'test_scalar_idx_dtype': 'test implemented in base class', 'test_large_dimensions_reshape': 'test actually requires 64-bit to work', 'test_constructor_smallcol': 'test verifies int32 indexes', 
'test_constructor_largecol': 'test verifies int64 indexes', } for cls in TEST_CLASSES: for method_name in sorted(dir(cls)): method = getattr(cls, method_name) if (method_name.startswith('test_') and not getattr(method, 'slow', False)): marks = [] msg = SKIP_TESTS.get(method_name) if bool(msg): marks += [pytest.mark.skip(reason=msg)] if _pep440.parse(pytest.__version__) >= _pep440.Version("3.6.0"): markers = getattr(method, 'pytestmark', []) for mark in markers: if mark.name in ('skipif', 'skip', 'xfail', 'xslow'): marks.append(mark) else: for mname in ['skipif', 'skip', 'xfail', 'xslow']: if hasattr(method, mname): marks += [getattr(method, mname)] yield pytest.param(cls, method_name, marks=marks) class Test64Bit: MAT_CLASSES = [bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dia_matrix] def _create_some_matrix(self, mat_cls, m, n): return mat_cls(np.random.rand(m, n)) def _compare_index_dtype(self, m, dtype): dtype = np.dtype(dtype) if isinstance(m, (csc_matrix, csr_matrix, bsr_matrix)): return (m.indices.dtype == dtype) and (m.indptr.dtype == dtype) elif isinstance(m, coo_matrix): return (m.row.dtype == dtype) and (m.col.dtype == dtype) elif isinstance(m, dia_matrix): return (m.offsets.dtype == dtype) else: raise ValueError(f"matrix {m!r} has no integer indices") def test_decorator_maxval_limit(self): # Test that the with_64bit_maxval_limit decorator works @with_64bit_maxval_limit(maxval_limit=10) def check(mat_cls): m = mat_cls(np.random.rand(10, 1)) assert_(self._compare_index_dtype(m, np.int32)) m = mat_cls(np.random.rand(11, 1)) assert_(self._compare_index_dtype(m, np.int64)) for mat_cls in self.MAT_CLASSES: check(mat_cls) def test_decorator_maxval_random(self): # Test that the with_64bit_maxval_limit decorator works (2) @with_64bit_maxval_limit(random=True) def check(mat_cls): seen_32 = False seen_64 = False for k in range(100): m = self._create_some_matrix(mat_cls, 9, 9) seen_32 = seen_32 or self._compare_index_dtype(m, np.int32) seen_64 = seen_64 or 
self._compare_index_dtype(m, np.int64) if seen_32 and seen_64: break else: raise AssertionError("both 32 and 64 bit indices not seen") for mat_cls in self.MAT_CLASSES: check(mat_cls) def _check_resiliency(self, cls, method_name, **kw): # Resiliency test, to check that sparse matrices deal reasonably # with varying index data types. @with_64bit_maxval_limit(**kw) def check(cls, method_name): instance = cls() if hasattr(instance, 'setup_method'): instance.setup_method() try: getattr(instance, method_name)() finally: if hasattr(instance, 'teardown_method'): instance.teardown_method() check(cls, method_name) @pytest.mark.parametrize('cls,method_name', cases_64bit()) def test_resiliency_limit_10(self, cls, method_name): self._check_resiliency(cls, method_name, maxval_limit=10) @pytest.mark.parametrize('cls,method_name', cases_64bit()) def test_resiliency_random(self, cls, method_name): # bsr_matrix.eliminate_zeros relies on csr_matrix constructor # not making copies of index arrays --- this is not # necessarily true when we pick the index data type randomly self._check_resiliency(cls, method_name, random=True) @pytest.mark.parametrize('cls,method_name', cases_64bit()) def test_resiliency_all_32(self, cls, method_name): self._check_resiliency(cls, method_name, fixed_dtype=np.int32) @pytest.mark.parametrize('cls,method_name', cases_64bit()) def test_resiliency_all_64(self, cls, method_name): self._check_resiliency(cls, method_name, fixed_dtype=np.int64) @pytest.mark.parametrize('cls,method_name', cases_64bit()) def test_no_64(self, cls, method_name): self._check_resiliency(cls, method_name, assert_32bit=True) def test_downcast_intp(self): # Check that bincount and ufunc.reduceat intp downcasts are # dealt with. The point here is to trigger points in the code # that can fail on 32-bit systems when using 64-bit indices, # due to use of functions that only work with intp-size # indices. 
@with_64bit_maxval_limit(fixed_dtype=np.int64, downcast_maxval=1) def check_limited(): # These involve indices larger than `downcast_maxval` a = csc_matrix([[1, 2], [3, 4], [5, 6]]) assert_raises(AssertionError, a.getnnz, axis=1) assert_raises(AssertionError, a.sum, axis=0) a = csr_matrix([[1, 2, 3], [3, 4, 6]]) assert_raises(AssertionError, a.getnnz, axis=0) a = coo_matrix([[1, 2, 3], [3, 4, 5]]) assert_raises(AssertionError, a.getnnz, axis=0) @with_64bit_maxval_limit(fixed_dtype=np.int64) def check_unlimited(): # These involve indices larger than `downcast_maxval` a = csc_matrix([[1, 2], [3, 4], [5, 6]]) a.getnnz(axis=1) a.sum(axis=0) a = csr_matrix([[1, 2, 3], [3, 4, 6]]) a.getnnz(axis=0) a = coo_matrix([[1, 2, 3], [3, 4, 5]]) a.getnnz(axis=0) check_limited() check_unlimited()
186,416
35.75414
122
py
scipy
scipy-main/scipy/sparse/tests/test_array_api.py
import pytest import numpy as np import numpy.testing as npt import scipy.sparse import scipy.sparse.linalg as spla sparray_types = ('bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil') sparray_classes = [ getattr(scipy.sparse, f'{T}_array') for T in sparray_types ] A = np.array([ [0, 1, 2, 0], [2, 0, 0, 3], [1, 4, 0, 0] ]) B = np.array([ [0, 1], [2, 0] ]) X = np.array([ [1, 0, 0, 1], [2, 1, 2, 0], [0, 2, 1, 0], [0, 0, 1, 2] ], dtype=float) sparrays = [sparray(A) for sparray in sparray_classes] square_sparrays = [sparray(B) for sparray in sparray_classes] eig_sparrays = [sparray(X) for sparray in sparray_classes] parametrize_sparrays = pytest.mark.parametrize( "A", sparrays, ids=sparray_types ) parametrize_square_sparrays = pytest.mark.parametrize( "B", square_sparrays, ids=sparray_types ) parametrize_eig_sparrays = pytest.mark.parametrize( "X", eig_sparrays, ids=sparray_types ) @parametrize_sparrays def test_sum(A): assert not isinstance(A.sum(axis=0), np.matrix), \ "Expected array, got matrix" assert A.sum(axis=0).shape == (4,) assert A.sum(axis=1).shape == (3,) @parametrize_sparrays def test_mean(A): assert not isinstance(A.mean(axis=1), np.matrix), \ "Expected array, got matrix" @parametrize_sparrays def test_min_max(A): # Some formats don't support min/max operations, so we skip those here. 
if hasattr(A, 'min'): assert not isinstance(A.min(axis=1), np.matrix), \ "Expected array, got matrix" if hasattr(A, 'max'): assert not isinstance(A.max(axis=1), np.matrix), \ "Expected array, got matrix" if hasattr(A, 'argmin'): assert not isinstance(A.argmin(axis=1), np.matrix), \ "Expected array, got matrix" if hasattr(A, 'argmax'): assert not isinstance(A.argmax(axis=1), np.matrix), \ "Expected array, got matrix" @parametrize_sparrays def test_todense(A): assert not isinstance(A.todense(), np.matrix), \ "Expected array, got matrix" @parametrize_sparrays def test_indexing(A): if A.__class__.__name__[:3] in ('dia', 'coo', 'bsr'): return with pytest.raises(NotImplementedError): A[1, :] with pytest.raises(NotImplementedError): A[:, 1] with pytest.raises(NotImplementedError): A[1, [1, 2]] with pytest.raises(NotImplementedError): A[[1, 2], 1] assert A[[0]]._is_array, "Expected sparse array, got sparse matrix" assert A[1, [[1, 2]]]._is_array, "Expected ndarray, got sparse array" assert A[[[1, 2]], 1]._is_array, "Expected ndarray, got sparse array" assert A[:, [1, 2]]._is_array, "Expected sparse array, got something else" @parametrize_sparrays def test_dense_addition(A): X = np.random.random(A.shape) assert not isinstance(A + X, np.matrix), "Expected array, got matrix" @parametrize_sparrays def test_sparse_addition(A): assert (A + A)._is_array, "Expected array, got matrix" @parametrize_sparrays def test_elementwise_mul(A): assert np.all((A * A).todense() == A.power(2).todense()) @parametrize_sparrays def test_elementwise_rmul(A): with pytest.raises(TypeError): None * A with pytest.raises(ValueError): np.eye(3) * scipy.sparse.csr_array(np.arange(6).reshape(2, 3)) assert np.all((2 * A) == (A.todense() * 2)) assert np.all((A.todense() * A) == (A.todense() ** 2)) @parametrize_sparrays def test_matmul(A): assert np.all((A @ A.T).todense() == A.dot(A.T).todense()) @parametrize_square_sparrays def test_pow(B): assert (B**0)._is_array, "Expected array, got matrix" assert 
(B**2)._is_array, "Expected array, got matrix" @parametrize_sparrays def test_sparse_divide(A): assert isinstance(A / A, np.ndarray) @parametrize_sparrays def test_sparse_dense_divide(A): with pytest.warns(RuntimeWarning): assert (A / A.todense())._is_array @parametrize_sparrays def test_dense_divide(A): assert (A / 2)._is_array, "Expected array, got matrix" @parametrize_sparrays def test_no_A_attr(A): with pytest.warns(np.VisibleDeprecationWarning): A.A @parametrize_sparrays def test_no_H_attr(A): with pytest.warns(np.VisibleDeprecationWarning): A.H @parametrize_sparrays def test_getrow_getcol(A): assert A._getcol(0)._is_array assert A._getrow(0)._is_array # -- linalg -- @parametrize_sparrays def test_as_linearoperator(A): L = spla.aslinearoperator(A) npt.assert_allclose(L * [1, 2, 3, 4], A @ [1, 2, 3, 4]) @parametrize_square_sparrays def test_inv(B): if B.__class__.__name__[:3] != 'csc': return C = spla.inv(B) assert C._is_array npt.assert_allclose(C.todense(), np.linalg.inv(B.todense())) @parametrize_square_sparrays def test_expm(B): if B.__class__.__name__[:3] != 'csc': return Bmat = scipy.sparse.csc_matrix(B) C = spla.expm(B) assert C._is_array npt.assert_allclose( C.todense(), spla.expm(Bmat).todense() ) @parametrize_square_sparrays def test_expm_multiply(B): if B.__class__.__name__[:3] != 'csc': return npt.assert_allclose( spla.expm_multiply(B, np.array([1, 2])), spla.expm(B) @ [1, 2] ) @parametrize_sparrays def test_norm(A): C = spla.norm(A) npt.assert_allclose(C, np.linalg.norm(A.todense())) @parametrize_square_sparrays def test_onenormest(B): C = spla.onenormest(B) npt.assert_allclose(C, np.linalg.norm(B.todense(), 1)) @parametrize_square_sparrays def test_spsolve(B): if B.__class__.__name__[:3] not in ('csc', 'csr'): return npt.assert_allclose( spla.spsolve(B, [1, 2]), np.linalg.solve(B.todense(), [1, 2]) ) def test_spsolve_triangular(): X = scipy.sparse.csr_array([ [1, 0, 0, 0], [2, 1, 0, 0], [3, 2, 1, 0], [4, 3, 2, 1], ]) spla.spsolve_triangular(X, [1, 
2, 3, 4]) @parametrize_square_sparrays def test_factorized(B): if B.__class__.__name__[:3] != 'csc': return LU = spla.factorized(B) npt.assert_allclose( LU(np.array([1, 2])), np.linalg.solve(B.todense(), [1, 2]) ) @parametrize_square_sparrays @pytest.mark.parametrize( "solver", ["bicg", "bicgstab", "cg", "cgs", "gmres", "lgmres", "minres", "qmr", "gcrotmk", "tfqmr"] ) def test_solvers(B, solver): if solver == "minres": kwargs = {} else: kwargs = {'atol': 1e-5} x, info = getattr(spla, solver)(B, np.array([1, 2]), **kwargs) assert info >= 0 # no errors, even if perhaps did not converge fully npt.assert_allclose(x, [1, 1], atol=1e-1) @parametrize_sparrays @pytest.mark.parametrize( "solver", ["lsqr", "lsmr"] ) def test_lstsqr(A, solver): x, *_ = getattr(spla, solver)(A, [1, 2, 3]) npt.assert_allclose(A @ x, [1, 2, 3]) @parametrize_eig_sparrays def test_eigs(X): e, v = spla.eigs(X, k=1) npt.assert_allclose( X @ v, e[0] * v ) @parametrize_eig_sparrays def test_eigsh(X): X = X + X.T e, v = spla.eigsh(X, k=1) npt.assert_allclose( X @ v, e[0] * v ) @parametrize_eig_sparrays def test_svds(X): u, s, vh = spla.svds(X, k=3) u2, s2, vh2 = np.linalg.svd(X.todense()) s = np.sort(s) s2 = np.sort(s2[:3]) npt.assert_allclose(s, s2, atol=1e-3) def test_splu(): X = scipy.sparse.csc_array([ [1, 0, 0, 0], [2, 1, 0, 0], [3, 2, 1, 0], [4, 3, 2, 1], ]) LU = spla.splu(X) npt.assert_allclose(LU.solve(np.array([1, 2, 3, 4])), [1, 0, 0, 0]) def test_spilu(): X = scipy.sparse.csc_array([ [1, 0, 0, 0], [2, 1, 0, 0], [3, 2, 1, 0], [4, 3, 2, 1], ]) LU = spla.spilu(X) npt.assert_allclose(LU.solve(np.array([1, 2, 3, 4])), [1, 0, 0, 0]) @parametrize_sparrays def test_power_operator(A): # https://github.com/scipy/scipy/issues/15948 npt.assert_equal((A**2).todense(), (A.todense())**2) @pytest.mark.parametrize( "cls,indices_attrs", [ ( scipy.sparse.csr_array, ["indices", "indptr"], ), ( scipy.sparse.csc_array, ["indices", "indptr"], ), ( scipy.sparse.coo_array, ["row", "col"], ), ] ) 
@pytest.mark.parametrize("expected_dtype", [np.int64, np.int32]) def test_index_dtype_compressed(cls, indices_attrs, expected_dtype): input_array = scipy.sparse.coo_array(np.arange(9).reshape(3, 3)) coo_tuple = ( input_array.data, ( input_array.row.astype(expected_dtype), input_array.col.astype(expected_dtype), ) ) result = cls(coo_tuple) for attr in indices_attrs: assert getattr(result, attr).dtype == expected_dtype result = cls(coo_tuple, shape=(3, 3)) for attr in indices_attrs: assert getattr(result, attr).dtype == expected_dtype if issubclass(cls, scipy.sparse._compressed._cs_matrix): input_array_csr = input_array.tocsr() csr_tuple = ( input_array_csr.data, input_array_csr.indices.astype(expected_dtype), input_array_csr.indptr.astype(expected_dtype), ) result = cls(csr_tuple) for attr in indices_attrs: assert getattr(result, attr).dtype == expected_dtype result = cls(csr_tuple, shape=(3, 3)) for attr in indices_attrs: assert getattr(result, attr).dtype == expected_dtype def test_default_is_matrix_diags(): m = scipy.sparse.diags([0, 1, 2]) assert not m._is_array def test_default_is_matrix_eye(): m = scipy.sparse.eye(3) assert not m._is_array def test_default_is_matrix_spdiags(): m = scipy.sparse.spdiags([1, 2, 3], 0, 3, 3) assert not m._is_array def test_default_is_matrix_identity(): m = scipy.sparse.identity(3) assert not m._is_array def test_default_is_matrix_kron_dense(): m = scipy.sparse.kron( np.array([[1, 2], [3, 4]]), np.array([[4, 3], [2, 1]]) ) assert not m._is_array def test_default_is_matrix_kron_sparse(): m = scipy.sparse.kron( np.array([[1, 2], [3, 4]]), np.array([[1, 0], [0, 0]]) ) assert not m._is_array def test_default_is_matrix_kronsum(): m = scipy.sparse.kronsum( np.array([[1, 0], [0, 1]]), np.array([[0, 1], [1, 0]]) ) assert not m._is_array def test_default_is_matrix_random(): m = scipy.sparse.random(3, 3) assert not m._is_array def test_default_is_matrix_rand(): m = scipy.sparse.rand(3, 3) assert not m._is_array @pytest.mark.parametrize("fn", 
(scipy.sparse.hstack, scipy.sparse.vstack)) def test_default_is_matrix_stacks(fn): """Same idea as `test_default_construction_fn_matrices`, but for the stacking creation functions.""" A = scipy.sparse.coo_matrix(np.eye(2)) B = scipy.sparse.coo_matrix([[0, 1], [1, 0]]) m = fn([A, B]) assert not m._is_array def test_blocks_default_construction_fn_matrices(): """Same idea as `test_default_construction_fn_matrices`, but for the block creation function""" A = scipy.sparse.coo_matrix(np.eye(2)) B = scipy.sparse.coo_matrix([[2], [0]]) C = scipy.sparse.coo_matrix([[3]]) # block diag m = scipy.sparse.block_diag((A, B, C)) assert not m._is_array # bmat m = scipy.sparse.bmat([[A, None], [None, C]]) assert not m._is_array def test_format_property(): for fmt in sparray_types: arr_cls = getattr(scipy.sparse, f"{fmt}_array") M = arr_cls([[1, 2]]) assert M.format == fmt assert M._format == fmt with pytest.raises(AttributeError): M.format = "qqq" def test_issparse(): m = scipy.sparse.eye(3) a = scipy.sparse.csr_array(m) assert not m._is_array assert a._is_array # Both sparse arrays and sparse matrices should be sparse assert scipy.sparse.issparse(a) assert scipy.sparse.issparse(m) # ndarray and array_likes are not sparse assert not scipy.sparse.issparse(a.todense()) assert not scipy.sparse.issparse(m.todense()) def test_isspmatrix(): m = scipy.sparse.eye(3) a = scipy.sparse.csr_array(m) assert not m._is_array assert a._is_array # Should only be true for sparse matrices, not sparse arrays assert not scipy.sparse.isspmatrix(a) assert scipy.sparse.isspmatrix(m) # ndarray and array_likes are not sparse assert not scipy.sparse.isspmatrix(a.todense()) assert not scipy.sparse.isspmatrix(m.todense()) @pytest.mark.parametrize( ("fmt", "fn"), ( ("bsr", scipy.sparse.isspmatrix_bsr), ("coo", scipy.sparse.isspmatrix_coo), ("csc", scipy.sparse.isspmatrix_csc), ("csr", scipy.sparse.isspmatrix_csr), ("dia", scipy.sparse.isspmatrix_dia), ("dok", scipy.sparse.isspmatrix_dok), ("lil", 
scipy.sparse.isspmatrix_lil), ), ) def test_isspmatrix_format(fmt, fn): m = scipy.sparse.eye(3, format=fmt) a = scipy.sparse.csr_array(m).asformat(fmt) assert not m._is_array assert a._is_array # Should only be true for sparse matrices, not sparse arrays assert not fn(a) assert fn(m) # ndarray and array_likes are not sparse assert not fn(a.todense()) assert not fn(m.todense())
13,436
23.520073
78
py
scipy
scipy-main/scipy/sparse/tests/test_csc.py
import numpy as np from numpy.testing import assert_array_almost_equal, assert_ from scipy.sparse import csr_matrix, csc_matrix, lil_matrix import pytest def test_csc_getrow(): N = 10 np.random.seed(0) X = np.random.random((N, N)) X[X > 0.7] = 0 Xcsc = csc_matrix(X) for i in range(N): arr_row = X[i:i + 1, :] csc_row = Xcsc.getrow(i) assert_array_almost_equal(arr_row, csc_row.toarray()) assert_(type(csc_row) is csr_matrix) def test_csc_getcol(): N = 10 np.random.seed(0) X = np.random.random((N, N)) X[X > 0.7] = 0 Xcsc = csc_matrix(X) for i in range(N): arr_col = X[:, i:i + 1] csc_col = Xcsc.getcol(i) assert_array_almost_equal(arr_col, csc_col.toarray()) assert_(type(csc_col) is csc_matrix) @pytest.mark.parametrize("matrix_input, axis, expected_shape", [(csc_matrix([[1, 0], [0, 0], [0, 2]]), 0, (0, 2)), (csc_matrix([[1, 0], [0, 0], [0, 2]]), 1, (3, 0)), (csc_matrix([[1, 0], [0, 0], [0, 2]]), 'both', (0, 0)), (csc_matrix([[0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 2, 3, 0, 1]]), 0, (0, 6))]) def test_csc_empty_slices(matrix_input, axis, expected_shape): # see gh-11127 for related discussion slice_1 = matrix_input.A.shape[0] - 1 slice_2 = slice_1 slice_3 = slice_2 - 1 if axis == 0: actual_shape_1 = matrix_input[slice_1:slice_2, :].A.shape actual_shape_2 = matrix_input[slice_1:slice_3, :].A.shape elif axis == 1: actual_shape_1 = matrix_input[:, slice_1:slice_2].A.shape actual_shape_2 = matrix_input[:, slice_1:slice_3].A.shape elif axis == 'both': actual_shape_1 = matrix_input[slice_1:slice_2, slice_1:slice_2].A.shape actual_shape_2 = matrix_input[slice_1:slice_3, slice_1:slice_3].A.shape assert actual_shape_1 == expected_shape assert actual_shape_1 == actual_shape_2 @pytest.mark.parametrize('ax', (-2, -1, 0, 1, None)) def test_argmax_overflow(ax): # See gh-13646: Windows integer overflow for large sparse matrices. 
dim = (100000, 100000) A = lil_matrix(dim) A[-2, -2] = 42 A[-3, -3] = 0.1234 A = csc_matrix(A) idx = A.argmax(axis=ax) if ax is None: # idx is a single flattened index # that we need to convert to a 2d index pair; # can't do this with np.unravel_index because # the dimensions are too large ii = idx % dim[0] jj = idx // dim[0] else: # idx is an array of size of A.shape[ax]; # check the max index to make sure no overflows # we encountered assert np.count_nonzero(idx) == A.nnz ii, jj = np.max(idx), np.argmax(idx) assert A[ii, jj] == A[-2, -2]
2,902
28.323232
79
py
scipy
scipy-main/scipy/sparse/linalg/_expm_multiply.py
"""Compute the action of the matrix exponential.""" from warnings import warn import numpy as np import scipy.linalg import scipy.sparse.linalg from scipy.linalg._decomp_qr import qr from scipy.sparse._sputils import is_pydata_spmatrix from scipy.sparse.linalg import aslinearoperator from scipy.sparse.linalg._interface import IdentityOperator from scipy.sparse.linalg._onenormest import onenormest __all__ = ['expm_multiply'] def _exact_inf_norm(A): # A compatibility function which should eventually disappear. if scipy.sparse.issparse(A): return max(abs(A).sum(axis=1).flat) elif is_pydata_spmatrix(A): return max(abs(A).sum(axis=1)) else: return np.linalg.norm(A, np.inf) def _exact_1_norm(A): # A compatibility function which should eventually disappear. if scipy.sparse.issparse(A): return max(abs(A).sum(axis=0).flat) elif is_pydata_spmatrix(A): return max(abs(A).sum(axis=0)) else: return np.linalg.norm(A, 1) def _trace(A): # A compatibility function which should eventually disappear. if is_pydata_spmatrix(A): return A.to_scipy_sparse().trace() else: return A.trace() def traceest(A, m3, seed=None): """Estimate `np.trace(A)` using `3*m3` matrix-vector products. The result is not deterministic. Parameters ---------- A : LinearOperator Linear operator whose trace will be estimated. Has to be square. m3 : int Number of matrix-vector products divided by 3 used to estimate the trace. seed : optional Seed for `numpy.random.default_rng`. Can be provided to obtain deterministic results. Returns ------- trace : LinearOperator.dtype Estimate of the trace Notes ----- This is the Hutch++ algorithm given in [1]_. References ---------- .. [1] Meyer, Raphael A., Cameron Musco, Christopher Musco, and David P. Woodruff. "Hutch++: Optimal Stochastic Trace Estimation." In Symposium on Simplicity in Algorithms (SOSA), pp. 142-155. 
Society for Industrial and Applied Mathematics, 2021 https://doi.org/10.1137/1.9781611976496.16 """ rng = np.random.default_rng(seed) if len(A.shape) != 2 or A.shape[-1] != A.shape[-2]: raise ValueError("Expected A to be like a square matrix.") n = A.shape[-1] S = rng.choice([-1.0, +1.0], [n, m3]) Q, _ = qr(A.matmat(S), overwrite_a=True, mode='economic') trQAQ = np.trace(Q.conj().T @ A.matmat(Q)) G = rng.choice([-1, +1], [n, m3]) right = G - Q@(Q.conj().T @ G) trGAG = np.trace(right.conj().T @ A.matmat(right)) return trQAQ + trGAG/m3 def _ident_like(A): # A compatibility function which should eventually disappear. if scipy.sparse.issparse(A): # Creates a sparse matrix in dia format out = scipy.sparse.eye(A.shape[0], A.shape[1], dtype=A.dtype) if isinstance(A, scipy.sparse.spmatrix): return out.asformat(A.format) return scipy.sparse.dia_array(out).asformat(A.format) elif is_pydata_spmatrix(A): import sparse return sparse.eye(A.shape[0], A.shape[1], dtype=A.dtype) elif isinstance(A, scipy.sparse.linalg.LinearOperator): return IdentityOperator(A.shape, dtype=A.dtype) else: return np.eye(A.shape[0], A.shape[1], dtype=A.dtype) def expm_multiply(A, B, start=None, stop=None, num=None, endpoint=None, traceA=None): """ Compute the action of the matrix exponential of A on B. Parameters ---------- A : transposable linear operator The operator whose exponential is of interest. B : ndarray The matrix or vector to be multiplied by the matrix exponential of A. start : scalar, optional The starting time point of the sequence. stop : scalar, optional The end time point of the sequence, unless `endpoint` is set to False. In that case, the sequence consists of all but the last of ``num + 1`` evenly spaced time points, so that `stop` is excluded. Note that the step size changes when `endpoint` is False. num : int, optional Number of time points to use. endpoint : bool, optional If True, `stop` is the last time point. Otherwise, it is not included. 
traceA : scalar, optional Trace of `A`. If not given the trace is estimated for linear operators, or calculated exactly for sparse matrices. It is used to precondition `A`, thus an approximate trace is acceptable. For linear operators, `traceA` should be provided to ensure performance as the estimation is not guaranteed to be reliable for all cases. .. versionadded:: 1.9.0 Returns ------- expm_A_B : ndarray The result of the action :math:`e^{t_k A} B`. Warns ----- UserWarning If `A` is a linear operator and ``traceA=None`` (default). Notes ----- The optional arguments defining the sequence of evenly spaced time points are compatible with the arguments of `numpy.linspace`. The output ndarray shape is somewhat complicated so I explain it here. The ndim of the output could be either 1, 2, or 3. It would be 1 if you are computing the expm action on a single vector at a single time point. It would be 2 if you are computing the expm action on a vector at multiple time points, or if you are computing the expm action on a matrix at a single time point. It would be 3 if you want the action on a matrix with multiple columns at multiple time points. If multiple time points are requested, expm_A_B[0] will always be the action of the expm at the first time point, regardless of whether the action is on a vector or a matrix. References ---------- .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2011) "Computing the Action of the Matrix Exponential, with an Application to Exponential Integrators." SIAM Journal on Scientific Computing, 33 (2). pp. 488-511. ISSN 1064-8275 http://eprints.ma.man.ac.uk/1591/ .. [2] Nicholas J. Higham and Awad H. Al-Mohy (2010) "Computing Matrix Functions." Acta Numerica, 19. 159-208. 
ISSN 0962-4929 http://eprints.ma.man.ac.uk/1451/ Examples -------- >>> import numpy as np >>> from scipy.sparse import csc_matrix >>> from scipy.sparse.linalg import expm, expm_multiply >>> A = csc_matrix([[1, 0], [0, 1]]) >>> A.toarray() array([[1, 0], [0, 1]], dtype=int64) >>> B = np.array([np.exp(-1.), np.exp(-2.)]) >>> B array([ 0.36787944, 0.13533528]) >>> expm_multiply(A, B, start=1, stop=2, num=3, endpoint=True) array([[ 1. , 0.36787944], [ 1.64872127, 0.60653066], [ 2.71828183, 1. ]]) >>> expm(A).dot(B) # Verify 1st timestep array([ 1. , 0.36787944]) >>> expm(1.5*A).dot(B) # Verify 2nd timestep array([ 1.64872127, 0.60653066]) >>> expm(2*A).dot(B) # Verify 3rd timestep array([ 2.71828183, 1. ]) """ if all(arg is None for arg in (start, stop, num, endpoint)): X = _expm_multiply_simple(A, B, traceA=traceA) else: X, status = _expm_multiply_interval(A, B, start, stop, num, endpoint, traceA=traceA) return X def _expm_multiply_simple(A, B, t=1.0, traceA=None, balance=False): """ Compute the action of the matrix exponential at a single time point. Parameters ---------- A : transposable linear operator The operator whose exponential is of interest. B : ndarray The matrix to be multiplied by the matrix exponential of A. t : float A time point. traceA : scalar, optional Trace of `A`. If not given the trace is estimated for linear operators, or calculated exactly for sparse matrices. It is used to precondition `A`, thus an approximate trace is acceptable balance : bool Indicates whether or not to apply balancing. Returns ------- F : ndarray :math:`e^{t A} B` Notes ----- This is algorithm (3.2) in Al-Mohy and Higham (2011). 
""" if balance: raise NotImplementedError if len(A.shape) != 2 or A.shape[0] != A.shape[1]: raise ValueError('expected A to be like a square matrix') if A.shape[1] != B.shape[0]: raise ValueError('shapes of matrices A {} and B {} are incompatible' .format(A.shape, B.shape)) ident = _ident_like(A) is_linear_operator = isinstance(A, scipy.sparse.linalg.LinearOperator) n = A.shape[0] if len(B.shape) == 1: n0 = 1 elif len(B.shape) == 2: n0 = B.shape[1] else: raise ValueError('expected B to be like a matrix or a vector') u_d = 2**-53 tol = u_d if traceA is None: if is_linear_operator: warn("Trace of LinearOperator not available, it will be estimated." " Provide `traceA` to ensure performance.", stacklevel=3) # m3=1 is bit arbitrary choice, a more accurate trace (larger m3) might # speed up exponential calculation, but trace estimation is more costly traceA = traceest(A, m3=1) if is_linear_operator else _trace(A) mu = traceA / float(n) A = A - mu * ident A_1_norm = onenormest(A) if is_linear_operator else _exact_1_norm(A) if t*A_1_norm == 0: m_star, s = 0, 1 else: ell = 2 norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell) m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell) return _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol, balance) def _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol=None, balance=False): """ A helper function. """ if balance: raise NotImplementedError if tol is None: u_d = 2 ** -53 tol = u_d F = B eta = np.exp(t*mu / float(s)) for i in range(s): c1 = _exact_inf_norm(B) for j in range(m_star): coeff = t / float(s*(j+1)) B = coeff * A.dot(B) c2 = _exact_inf_norm(B) F = F + B if c1 + c2 <= tol * _exact_inf_norm(F): break c1 = c2 F = eta * F B = F return F # This table helps to compute bounds. # They seem to have been difficult to calculate, involving symbolic # manipulation of equations, followed by numerical root finding. _theta = { # The first 30 values are from table A.3 of Computing Matrix Functions. 
1: 2.29e-16, 2: 2.58e-8, 3: 1.39e-5, 4: 3.40e-4, 5: 2.40e-3, 6: 9.07e-3, 7: 2.38e-2, 8: 5.00e-2, 9: 8.96e-2, 10: 1.44e-1, # 11 11: 2.14e-1, 12: 3.00e-1, 13: 4.00e-1, 14: 5.14e-1, 15: 6.41e-1, 16: 7.81e-1, 17: 9.31e-1, 18: 1.09, 19: 1.26, 20: 1.44, # 21 21: 1.62, 22: 1.82, 23: 2.01, 24: 2.22, 25: 2.43, 26: 2.64, 27: 2.86, 28: 3.08, 29: 3.31, 30: 3.54, # The rest are from table 3.1 of # Computing the Action of the Matrix Exponential. 35: 4.7, 40: 6.0, 45: 7.2, 50: 8.5, 55: 9.9, } def _onenormest_matrix_power(A, p, t=2, itmax=5, compute_v=False, compute_w=False): """ Efficiently estimate the 1-norm of A^p. Parameters ---------- A : ndarray Matrix whose 1-norm of a power is to be computed. p : int Non-negative integer power. t : int, optional A positive parameter controlling the tradeoff between accuracy versus time and memory usage. Larger values take longer and use more memory but give more accurate output. itmax : int, optional Use at most this many iterations. compute_v : bool, optional Request a norm-maximizing linear operator input vector if True. compute_w : bool, optional Request a norm-maximizing linear operator output vector if True. Returns ------- est : float An underestimate of the 1-norm of the sparse matrix. v : ndarray, optional The vector such that ||Av||_1 == est*||v||_1. It can be thought of as an input to the linear operator that gives an output with particularly large norm. w : ndarray, optional The vector Av which has relatively large 1-norm. It can be thought of as an output of the linear operator that is relatively large in norm compared to the input. """ #XXX Eventually turn this into an API function in the _onenormest module, #XXX and remove its underscore, #XXX but wait until expm_multiply goes into scipy. from scipy.sparse.linalg._onenormest import onenormest return onenormest(aslinearoperator(A) ** p) class LazyOperatorNormInfo: """ Information about an operator is lazily computed. 
The information includes the exact 1-norm of the operator, in addition to estimates of 1-norms of powers of the operator. This uses the notation of Computing the Action (2011). This class is specialized enough to probably not be of general interest outside of this module. """ def __init__(self, A, A_1_norm=None, ell=2, scale=1): """ Provide the operator and some norm-related information. Parameters ---------- A : linear operator The operator of interest. A_1_norm : float, optional The exact 1-norm of A. ell : int, optional A technical parameter controlling norm estimation quality. scale : int, optional If specified, return the norms of scale*A instead of A. """ self._A = A self._A_1_norm = A_1_norm self._ell = ell self._d = {} self._scale = scale def set_scale(self,scale): """ Set the scale parameter. """ self._scale = scale def onenorm(self): """ Compute the exact 1-norm. """ if self._A_1_norm is None: self._A_1_norm = _exact_1_norm(self._A) return self._scale*self._A_1_norm def d(self, p): """ Lazily estimate d_p(A) ~= || A^p ||^(1/p) where ||.|| is the 1-norm. """ if p not in self._d: est = _onenormest_matrix_power(self._A, p, self._ell) self._d[p] = est ** (1.0 / p) return self._scale*self._d[p] def alpha(self, p): """ Lazily compute max(d(p), d(p+1)). """ return max(self.d(p), self.d(p+1)) def _compute_cost_div_m(m, p, norm_info): """ A helper function for computing bounds. This is equation (3.10). It measures cost in terms of the number of required matrix products. Parameters ---------- m : int A valid key of _theta. p : int A matrix power. norm_info : LazyOperatorNormInfo Information about 1-norms of related operators. Returns ------- cost_div_m : int Required number of matrix products divided by m. """ return int(np.ceil(norm_info.alpha(p) / _theta[m])) def _compute_p_max(m_max): """ Compute the largest positive integer p such that p*(p-1) <= m_max + 1. Do this in a slightly dumb way, but safe and not too slow. 
Parameters ---------- m_max : int A count related to bounds. """ sqrt_m_max = np.sqrt(m_max) p_low = int(np.floor(sqrt_m_max)) p_high = int(np.ceil(sqrt_m_max + 1)) return max(p for p in range(p_low, p_high+1) if p*(p-1) <= m_max + 1) def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2): """ A helper function for the _expm_multiply_* functions. Parameters ---------- norm_info : LazyOperatorNormInfo Information about norms of certain linear operators of interest. n0 : int Number of columns in the _expm_multiply_* B matrix. tol : float Expected to be :math:`2^{-24}` for single precision or :math:`2^{-53}` for double precision. m_max : int A value related to a bound. ell : int The number of columns used in the 1-norm approximation. This is usually taken to be small, maybe between 1 and 5. Returns ------- best_m : int Related to bounds for error control. best_s : int Amount of scaling. Notes ----- This is code fragment (3.1) in Al-Mohy and Higham (2011). The discussion of default values for m_max and ell is given between the definitions of equation (3.11) and the definition of equation (3.12). """ if ell < 1: raise ValueError('expected ell to be a positive integer') best_m = None best_s = None if _condition_3_13(norm_info.onenorm(), n0, m_max, ell): for m, theta in _theta.items(): s = int(np.ceil(norm_info.onenorm() / theta)) if best_m is None or m * s < best_m * best_s: best_m = m best_s = s else: # Equation (3.11). for p in range(2, _compute_p_max(m_max) + 1): for m in range(p*(p-1)-1, m_max+1): if m in _theta: s = _compute_cost_div_m(m, p, norm_info) if best_m is None or m * s < best_m * best_s: best_m = m best_s = s best_s = max(best_s, 1) return best_m, best_s def _condition_3_13(A_1_norm, n0, m_max, ell): """ A helper function for the _expm_multiply_* functions. Parameters ---------- A_1_norm : float The precomputed 1-norm of A. n0 : int Number of columns in the _expm_multiply_* B matrix. m_max : int A value related to a bound. 
ell : int The number of columns used in the 1-norm approximation. This is usually taken to be small, maybe between 1 and 5. Returns ------- value : bool Indicates whether or not the condition has been met. Notes ----- This is condition (3.13) in Al-Mohy and Higham (2011). """ # This is the rhs of equation (3.12). p_max = _compute_p_max(m_max) a = 2 * ell * p_max * (p_max + 3) # Evaluate the condition (3.13). b = _theta[m_max] / float(n0 * m_max) return A_1_norm <= a * b def _expm_multiply_interval(A, B, start=None, stop=None, num=None, endpoint=None, traceA=None, balance=False, status_only=False): """ Compute the action of the matrix exponential at multiple time points. Parameters ---------- A : transposable linear operator The operator whose exponential is of interest. B : ndarray The matrix to be multiplied by the matrix exponential of A. start : scalar, optional The starting time point of the sequence. stop : scalar, optional The end time point of the sequence, unless `endpoint` is set to False. In that case, the sequence consists of all but the last of ``num + 1`` evenly spaced time points, so that `stop` is excluded. Note that the step size changes when `endpoint` is False. num : int, optional Number of time points to use. traceA : scalar, optional Trace of `A`. If not given the trace is estimated for linear operators, or calculated exactly for sparse matrices. It is used to precondition `A`, thus an approximate trace is acceptable endpoint : bool, optional If True, `stop` is the last time point. Otherwise, it is not included. balance : bool Indicates whether or not to apply balancing. status_only : bool A flag that is set to True for some debugging and testing operations. Returns ------- F : ndarray :math:`e^{t_k A} B` status : int An integer status for testing and debugging. Notes ----- This is algorithm (5.2) in Al-Mohy and Higham (2011). There seems to be a typo, where line 15 of the algorithm should be moved to line 6.5 (between lines 6 and 7). 
""" if balance: raise NotImplementedError if len(A.shape) != 2 or A.shape[0] != A.shape[1]: raise ValueError('expected A to be like a square matrix') if A.shape[1] != B.shape[0]: raise ValueError('shapes of matrices A {} and B {} are incompatible' .format(A.shape, B.shape)) ident = _ident_like(A) is_linear_operator = isinstance(A, scipy.sparse.linalg.LinearOperator) n = A.shape[0] if len(B.shape) == 1: n0 = 1 elif len(B.shape) == 2: n0 = B.shape[1] else: raise ValueError('expected B to be like a matrix or a vector') u_d = 2**-53 tol = u_d if traceA is None: if is_linear_operator: warn("Trace of LinearOperator not available, it will be estimated." " Provide `traceA` to ensure performance.", stacklevel=3) # m3=5 is bit arbitrary choice, a more accurate trace (larger m3) might # speed up exponential calculation, but trace estimation is also costly # an educated guess would need to consider the number of time points traceA = traceest(A, m3=5) if is_linear_operator else _trace(A) mu = traceA / float(n) # Get the linspace samples, attempting to preserve the linspace defaults. linspace_kwargs = {'retstep': True} if num is not None: linspace_kwargs['num'] = num if endpoint is not None: linspace_kwargs['endpoint'] = endpoint samples, step = np.linspace(start, stop, **linspace_kwargs) # Convert the linspace output to the notation used by the publication. nsamples = len(samples) if nsamples < 2: raise ValueError('at least two time points are required') q = nsamples - 1 h = step t_0 = samples[0] t_q = samples[q] # Define the output ndarray. # Use an ndim=3 shape, such that the last two indices # are the ones that may be involved in level 3 BLAS operations. 
X_shape = (nsamples,) + B.shape X = np.empty(X_shape, dtype=np.result_type(A.dtype, B.dtype, float)) t = t_q - t_0 A = A - mu * ident A_1_norm = onenormest(A) if is_linear_operator else _exact_1_norm(A) ell = 2 norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell) if t*A_1_norm == 0: m_star, s = 0, 1 else: m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell) # Compute the expm action up to the initial time point. X[0] = _expm_multiply_simple_core(A, B, t_0, mu, m_star, s) # Compute the expm action at the rest of the time points. if q <= s: if status_only: return 0 else: return _expm_multiply_interval_core_0(A, X, h, mu, q, norm_info, tol, ell,n0) elif not (q % s): if status_only: return 1 else: return _expm_multiply_interval_core_1(A, X, h, mu, m_star, s, q, tol) elif (q % s): if status_only: return 2 else: return _expm_multiply_interval_core_2(A, X, h, mu, m_star, s, q, tol) else: raise Exception('internal error') def _expm_multiply_interval_core_0(A, X, h, mu, q, norm_info, tol, ell, n0): """ A helper function, for the case q <= s. """ # Compute the new values of m_star and s which should be applied # over intervals of size t/q if norm_info.onenorm() == 0: m_star, s = 0, 1 else: norm_info.set_scale(1./q) m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell) norm_info.set_scale(1) for k in range(q): X[k+1] = _expm_multiply_simple_core(A, X[k], h, mu, m_star, s) return X, 0 def _expm_multiply_interval_core_1(A, X, h, mu, m_star, s, q, tol): """ A helper function, for the case q > s and q % s == 0. 
""" d = q // s input_shape = X.shape[1:] K_shape = (m_star + 1, ) + input_shape K = np.empty(K_shape, dtype=X.dtype) for i in range(s): Z = X[i*d] K[0] = Z high_p = 0 for k in range(1, d+1): F = K[0] c1 = _exact_inf_norm(F) for p in range(1, m_star+1): if p > high_p: K[p] = h * A.dot(K[p-1]) / float(p) coeff = float(pow(k, p)) F = F + coeff * K[p] inf_norm_K_p_1 = _exact_inf_norm(K[p]) c2 = coeff * inf_norm_K_p_1 if c1 + c2 <= tol * _exact_inf_norm(F): break c1 = c2 X[k + i*d] = np.exp(k*h*mu) * F return X, 1 def _expm_multiply_interval_core_2(A, X, h, mu, m_star, s, q, tol): """ A helper function, for the case q > s and q % s > 0. """ d = q // s j = q // d r = q - d * j input_shape = X.shape[1:] K_shape = (m_star + 1, ) + input_shape K = np.empty(K_shape, dtype=X.dtype) for i in range(j + 1): Z = X[i*d] K[0] = Z high_p = 0 if i < j: effective_d = d else: effective_d = r for k in range(1, effective_d+1): F = K[0] c1 = _exact_inf_norm(F) for p in range(1, m_star+1): if p == high_p + 1: K[p] = h * A.dot(K[p-1]) / float(p) high_p = p coeff = float(pow(k, p)) F = F + coeff * K[p] inf_norm_K_p_1 = _exact_inf_norm(K[p]) c2 = coeff * inf_norm_K_p_1 if c1 + c2 <= tol * _exact_inf_norm(F): break c1 = c2 X[k + i*d] = np.exp(k*h*mu) * F return X, 2
26,296
31.425401
80
py
scipy
scipy-main/scipy/sparse/linalg/_svdp.py
""" Python wrapper for PROPACK -------------------------- PROPACK is a collection of Fortran routines for iterative computation of partial SVDs of large matrices or linear operators. Based on BSD licensed pypropack project: http://github.com/jakevdp/pypropack Author: Jake Vanderplas <vanderplas@astro.washington.edu> PROPACK source is BSD licensed, and available at http://soi.stanford.edu/~rmunk/PROPACK/ """ __all__ = ['_svdp'] import numpy as np from scipy._lib._util import check_random_state from scipy.sparse.linalg import aslinearoperator from scipy.linalg import LinAlgError from ._propack import _spropack # type: ignore[attr-defined] from ._propack import _dpropack # type: ignore[attr-defined] from ._propack import _cpropack # type: ignore[attr-defined] from ._propack import _zpropack # type: ignore[attr-defined] _lansvd_dict = { 'f': _spropack.slansvd, 'd': _dpropack.dlansvd, 'F': _cpropack.clansvd, 'D': _zpropack.zlansvd, } _lansvd_irl_dict = { 'f': _spropack.slansvd_irl, 'd': _dpropack.dlansvd_irl, 'F': _cpropack.clansvd_irl, 'D': _zpropack.zlansvd_irl, } _which_converter = { 'LM': 'L', 'SM': 'S', } class _AProd: """ Wrapper class for linear operator The call signature of the __call__ method matches the callback of the PROPACK routines. 
""" def __init__(self, A): try: self.A = aslinearoperator(A) except TypeError: self.A = aslinearoperator(np.asarray(A)) def __call__(self, transa, m, n, x, y, sparm, iparm): if transa == 'n': y[:] = self.A.matvec(x) else: y[:] = self.A.rmatvec(x) @property def shape(self): return self.A.shape @property def dtype(self): try: return self.A.dtype except AttributeError: return self.A.matvec(np.zeros(self.A.shape[1])).dtype def _svdp(A, k, which='LM', irl_mode=True, kmax=None, compute_u=True, compute_v=True, v0=None, full_output=False, tol=0, delta=None, eta=None, anorm=0, cgs=False, elr=True, min_relgap=0.002, shifts=None, maxiter=None, random_state=None): """ Compute the singular value decomposition of a linear operator using PROPACK Parameters ---------- A : array_like, sparse matrix, or LinearOperator Operator for which SVD will be computed. If `A` is a LinearOperator object, it must define both ``matvec`` and ``rmatvec`` methods. k : int Number of singular values/vectors to compute which : {"LM", "SM"} Which singluar triplets to compute: - 'LM': compute triplets corresponding to the `k` largest singular values - 'SM': compute triplets corresponding to the `k` smallest singular values `which='SM'` requires `irl_mode=True`. Computes largest singular values by default. irl_mode : bool, optional If `True`, then compute SVD using IRL (implicitly restarted Lanczos) mode. Default is `True`. kmax : int, optional Maximal number of iterations / maximal dimension of the Krylov subspace. Default is ``10 * k``. compute_u : bool, optional If `True` (default) then compute left singular vectors, `u`. compute_v : bool, optional If `True` (default) then compute right singular vectors, `v`. tol : float, optional The desired relative accuracy for computed singular values. If not specified, it will be set based on machine precision. v0 : array_like, optional Starting vector for iterations: must be of length ``A.shape[0]``. If not specified, PROPACK will generate a starting vector. 
full_output : bool, optional If `True`, then return sigma_bound. Default is `False`. delta : float, optional Level of orthogonality to maintain between Lanczos vectors. Default is set based on machine precision. eta : float, optional Orthogonality cutoff. During reorthogonalization, vectors with component larger than `eta` along the Lanczos vector will be purged. Default is set based on machine precision. anorm : float, optional Estimate of ``||A||``. Default is `0`. cgs : bool, optional If `True`, reorthogonalization is done using classical Gram-Schmidt. If `False` (default), it is done using modified Gram-Schmidt. elr : bool, optional If `True` (default), then extended local orthogonality is enforced when obtaining singular vectors. min_relgap : float, optional The smallest relative gap allowed between any shift in IRL mode. Default is `0.001`. Accessed only if ``irl_mode=True``. shifts : int, optional Number of shifts per restart in IRL mode. Default is determined to satisfy ``k <= min(kmax-shifts, m, n)``. Must be >= 0, but choosing 0 might lead to performance degredation. Accessed only if ``irl_mode=True``. maxiter : int, optional Maximum number of restarts in IRL mode. Default is `1000`. Accessed only if ``irl_mode=True``. random_state : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional Pseudorandom number generator state used to generate resamples. If `random_state` is ``None`` (or `np.random`), the `numpy.random.RandomState` singleton is used. If `random_state` is an int, a new ``RandomState`` instance is used, seeded with `random_state`. If `random_state` is already a ``Generator`` or ``RandomState`` instance then that instance is used. Returns ------- u : ndarray The `k` largest (``which="LM"``) or smallest (``which="SM"``) left singular vectors, ``shape == (A.shape[0], 3)``, returned only if ``compute_u=True``. 
sigma : ndarray The top `k` singular values, ``shape == (k,)`` vt : ndarray The `k` largest (``which="LM"``) or smallest (``which="SM"``) right singular vectors, ``shape == (3, A.shape[1])``, returned only if ``compute_v=True``. sigma_bound : ndarray the error bounds on the singular values sigma, returned only if ``full_output=True``. """ # 32-bit complex PROPACK functions have Fortran LAPACK ABI # incompatibility issues if np.iscomplexobj(A) and (np.intp(0).itemsize < 8): raise TypeError('PROPACK complex-valued SVD methods not available ' 'for 32-bit builds') random_state = check_random_state(random_state) which = which.upper() if which not in {'LM', 'SM'}: raise ValueError("`which` must be either 'LM' or 'SM'") if not irl_mode and which == 'SM': raise ValueError("`which`='SM' requires irl_mode=True") aprod = _AProd(A) typ = aprod.dtype.char try: lansvd_irl = _lansvd_irl_dict[typ] lansvd = _lansvd_dict[typ] except KeyError: # work with non-supported types using native system precision if np.iscomplexobj(np.empty(0, dtype=typ)): typ = np.dtype(complex).char else: typ = np.dtype(float).char lansvd_irl = _lansvd_irl_dict[typ] lansvd = _lansvd_dict[typ] m, n = aprod.shape if (k < 1) or (k > min(m, n)): raise ValueError("k must be positive and not greater than m or n") if kmax is None: kmax = 10*k if maxiter is None: maxiter = 1000 # guard against unnecessarily large kmax kmax = min(m + 1, n + 1, kmax) if kmax < k: raise ValueError( "kmax must be greater than or equal to k, " f"but kmax ({kmax}) < k ({k})") # convert python args to fortran args jobu = 'y' if compute_u else 'n' jobv = 'y' if compute_v else 'n' # these will be the output arrays u = np.zeros((m, kmax + 1), order='F', dtype=typ) v = np.zeros((n, kmax), order='F', dtype=typ) # Specify the starting vector. 
if v0 is all zero, PROPACK will generate # a random starting vector: the random seed cannot be controlled in that # case, so we'll instead use numpy to generate a random vector if v0 is None: u[:, 0] = random_state.uniform(size=m) if np.iscomplexobj(np.empty(0, dtype=typ)): # complex type u[:, 0] += 1j * random_state.uniform(size=m) else: try: u[:, 0] = v0 except ValueError: raise ValueError(f"v0 must be of length {m}") # process options for the fit if delta is None: delta = np.sqrt(np.finfo(typ).eps) if eta is None: eta = np.finfo(typ).eps ** 0.75 if irl_mode: doption = np.array((delta, eta, anorm, min_relgap), dtype=typ.lower()) # validate or find default shifts if shifts is None: shifts = kmax - k if k > min(kmax - shifts, m, n): raise ValueError('shifts must satisfy ' 'k <= min(kmax-shifts, m, n)!') elif shifts < 0: raise ValueError('shifts must be >= 0!') else: doption = np.array((delta, eta, anorm), dtype=typ.lower()) ioption = np.array((int(bool(cgs)), int(bool(elr))), dtype='i') # If computing `u` or `v` (left and right singular vectors, # respectively), `blocksize` controls how large a fraction of the # work is done via fast BLAS level 3 operations. A larger blocksize # may lead to faster computation at the expense of greater memory # consumption. `blocksize` must be ``>= 1``. Choosing blocksize # of 16, but docs don't specify; it's almost surely a # power of 2. 
blocksize = 16 # Determine lwork & liwork: # the required lengths are specified in the PROPACK documentation if compute_u or compute_v: lwork = m + n + 9*kmax + 5*kmax*kmax + 4 + max( 3*kmax*kmax + 4*kmax + 4, blocksize*max(m, n)) liwork = 8*kmax else: lwork = m + n + 9*kmax + 2*kmax*kmax + 4 + max(m + n, 4*kmax + 4) liwork = 2*kmax + 1 work = np.empty(lwork, dtype=typ.lower()) iwork = np.empty(liwork, dtype=np.int32) # dummy arguments: these are passed to aprod, and not used in this wrapper dparm = np.empty(1, dtype=typ.lower()) iparm = np.empty(1, dtype=np.int32) if typ.isupper(): # PROPACK documentation is unclear on the required length of zwork. # Use the same length Julia's wrapper uses # see https://github.com/JuliaSmoothOptimizers/PROPACK.jl/ zwork = np.empty(m + n + 32*m, dtype=typ) works = work, zwork, iwork else: works = work, iwork if irl_mode: u, sigma, bnd, v, info = lansvd_irl(_which_converter[which], jobu, jobv, m, n, shifts, k, maxiter, aprod, u, v, tol, *works, doption, ioption, dparm, iparm) else: u, sigma, bnd, v, info = lansvd(jobu, jobv, m, n, k, aprod, u, v, tol, *works, doption, ioption, dparm, iparm) if info > 0: raise LinAlgError( f"An invariant subspace of dimension {info} was found.") elif info < 0: raise LinAlgError( f"k={k} singular triplets did not converge within " f"kmax={kmax} iterations") # info == 0: The K largest (or smallest) singular triplets were computed # succesfully! return u[:, :k], sigma, v[:, :k].conj().T, bnd
11,685
35.291925
79
py
scipy
scipy-main/scipy/sparse/linalg/setup.py
def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('linalg', parent_package, top_path) config.add_subpackage('_isolve') config.add_subpackage('_dsolve') config.add_subpackage('_eigen') config.add_data_dir('tests') # PROPACK config.add_subpackage('_propack') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
511
23.380952
62
py
scipy
scipy-main/scipy/sparse/linalg/eigen.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.sparse.linalg` namespace for importing the functions # included below. import warnings from . import _eigen __all__ = [ # noqa: F822 'ArpackError', 'ArpackNoConvergence', 'eigs', 'eigsh', 'lobpcg', 'svds', 'arpack', 'test' ] eigen_modules = ['arpack'] def __dir__(): return __all__ def __getattr__(name): if name not in __all__ and name not in eigen_modules: raise AttributeError( "scipy.sparse.linalg.eigen is deprecated and has no attribute " f"{name}. Try looking in scipy.sparse.linalg instead.") if name in eigen_modules: msg = (f'The module `scipy.sparse.linalg.eigen.{name}` is ' 'deprecated. All public names must be imported directly from ' 'the `scipy.sparse.linalg` namespace.') else: msg = (f"Please use `{name}` from the `scipy.sparse.linalg` namespace," " the `scipy.sparse.linalg.eigen` namespace is deprecated.") warnings.warn(msg, category=DeprecationWarning, stacklevel=2) return getattr(_eigen, name)
1,151
29.315789
79
py
scipy
scipy-main/scipy/sparse/linalg/isolve.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.sparse.linalg` namespace for importing the functions # included below. import warnings from . import _isolve __all__ = [ # noqa: F822 'bicg', 'bicgstab', 'cg', 'cgs', 'gcrotmk', 'gmres', 'lgmres', 'lsmr', 'lsqr', 'minres', 'qmr', 'tfqmr', 'utils', 'iterative', 'test' ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.sparse.linalg.isolve is deprecated and has no attribute " f"{name}. Try looking in scipy.sparse.linalg instead.") warnings.warn(f"Please use `{name}` from the `scipy.sparse.linalg` namespace, " "the `scipy.sparse.linalg.isolve` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_isolve, name)
904
28.193548
83
py
scipy
scipy-main/scipy/sparse/linalg/matfuncs.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.sparse.linalg` namespace for importing the functions # included below. import warnings from . import _matfuncs __all__ = [ # noqa: F822 'expm', 'inv', 'solve', 'solve_triangular', 'isspmatrix', 'spsolve', 'is_pydata_spmatrix', 'LinearOperator', 'UPPER_TRIANGULAR', 'MatrixPowerOperator', 'ProductOperator' ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.sparse.linalg.matfuncs is deprecated and has no attribute " f"{name}. Try looking in scipy.sparse.linalg instead.") warnings.warn(f"Please use `{name}` from the `scipy.sparse.linalg` namespace, " "the `scipy.sparse.linalg.matfuncs` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_matfuncs, name)
948
29.612903
83
py
scipy
scipy-main/scipy/sparse/linalg/_norm.py
"""Sparse matrix norms. """ import numpy as np from scipy.sparse import issparse from scipy.sparse.linalg import svds import scipy.sparse as sp from numpy import sqrt, abs __all__ = ['norm'] def _sparse_frobenius_norm(x): data = sp._sputils._todata(x) return np.linalg.norm(data) def norm(x, ord=None, axis=None): """ Norm of a sparse matrix This function is able to return one of seven different matrix norms, depending on the value of the ``ord`` parameter. Parameters ---------- x : a sparse matrix Input sparse matrix. ord : {non-zero int, inf, -inf, 'fro'}, optional Order of the norm (see table under ``Notes``). inf means numpy's `inf` object. axis : {int, 2-tuple of ints, None}, optional If `axis` is an integer, it specifies the axis of `x` along which to compute the vector norms. If `axis` is a 2-tuple, it specifies the axes that hold 2-D matrices, and the matrix norms of these matrices are computed. If `axis` is None then either a vector norm (when `x` is 1-D) or a matrix norm (when `x` is 2-D) is returned. Returns ------- n : float or ndarray Notes ----- Some of the ord are not implemented because some associated functions like, _multi_svd_norm, are not yet available for sparse matrix. This docstring is modified based on numpy.linalg.norm. https://github.com/numpy/numpy/blob/main/numpy/linalg/linalg.py The following norms can be calculated: ===== ============================ ord norm for sparse matrices ===== ============================ None Frobenius norm 'fro' Frobenius norm inf max(sum(abs(x), axis=1)) -inf min(sum(abs(x), axis=1)) 0 abs(x).sum(axis=axis) 1 max(sum(abs(x), axis=0)) -1 min(sum(abs(x), axis=0)) 2 Spectral norm (the largest singular value) -2 Not implemented other Not implemented ===== ============================ The Frobenius norm is given by [1]_: :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` References ---------- .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, Baltimore, MD, Johns Hopkins University Press, 1985, pg. 
15 Examples -------- >>> from scipy.sparse import * >>> import numpy as np >>> from scipy.sparse.linalg import norm >>> a = np.arange(9) - 4 >>> a array([-4, -3, -2, -1, 0, 1, 2, 3, 4]) >>> b = a.reshape((3, 3)) >>> b array([[-4, -3, -2], [-1, 0, 1], [ 2, 3, 4]]) >>> b = csr_matrix(b) >>> norm(b) 7.745966692414834 >>> norm(b, 'fro') 7.745966692414834 >>> norm(b, np.inf) 9 >>> norm(b, -np.inf) 2 >>> norm(b, 1) 7 >>> norm(b, -1) 6 The matrix 2-norm or the spectral norm is the largest singular value, computed approximately and with limitations. >>> b = diags([-1, 1], [0, 1], shape=(9, 10)) >>> norm(b, 2) 1.9753... """ if not issparse(x): raise TypeError("input is not sparse. use numpy.linalg.norm") # Check the default case first and handle it immediately. if axis is None and ord in (None, 'fro', 'f'): return _sparse_frobenius_norm(x) # Some norms require functions that are not implemented for all types. x = x.tocsr() if axis is None: axis = (0, 1) elif not isinstance(axis, tuple): msg = "'axis' must be None, an integer or a tuple of integers" try: int_axis = int(axis) except TypeError as e: raise TypeError(msg) from e if axis != int_axis: raise TypeError(msg) axis = (int_axis,) nd = 2 if len(axis) == 2: row_axis, col_axis = axis if not (-nd <= row_axis < nd and -nd <= col_axis < nd): raise ValueError('Invalid axis %r for an array with shape %r' % (axis, x.shape)) if row_axis % nd == col_axis % nd: raise ValueError('Duplicate axes given.') if ord == 2: # Only solver="lobpcg" supports all numpy dtypes _, s, _ = svds(x, k=1, solver="lobpcg") return s[0] elif ord == -2: raise NotImplementedError #return _multi_svd_norm(x, row_axis, col_axis, amin) elif ord == 1: return abs(x).sum(axis=row_axis).max(axis=col_axis)[0,0] elif ord == np.inf: return abs(x).sum(axis=col_axis).max(axis=row_axis)[0,0] elif ord == -1: return abs(x).sum(axis=row_axis).min(axis=col_axis)[0,0] elif ord == -np.inf: return abs(x).sum(axis=col_axis).min(axis=row_axis)[0,0] elif ord in (None, 'f', 
'fro'): # The axis order does not matter for this norm. return _sparse_frobenius_norm(x) else: raise ValueError("Invalid norm order for matrices.") elif len(axis) == 1: a, = axis if not (-nd <= a < nd): raise ValueError('Invalid axis %r for an array with shape %r' % (axis, x.shape)) if ord == np.inf: M = abs(x).max(axis=a) elif ord == -np.inf: M = abs(x).min(axis=a) elif ord == 0: # Zero norm M = (x != 0).sum(axis=a) elif ord == 1: # special case for speedup M = abs(x).sum(axis=a) elif ord in (2, None): M = sqrt(abs(x).power(2).sum(axis=a)) else: try: ord + 1 except TypeError as e: raise ValueError('Invalid norm order for vectors.') from e M = np.power(abs(x).power(ord).sum(axis=a), 1 / ord) if hasattr(M, 'toarray'): return M.toarray().ravel() elif hasattr(M, 'A'): return M.A.ravel() else: return M.ravel() else: raise ValueError("Improper number of dimensions to norm.")
6,069
30.28866
79
py
scipy
scipy-main/scipy/sparse/linalg/_onenormest.py
"""Sparse block 1-norm estimator. """ import numpy as np from scipy.sparse.linalg import aslinearoperator __all__ = ['onenormest'] def onenormest(A, t=2, itmax=5, compute_v=False, compute_w=False): """ Compute a lower bound of the 1-norm of a sparse matrix. Parameters ---------- A : ndarray or other linear operator A linear operator that can be transposed and that can produce matrix products. t : int, optional A positive parameter controlling the tradeoff between accuracy versus time and memory usage. Larger values take longer and use more memory but give more accurate output. itmax : int, optional Use at most this many iterations. compute_v : bool, optional Request a norm-maximizing linear operator input vector if True. compute_w : bool, optional Request a norm-maximizing linear operator output vector if True. Returns ------- est : float An underestimate of the 1-norm of the sparse matrix. v : ndarray, optional The vector such that ||Av||_1 == est*||v||_1. It can be thought of as an input to the linear operator that gives an output with particularly large norm. w : ndarray, optional The vector Av which has relatively large 1-norm. It can be thought of as an output of the linear operator that is relatively large in norm compared to the input. Notes ----- This is algorithm 2.4 of [1]. In [2] it is described as follows. "This algorithm typically requires the evaluation of about 4t matrix-vector products and almost invariably produces a norm estimate (which is, in fact, a lower bound on the norm) correct to within a factor 3." .. versionadded:: 0.13.0 References ---------- .. [1] Nicholas J. Higham and Francoise Tisseur (2000), "A Block Algorithm for Matrix 1-Norm Estimation, with an Application to 1-Norm Pseudospectra." SIAM J. Matrix Anal. Appl. Vol. 21, No. 4, pp. 1185-1201. .. [2] Awad H. Al-Mohy and Nicholas J. Higham (2009), "A new scaling and squaring algorithm for the matrix exponential." SIAM J. Matrix Anal. Appl. Vol. 31, No. 3, pp. 970-989. 
Examples -------- >>> import numpy as np >>> from scipy.sparse import csc_matrix >>> from scipy.sparse.linalg import onenormest >>> A = csc_matrix([[1., 0., 0.], [5., 8., 2.], [0., -1., 0.]], dtype=float) >>> A.toarray() array([[ 1., 0., 0.], [ 5., 8., 2.], [ 0., -1., 0.]]) >>> onenormest(A) 9.0 >>> np.linalg.norm(A.toarray(), ord=1) 9.0 """ # Check the input. A = aslinearoperator(A) if A.shape[0] != A.shape[1]: raise ValueError('expected the operator to act like a square matrix') # If the operator size is small compared to t, # then it is easier to compute the exact norm. # Otherwise estimate the norm. n = A.shape[1] if t >= n: A_explicit = np.asarray(aslinearoperator(A).matmat(np.identity(n))) if A_explicit.shape != (n, n): raise Exception('internal error: ', 'unexpected shape ' + str(A_explicit.shape)) col_abs_sums = abs(A_explicit).sum(axis=0) if col_abs_sums.shape != (n, ): raise Exception('internal error: ', 'unexpected shape ' + str(col_abs_sums.shape)) argmax_j = np.argmax(col_abs_sums) v = elementary_vector(n, argmax_j) w = A_explicit[:, argmax_j] est = col_abs_sums[argmax_j] else: est, v, w, nmults, nresamples = _onenormest_core(A, A.H, t, itmax) # Report the norm estimate along with some certificates of the estimate. if compute_v or compute_w: result = (est,) if compute_v: result += (v,) if compute_w: result += (w,) return result else: return est def _blocked_elementwise(func): """ Decorator for an elementwise function, to apply it blockwise along first dimension, to avoid excessive memory usage in temporaries. """ block_size = 2**20 def wrapper(x): if x.shape[0] < block_size: return func(x) else: y0 = func(x[:block_size]) y = np.zeros((x.shape[0],) + y0.shape[1:], dtype=y0.dtype) y[:block_size] = y0 del y0 for j in range(block_size, x.shape[0], block_size): y[j:j+block_size] = func(x[j:j+block_size]) return y return wrapper @_blocked_elementwise def sign_round_up(X): """ This should do the right thing for both real and complex matrices. 
From Higham and Tisseur: "Everything in this section remains valid for complex matrices provided that sign(A) is redefined as the matrix (aij / |aij|) (and sign(0) = 1) transposes are replaced by conjugate transposes." """ Y = X.copy() Y[Y == 0] = 1 Y /= np.abs(Y) return Y @_blocked_elementwise def _max_abs_axis1(X): return np.max(np.abs(X), axis=1) def _sum_abs_axis0(X): block_size = 2**20 r = None for j in range(0, X.shape[0], block_size): y = np.sum(np.abs(X[j:j+block_size]), axis=0) if r is None: r = y else: r += y return r def elementary_vector(n, i): v = np.zeros(n, dtype=float) v[i] = 1 return v def vectors_are_parallel(v, w): # Columns are considered parallel when they are equal or negative. # Entries are required to be in {-1, 1}, # which guarantees that the magnitudes of the vectors are identical. if v.ndim != 1 or v.shape != w.shape: raise ValueError('expected conformant vectors with entries in {-1,1}') n = v.shape[0] return np.dot(v, w) == n def every_col_of_X_is_parallel_to_a_col_of_Y(X, Y): for v in X.T: if not any(vectors_are_parallel(v, w) for w in Y.T): return False return True def column_needs_resampling(i, X, Y=None): # column i of X needs resampling if either # it is parallel to a previous column of X or # it is parallel to a column of Y n, t = X.shape v = X[:, i] if any(vectors_are_parallel(v, X[:, j]) for j in range(i)): return True if Y is not None: if any(vectors_are_parallel(v, w) for w in Y.T): return True return False def resample_column(i, X): X[:, i] = np.random.randint(0, 2, size=X.shape[0])*2 - 1 def less_than_or_close(a, b): return np.allclose(a, b) or (a < b) def _algorithm_2_2(A, AT, t): """ This is Algorithm 2.2. Parameters ---------- A : ndarray or other linear operator A linear operator that can produce matrix products. AT : ndarray or other linear operator The transpose of A. t : int, optional A positive parameter controlling the tradeoff between accuracy versus time and memory usage. 
Returns ------- g : sequence A non-negative decreasing vector such that g[j] is a lower bound for the 1-norm of the column of A of jth largest 1-norm. The first entry of this vector is therefore a lower bound on the 1-norm of the linear operator A. This sequence has length t. ind : sequence The ith entry of ind is the index of the column A whose 1-norm is given by g[i]. This sequence of indices has length t, and its entries are chosen from range(n), possibly with repetition, where n is the order of the operator A. Notes ----- This algorithm is mainly for testing. It uses the 'ind' array in a way that is similar to its usage in algorithm 2.4. This algorithm 2.2 may be easier to test, so it gives a chance of uncovering bugs related to indexing which could have propagated less noticeably to algorithm 2.4. """ A_linear_operator = aslinearoperator(A) AT_linear_operator = aslinearoperator(AT) n = A_linear_operator.shape[0] # Initialize the X block with columns of unit 1-norm. X = np.ones((n, t)) if t > 1: X[:, 1:] = np.random.randint(0, 2, size=(n, t-1))*2 - 1 X /= float(n) # Iteratively improve the lower bounds. # Track extra things, to assert invariants for debugging. g_prev = None h_prev = None k = 1 ind = range(t) while True: Y = np.asarray(A_linear_operator.matmat(X)) g = _sum_abs_axis0(Y) best_j = np.argmax(g) g.sort() g = g[::-1] S = sign_round_up(Y) Z = np.asarray(AT_linear_operator.matmat(S)) h = _max_abs_axis1(Z) # If this algorithm runs for fewer than two iterations, # then its return values do not have the properties indicated # in the description of the algorithm. # In particular, the entries of g are not 1-norms of any # column of A until the second iteration. # Therefore we will require the algorithm to run for at least # two iterations, even though this requirement is not stated # in the description of the algorithm. 
if k >= 2: if less_than_or_close(max(h), np.dot(Z[:, best_j], X[:, best_j])): break ind = np.argsort(h)[::-1][:t] h = h[ind] for j in range(t): X[:, j] = elementary_vector(n, ind[j]) # Check invariant (2.2). if k >= 2: if not less_than_or_close(g_prev[0], h_prev[0]): raise Exception('invariant (2.2) is violated') if not less_than_or_close(h_prev[0], g[0]): raise Exception('invariant (2.2) is violated') # Check invariant (2.3). if k >= 3: for j in range(t): if not less_than_or_close(g[j], g_prev[j]): raise Exception('invariant (2.3) is violated') # Update for the next iteration. g_prev = g h_prev = h k += 1 # Return the lower bounds and the corresponding column indices. return g, ind def _onenormest_core(A, AT, t, itmax): """ Compute a lower bound of the 1-norm of a sparse matrix. Parameters ---------- A : ndarray or other linear operator A linear operator that can produce matrix products. AT : ndarray or other linear operator The transpose of A. t : int, optional A positive parameter controlling the tradeoff between accuracy versus time and memory usage. itmax : int, optional Use at most this many iterations. Returns ------- est : float An underestimate of the 1-norm of the sparse matrix. v : ndarray, optional The vector such that ||Av||_1 == est*||v||_1. It can be thought of as an input to the linear operator that gives an output with particularly large norm. w : ndarray, optional The vector Av which has relatively large 1-norm. It can be thought of as an output of the linear operator that is relatively large in norm compared to the input. nmults : int, optional The number of matrix products that were computed. nresamples : int, optional The number of times a parallel column was observed, necessitating a re-randomization of the column. Notes ----- This is algorithm 2.4. """ # This function is a more or less direct translation # of Algorithm 2.4 from the Higham and Tisseur (2000) paper. 
A_linear_operator = aslinearoperator(A) AT_linear_operator = aslinearoperator(AT) if itmax < 2: raise ValueError('at least two iterations are required') if t < 1: raise ValueError('at least one column is required') n = A.shape[0] if t >= n: raise ValueError('t should be smaller than the order of A') # Track the number of big*small matrix multiplications # and the number of resamplings. nmults = 0 nresamples = 0 # "We now explain our choice of starting matrix. We take the first # column of X to be the vector of 1s [...] This has the advantage that # for a matrix with nonnegative elements the algorithm converges # with an exact estimate on the second iteration, and such matrices # arise in applications [...]" X = np.ones((n, t), dtype=float) # "The remaining columns are chosen as rand{-1,1}, # with a check for and correction of parallel columns, # exactly as for S in the body of the algorithm." if t > 1: for i in range(1, t): # These are technically initial samples, not resamples, # so the resampling count is not incremented. resample_column(i, X) for i in range(t): while column_needs_resampling(i, X): resample_column(i, X) nresamples += 1 # "Choose starting matrix X with columns of unit 1-norm." X /= float(n) # "indices of used unit vectors e_j" ind_hist = np.zeros(0, dtype=np.intp) est_old = 0 S = np.zeros((n, t), dtype=float) k = 1 ind = None while True: Y = np.asarray(A_linear_operator.matmat(X)) nmults += 1 mags = _sum_abs_axis0(Y) est = np.max(mags) best_j = np.argmax(mags) if est > est_old or k == 2: if k >= 2: ind_best = ind[best_j] w = Y[:, best_j] # (1) if k >= 2 and est <= est_old: est = est_old break est_old = est S_old = S if k > itmax: break S = sign_round_up(Y) del Y # (2) if every_col_of_X_is_parallel_to_a_col_of_Y(S, S_old): break if t > 1: # "Ensure that no column of S is parallel to another column of S # or to a column of S_old by replacing columns of S by rand{-1,1}." 
for i in range(t): while column_needs_resampling(i, S, S_old): resample_column(i, S) nresamples += 1 del S_old # (3) Z = np.asarray(AT_linear_operator.matmat(S)) nmults += 1 h = _max_abs_axis1(Z) del Z # (4) if k >= 2 and max(h) == h[ind_best]: break # "Sort h so that h_first >= ... >= h_last # and re-order ind correspondingly." # # Later on, we will need at most t+len(ind_hist) largest # entries, so drop the rest ind = np.argsort(h)[::-1][:t+len(ind_hist)].copy() del h if t > 1: # (5) # Break if the most promising t vectors have been visited already. if np.in1d(ind[:t], ind_hist).all(): break # Put the most promising unvisited vectors at the front of the list # and put the visited vectors at the end of the list. # Preserve the order of the indices induced by the ordering of h. seen = np.in1d(ind, ind_hist) ind = np.concatenate((ind[~seen], ind[seen])) for j in range(t): X[:, j] = elementary_vector(n, ind[j]) new_ind = ind[:t][~np.in1d(ind[:t], ind_hist)] ind_hist = np.concatenate((ind_hist, new_ind)) k += 1 v = elementary_vector(n, ind_best) return est, v, w, nmults, nresamples
15,486
32.09188
80
py
scipy
scipy-main/scipy/sparse/linalg/dsolve.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.sparse.linalg` namespace for importing the functions # included below. import warnings from . import _dsolve __all__ = [ # noqa: F822 'MatrixRankWarning', 'SuperLU', 'factorized', 'spilu', 'splu', 'spsolve', 'spsolve_triangular', 'use_solver', 'linsolve', 'test' ] dsolve_modules = ['linsolve'] def __dir__(): return __all__ def __getattr__(name): if name not in __all__ and name not in dsolve_modules: raise AttributeError( "scipy.sparse.linalg.dsolve is deprecated and has no attribute " f"{name}. Try looking in scipy.sparse.linalg instead.") if name in dsolve_modules: msg = (f'The module `scipy.sparse.linalg.dsolve.{name}` is ' 'deprecated. All public names must be imported directly from ' 'the `scipy.sparse.linalg` namespace.') else: msg = (f"Please use `{name}` from the `scipy.sparse.linalg` namespace," " the `scipy.sparse.linalg.eigen` namespace is deprecated.") warnings.warn(msg, category=DeprecationWarning, stacklevel=2) return getattr(_dsolve, name)
1,203
29.871795
79
py
scipy
scipy-main/scipy/sparse/linalg/__init__.py
""" Sparse linear algebra (:mod:`scipy.sparse.linalg`) ================================================== .. currentmodule:: scipy.sparse.linalg Abstract linear operators ------------------------- .. autosummary:: :toctree: generated/ LinearOperator -- abstract representation of a linear operator aslinearoperator -- convert an object to an abstract linear operator Matrix Operations ----------------- .. autosummary:: :toctree: generated/ inv -- compute the sparse matrix inverse expm -- compute the sparse matrix exponential expm_multiply -- compute the product of a matrix exponential and a matrix Matrix norms ------------ .. autosummary:: :toctree: generated/ norm -- Norm of a sparse matrix onenormest -- Estimate the 1-norm of a sparse matrix Solving linear problems ----------------------- Direct methods for linear equation systems: .. autosummary:: :toctree: generated/ spsolve -- Solve the sparse linear system Ax=b spsolve_triangular -- Solve the sparse linear system Ax=b for a triangular matrix factorized -- Pre-factorize matrix to a function solving a linear system MatrixRankWarning -- Warning on exactly singular matrices use_solver -- Select direct solver to use Iterative methods for linear equation systems: .. autosummary:: :toctree: generated/ bicg -- Use BIConjugate Gradient iteration to solve A x = b bicgstab -- Use BIConjugate Gradient STABilized iteration to solve A x = b cg -- Use Conjugate Gradient iteration to solve A x = b cgs -- Use Conjugate Gradient Squared iteration to solve A x = b gmres -- Use Generalized Minimal RESidual iteration to solve A x = b lgmres -- Solve a matrix equation using the LGMRES algorithm minres -- Use MINimum RESidual iteration to solve Ax = b qmr -- Use Quasi-Minimal Residual iteration to solve A x = b gcrotmk -- Solve a matrix equation using the GCROT(m,k) algorithm tfqmr -- Use Transpose-Free Quasi-Minimal Residual iteration to solve A x = b Iterative methods for least-squares problems: .. 
autosummary:: :toctree: generated/ lsqr -- Find the least-squares solution to a sparse linear equation system lsmr -- Find the least-squares solution to a sparse linear equation system Matrix factorizations --------------------- Eigenvalue problems: .. autosummary:: :toctree: generated/ eigs -- Find k eigenvalues and eigenvectors of the square matrix A eigsh -- Find k eigenvalues and eigenvectors of a symmetric matrix lobpcg -- Solve symmetric partial eigenproblems with optional preconditioning Singular values problems: .. autosummary:: :toctree: generated/ svds -- Compute k singular values/vectors for a sparse matrix The `svds` function supports the following solvers: .. toctree:: sparse.linalg.svds-arpack sparse.linalg.svds-lobpcg sparse.linalg.svds-propack Complete or incomplete LU factorizations .. autosummary:: :toctree: generated/ splu -- Compute a LU decomposition for a sparse matrix spilu -- Compute an incomplete LU decomposition for a sparse matrix SuperLU -- Object representing an LU factorization Exceptions ---------- .. autosummary:: :toctree: generated/ ArpackNoConvergence ArpackError """ from ._isolve import * from ._dsolve import * from ._interface import * from ._eigen import * from ._matfuncs import * from ._onenormest import * from ._norm import * from ._expm_multiply import * # Deprecated namespaces, to be removed in v2.0.0 from . import isolve, dsolve, interface, eigen, matfuncs __all__ = [s for s in dir() if not s.startswith('_')] from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
3,717
26.138686
84
py
scipy
scipy-main/scipy/sparse/linalg/interface.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.sparse.linalg` namespace for importing the functions # included below. import warnings from . import _interface __all__ = [ # noqa: F822 'LinearOperator', 'aslinearoperator', 'isspmatrix', 'isshape', 'isintlike', 'asmatrix', 'is_pydata_spmatrix', 'MatrixLinearOperator', 'IdentityOperator' ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.sparse.linalg.interface is deprecated and has no attribute " f"{name}. Try looking in scipy.sparse.linalg instead.") warnings.warn(f"Please use `{name}` from the `scipy.sparse.linalg` namespace, " "the `scipy.sparse.linalg.interface` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_interface, name)
935
29.193548
83
py
scipy
scipy-main/scipy/sparse/linalg/_matfuncs.py
""" Sparse matrix functions """ # # Authors: Travis Oliphant, March 2002 # Anthony Scopatz, August 2012 (Sparse Updates) # Jake Vanderplas, August 2012 (Sparse Updates) # __all__ = ['expm', 'inv'] import numpy as np from scipy.linalg._basic import solve, solve_triangular from scipy.sparse._base import issparse from scipy.sparse.linalg import spsolve from scipy.sparse._sputils import is_pydata_spmatrix import scipy.sparse import scipy.sparse.linalg from scipy.sparse.linalg._interface import LinearOperator from ._expm_multiply import _ident_like, _exact_1_norm as _onenorm UPPER_TRIANGULAR = 'upper_triangular' def inv(A): """ Compute the inverse of a sparse matrix Parameters ---------- A : (M, M) sparse matrix square matrix to be inverted Returns ------- Ainv : (M, M) sparse matrix inverse of `A` Notes ----- This computes the sparse inverse of `A`. If the inverse of `A` is expected to be non-sparse, it will likely be faster to convert `A` to dense and use `scipy.linalg.inv`. Examples -------- >>> from scipy.sparse import csc_matrix >>> from scipy.sparse.linalg import inv >>> A = csc_matrix([[1., 0.], [1., 2.]]) >>> Ainv = inv(A) >>> Ainv <2x2 sparse matrix of type '<class 'numpy.float64'>' with 3 stored elements in Compressed Sparse Column format> >>> A.dot(Ainv) <2x2 sparse matrix of type '<class 'numpy.float64'>' with 2 stored elements in Compressed Sparse Column format> >>> A.dot(Ainv).toarray() array([[ 1., 0.], [ 0., 1.]]) .. versionadded:: 0.12.0 """ # Check input if not (scipy.sparse.issparse(A) or is_pydata_spmatrix(A)): raise TypeError('Input must be a sparse matrix') # Use sparse direct solver to solve "AX = I" accurately I = _ident_like(A) Ainv = spsolve(A, I) return Ainv def _onenorm_matrix_power_nnm(A, p): """ Compute the 1-norm of a non-negative integer power of a non-negative matrix. Parameters ---------- A : a square ndarray or matrix or sparse matrix Input matrix with non-negative entries. 
p : non-negative integer The power to which the matrix is to be raised. Returns ------- out : float The 1-norm of the matrix power p of A. """ # Check input if int(p) != p or p < 0: raise ValueError('expected non-negative integer p') p = int(p) if len(A.shape) != 2 or A.shape[0] != A.shape[1]: raise ValueError('expected A to be like a square matrix') # Explicitly make a column vector so that this works when A is a # numpy matrix (in addition to ndarray and sparse matrix). v = np.ones((A.shape[0], 1), dtype=float) M = A.T for i in range(p): v = M.dot(v) return np.max(v) def _is_upper_triangular(A): # This function could possibly be of wider interest. if issparse(A): lower_part = scipy.sparse.tril(A, -1) # Check structural upper triangularity, # then coincidental upper triangularity if needed. return lower_part.nnz == 0 or lower_part.count_nonzero() == 0 elif is_pydata_spmatrix(A): import sparse lower_part = sparse.tril(A, -1) return lower_part.nnz == 0 else: return not np.tril(A, -1).any() def _smart_matrix_product(A, B, alpha=None, structure=None): """ A matrix product that knows about sparse and structured matrices. Parameters ---------- A : 2d ndarray First matrix. B : 2d ndarray Second matrix. alpha : float The matrix product will be scaled by this constant. structure : str, optional A string describing the structure of both matrices `A` and `B`. Only `upper_triangular` is currently supported. Returns ------- M : 2d ndarray Matrix product of A and B. """ if len(A.shape) != 2: raise ValueError('expected A to be a rectangular matrix') if len(B.shape) != 2: raise ValueError('expected B to be a rectangular matrix') f = None if structure == UPPER_TRIANGULAR: if (not issparse(A) and not issparse(B) and not is_pydata_spmatrix(A) and not is_pydata_spmatrix(B)): f, = scipy.linalg.get_blas_funcs(('trmm',), (A, B)) if f is not None: if alpha is None: alpha = 1. 
out = f(alpha, A, B) else: if alpha is None: out = A.dot(B) else: out = alpha * A.dot(B) return out class MatrixPowerOperator(LinearOperator): def __init__(self, A, p, structure=None): if A.ndim != 2 or A.shape[0] != A.shape[1]: raise ValueError('expected A to be like a square matrix') if p < 0: raise ValueError('expected p to be a non-negative integer') self._A = A self._p = p self._structure = structure self.dtype = A.dtype self.ndim = A.ndim self.shape = A.shape def _matvec(self, x): for i in range(self._p): x = self._A.dot(x) return x def _rmatvec(self, x): A_T = self._A.T x = x.ravel() for i in range(self._p): x = A_T.dot(x) return x def _matmat(self, X): for i in range(self._p): X = _smart_matrix_product(self._A, X, structure=self._structure) return X @property def T(self): return MatrixPowerOperator(self._A.T, self._p) class ProductOperator(LinearOperator): """ For now, this is limited to products of multiple square matrices. """ def __init__(self, *args, **kwargs): self._structure = kwargs.get('structure', None) for A in args: if len(A.shape) != 2 or A.shape[0] != A.shape[1]: raise ValueError( 'For now, the ProductOperator implementation is ' 'limited to the product of multiple square matrices.') if args: n = args[0].shape[0] for A in args: for d in A.shape: if d != n: raise ValueError( 'The square matrices of the ProductOperator ' 'must all have the same shape.') self.shape = (n, n) self.ndim = len(self.shape) self.dtype = np.result_type(*[x.dtype for x in args]) self._operator_sequence = args def _matvec(self, x): for A in reversed(self._operator_sequence): x = A.dot(x) return x def _rmatvec(self, x): x = x.ravel() for A in self._operator_sequence: x = A.T.dot(x) return x def _matmat(self, X): for A in reversed(self._operator_sequence): X = _smart_matrix_product(A, X, structure=self._structure) return X @property def T(self): T_args = [A.T for A in reversed(self._operator_sequence)] return ProductOperator(*T_args) def _onenormest_matrix_power(A, p, t=2, 
itmax=5, compute_v=False, compute_w=False, structure=None): """ Efficiently estimate the 1-norm of A^p. Parameters ---------- A : ndarray Matrix whose 1-norm of a power is to be computed. p : int Non-negative integer power. t : int, optional A positive parameter controlling the tradeoff between accuracy versus time and memory usage. Larger values take longer and use more memory but give more accurate output. itmax : int, optional Use at most this many iterations. compute_v : bool, optional Request a norm-maximizing linear operator input vector if True. compute_w : bool, optional Request a norm-maximizing linear operator output vector if True. Returns ------- est : float An underestimate of the 1-norm of the sparse matrix. v : ndarray, optional The vector such that ||Av||_1 == est*||v||_1. It can be thought of as an input to the linear operator that gives an output with particularly large norm. w : ndarray, optional The vector Av which has relatively large 1-norm. It can be thought of as an output of the linear operator that is relatively large in norm compared to the input. """ return scipy.sparse.linalg.onenormest( MatrixPowerOperator(A, p, structure=structure)) def _onenormest_product(operator_seq, t=2, itmax=5, compute_v=False, compute_w=False, structure=None): """ Efficiently estimate the 1-norm of the matrix product of the args. Parameters ---------- operator_seq : linear operator sequence Matrices whose 1-norm of product is to be computed. t : int, optional A positive parameter controlling the tradeoff between accuracy versus time and memory usage. Larger values take longer and use more memory but give more accurate output. itmax : int, optional Use at most this many iterations. compute_v : bool, optional Request a norm-maximizing linear operator input vector if True. compute_w : bool, optional Request a norm-maximizing linear operator output vector if True. structure : str, optional A string describing the structure of all operators. 
Only `upper_triangular` is currently supported. Returns ------- est : float An underestimate of the 1-norm of the sparse matrix. v : ndarray, optional The vector such that ||Av||_1 == est*||v||_1. It can be thought of as an input to the linear operator that gives an output with particularly large norm. w : ndarray, optional The vector Av which has relatively large 1-norm. It can be thought of as an output of the linear operator that is relatively large in norm compared to the input. """ return scipy.sparse.linalg.onenormest( ProductOperator(*operator_seq, structure=structure)) class _ExpmPadeHelper: """ Help lazily evaluate a matrix exponential. The idea is to not do more work than we need for high expm precision, so we lazily compute matrix powers and store or precompute other properties of the matrix. """ def __init__(self, A, structure=None, use_exact_onenorm=False): """ Initialize the object. Parameters ---------- A : a dense or sparse square numpy matrix or ndarray The matrix to be exponentiated. structure : str, optional A string describing the structure of matrix `A`. Only `upper_triangular` is currently supported. use_exact_onenorm : bool, optional If True then only the exact one-norm of matrix powers and products will be used. Otherwise, the one-norm of powers and products may initially be estimated. 
""" self.A = A self._A2 = None self._A4 = None self._A6 = None self._A8 = None self._A10 = None self._d4_exact = None self._d6_exact = None self._d8_exact = None self._d10_exact = None self._d4_approx = None self._d6_approx = None self._d8_approx = None self._d10_approx = None self.ident = _ident_like(A) self.structure = structure self.use_exact_onenorm = use_exact_onenorm @property def A2(self): if self._A2 is None: self._A2 = _smart_matrix_product( self.A, self.A, structure=self.structure) return self._A2 @property def A4(self): if self._A4 is None: self._A4 = _smart_matrix_product( self.A2, self.A2, structure=self.structure) return self._A4 @property def A6(self): if self._A6 is None: self._A6 = _smart_matrix_product( self.A4, self.A2, structure=self.structure) return self._A6 @property def A8(self): if self._A8 is None: self._A8 = _smart_matrix_product( self.A6, self.A2, structure=self.structure) return self._A8 @property def A10(self): if self._A10 is None: self._A10 = _smart_matrix_product( self.A4, self.A6, structure=self.structure) return self._A10 @property def d4_tight(self): if self._d4_exact is None: self._d4_exact = _onenorm(self.A4)**(1/4.) return self._d4_exact @property def d6_tight(self): if self._d6_exact is None: self._d6_exact = _onenorm(self.A6)**(1/6.) return self._d6_exact @property def d8_tight(self): if self._d8_exact is None: self._d8_exact = _onenorm(self.A8)**(1/8.) return self._d8_exact @property def d10_tight(self): if self._d10_exact is None: self._d10_exact = _onenorm(self.A10)**(1/10.) return self._d10_exact @property def d4_loose(self): if self.use_exact_onenorm: return self.d4_tight if self._d4_exact is not None: return self._d4_exact else: if self._d4_approx is None: self._d4_approx = _onenormest_matrix_power(self.A2, 2, structure=self.structure)**(1/4.) 
return self._d4_approx @property def d6_loose(self): if self.use_exact_onenorm: return self.d6_tight if self._d6_exact is not None: return self._d6_exact else: if self._d6_approx is None: self._d6_approx = _onenormest_matrix_power(self.A2, 3, structure=self.structure)**(1/6.) return self._d6_approx @property def d8_loose(self): if self.use_exact_onenorm: return self.d8_tight if self._d8_exact is not None: return self._d8_exact else: if self._d8_approx is None: self._d8_approx = _onenormest_matrix_power(self.A4, 2, structure=self.structure)**(1/8.) return self._d8_approx @property def d10_loose(self): if self.use_exact_onenorm: return self.d10_tight if self._d10_exact is not None: return self._d10_exact else: if self._d10_approx is None: self._d10_approx = _onenormest_product((self.A4, self.A6), structure=self.structure)**(1/10.) return self._d10_approx def pade3(self): b = (120., 60., 12., 1.) U = _smart_matrix_product(self.A, b[3]*self.A2 + b[1]*self.ident, structure=self.structure) V = b[2]*self.A2 + b[0]*self.ident return U, V def pade5(self): b = (30240., 15120., 3360., 420., 30., 1.) U = _smart_matrix_product(self.A, b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident, structure=self.structure) V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident return U, V def pade7(self): b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.) U = _smart_matrix_product(self.A, b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident, structure=self.structure) V = b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident return U, V def pade9(self): b = (17643225600., 8821612800., 2075673600., 302702400., 30270240., 2162160., 110880., 3960., 90., 1.) 
U = _smart_matrix_product(self.A, (b[9]*self.A8 + b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident), structure=self.structure) V = (b[8]*self.A8 + b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident) return U, V def pade13_scaled(self, s): b = (64764752532480000., 32382376266240000., 7771770303897600., 1187353796428800., 129060195264000., 10559470521600., 670442572800., 33522128640., 1323241920., 40840800., 960960., 16380., 182., 1.) B = self.A * 2**-s B2 = self.A2 * 2**(-2*s) B4 = self.A4 * 2**(-4*s) B6 = self.A6 * 2**(-6*s) U2 = _smart_matrix_product(B6, b[13]*B6 + b[11]*B4 + b[9]*B2, structure=self.structure) U = _smart_matrix_product(B, (U2 + b[7]*B6 + b[5]*B4 + b[3]*B2 + b[1]*self.ident), structure=self.structure) V2 = _smart_matrix_product(B6, b[12]*B6 + b[10]*B4 + b[8]*B2, structure=self.structure) V = V2 + b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident return U, V def expm(A): """ Compute the matrix exponential using Pade approximation. Parameters ---------- A : (M,M) array_like or sparse matrix 2D Array or Matrix (sparse or dense) to be exponentiated Returns ------- expA : (M,M) ndarray Matrix exponential of `A` Notes ----- This is algorithm (6.1) which is a simplification of algorithm (5.1). .. versionadded:: 0.12.0 References ---------- .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009) "A New Scaling and Squaring Algorithm for the Matrix Exponential." SIAM Journal on Matrix Analysis and Applications. 31 (3). pp. 970-989. ISSN 1095-7162 Examples -------- >>> from scipy.sparse import csc_matrix >>> from scipy.sparse.linalg import expm >>> A = csc_matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]]) >>> A.toarray() array([[1, 0, 0], [0, 2, 0], [0, 0, 3]], dtype=int64) >>> Aexp = expm(A) >>> Aexp <3x3 sparse matrix of type '<class 'numpy.float64'>' with 3 stored elements in Compressed Sparse Column format> >>> Aexp.toarray() array([[ 2.71828183, 0. , 0. ], [ 0. , 7.3890561 , 0. ], [ 0. , 0. 
, 20.08553692]]) """ return _expm(A, use_exact_onenorm='auto') def _expm(A, use_exact_onenorm): # Core of expm, separated to allow testing exact and approximate # algorithms. # Avoid indiscriminate asarray() to allow sparse or other strange arrays. if isinstance(A, (list, tuple, np.matrix)): A = np.asarray(A) if len(A.shape) != 2 or A.shape[0] != A.shape[1]: raise ValueError('expected a square matrix') # gracefully handle size-0 input, # carefully handling sparse scenario if A.shape == (0, 0): out = np.zeros([0, 0], dtype=A.dtype) if issparse(A) or is_pydata_spmatrix(A): return A.__class__(out) return out # Trivial case if A.shape == (1, 1): out = [[np.exp(A[0, 0])]] # Avoid indiscriminate casting to ndarray to # allow for sparse or other strange arrays if issparse(A) or is_pydata_spmatrix(A): return A.__class__(out) return np.array(out) # Ensure input is of float type, to avoid integer overflows etc. if ((isinstance(A, np.ndarray) or issparse(A) or is_pydata_spmatrix(A)) and not np.issubdtype(A.dtype, np.inexact)): A = A.astype(float) # Detect upper triangularity. structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None if use_exact_onenorm == "auto": # Hardcode a matrix order threshold for exact vs. estimated one-norms. use_exact_onenorm = A.shape[0] < 200 # Track functions of A to help compute the matrix exponential. h = _ExpmPadeHelper( A, structure=structure, use_exact_onenorm=use_exact_onenorm) # Try Pade order 3. eta_1 = max(h.d4_loose, h.d6_loose) if eta_1 < 1.495585217958292e-002 and _ell(h.A, 3) == 0: U, V = h.pade3() return _solve_P_Q(U, V, structure=structure) # Try Pade order 5. eta_2 = max(h.d4_tight, h.d6_loose) if eta_2 < 2.539398330063230e-001 and _ell(h.A, 5) == 0: U, V = h.pade5() return _solve_P_Q(U, V, structure=structure) # Try Pade orders 7 and 9. 
eta_3 = max(h.d6_tight, h.d8_loose) if eta_3 < 9.504178996162932e-001 and _ell(h.A, 7) == 0: U, V = h.pade7() return _solve_P_Q(U, V, structure=structure) if eta_3 < 2.097847961257068e+000 and _ell(h.A, 9) == 0: U, V = h.pade9() return _solve_P_Q(U, V, structure=structure) # Use Pade order 13. eta_4 = max(h.d8_loose, h.d10_loose) eta_5 = min(eta_3, eta_4) theta_13 = 4.25 # Choose smallest s>=0 such that 2**(-s) eta_5 <= theta_13 if eta_5 == 0: # Nilpotent special case s = 0 else: s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0) s = s + _ell(2**-s * h.A, 13) U, V = h.pade13_scaled(s) X = _solve_P_Q(U, V, structure=structure) if structure == UPPER_TRIANGULAR: # Invoke Code Fragment 2.1. X = _fragment_2_1(X, h.A, s) else: # X = r_13(A)^(2^s) by repeated squaring. for i in range(s): X = X.dot(X) return X def _solve_P_Q(U, V, structure=None): """ A helper function for expm_2009. Parameters ---------- U : ndarray Pade numerator. V : ndarray Pade denominator. structure : str, optional A string describing the structure of both matrices `U` and `V`. Only `upper_triangular` is currently supported. Notes ----- The `structure` argument is inspired by similar args for theano and cvxopt functions. """ P = U + V Q = -U + V if issparse(U) or is_pydata_spmatrix(U): return spsolve(Q, P) elif structure is None: return solve(Q, P) elif structure == UPPER_TRIANGULAR: return solve_triangular(Q, P) else: raise ValueError('unsupported matrix structure: ' + str(structure)) def _exp_sinch(a, x): """ Stably evaluate exp(a)*sinh(x)/x Notes ----- The strategy of falling back to a sixth order Taylor expansion was suggested by the Spallation Neutron Source docs which was found on the internet by google search. http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html The details of the cutoff point and the Horner-like evaluation was picked without reference to anything in particular. 
Note that sinch is not currently implemented in scipy.special, whereas the "engineer's" definition of sinc is implemented. The implementation of sinc involves a scaling factor of pi that distinguishes it from the "mathematician's" version of sinc. """ # If x is small then use sixth order Taylor expansion. # How small is small? I am using the point where the relative error # of the approximation is less than 1e-14. # If x is large then directly evaluate sinh(x) / x. if abs(x) < 0.0135: x2 = x*x return np.exp(a) * (1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.)))) else: return (np.exp(a + x) - np.exp(a - x)) / (2*x) def _eq_10_42(lam_1, lam_2, t_12): """ Equation (10.42) of Functions of Matrices: Theory and Computation. Notes ----- This is a helper function for _fragment_2_1 of expm_2009. Equation (10.42) is on page 251 in the section on Schur algorithms. In particular, section 10.4.3 explains the Schur-Parlett algorithm. expm([[lam_1, t_12], [0, lam_1]) = [[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)], [0, exp(lam_2)] """ # The plain formula t_12 * (exp(lam_2) - exp(lam_2)) / (lam_2 - lam_1) # apparently suffers from cancellation, according to Higham's textbook. # A nice implementation of sinch, defined as sinh(x)/x, # will apparently work around the cancellation. a = 0.5 * (lam_1 + lam_2) b = 0.5 * (lam_1 - lam_2) return t_12 * _exp_sinch(a, b) def _fragment_2_1(X, T, s): """ A helper function for expm_2009. Notes ----- The argument X is modified in-place, but this modification is not the same as the returned value of the function. This function also takes pains to do things in ways that are compatible with sparse matrices, for example by avoiding fancy indexing and by using methods of the matrices whenever possible instead of using functions of the numpy or scipy libraries themselves. """ # Form X = r_m(2^-s T) # Replace diag(X) by exp(2^-s diag(T)). n = X.shape[0] diag_T = np.ravel(T.diagonal().copy()) # Replace diag(X) by exp(2^-s diag(T)). 
scale = 2 ** -s exp_diag = np.exp(scale * diag_T) for k in range(n): X[k, k] = exp_diag[k] for i in range(s-1, -1, -1): X = X.dot(X) # Replace diag(X) by exp(2^-i diag(T)). scale = 2 ** -i exp_diag = np.exp(scale * diag_T) for k in range(n): X[k, k] = exp_diag[k] # Replace (first) superdiagonal of X by explicit formula # for superdiagonal of exp(2^-i T) from Eq (10.42) of # the author's 2008 textbook # Functions of Matrices: Theory and Computation. for k in range(n-1): lam_1 = scale * diag_T[k] lam_2 = scale * diag_T[k+1] t_12 = scale * T[k, k+1] value = _eq_10_42(lam_1, lam_2, t_12) X[k, k+1] = value # Return the updated X matrix. return X def _ell(A, m): """ A helper function for expm_2009. Parameters ---------- A : linear operator A linear operator whose norm of power we care about. m : int The power of the linear operator Returns ------- value : int A value related to a bound. """ if len(A.shape) != 2 or A.shape[0] != A.shape[1]: raise ValueError('expected A to be like a square matrix') # The c_i are explained in (2.2) and (2.6) of the 2005 expm paper. # They are coefficients of terms of a generating function series expansion. c_i = {3: 100800., 5: 10059033600., 7: 4487938430976000., 9: 5914384781877411840000., 13: 113250775606021113483283660800000000. } abs_c_recip = c_i[m] # This is explained after Eq. (1.2) of the 2009 expm paper. # It is the "unit roundoff" of IEEE double precision arithmetic. u = 2**-53 # Compute the one-norm of matrix power p of abs(A). A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), 2*m + 1) # Treat zero norm as a special case. if not A_abs_onenorm: return 0 alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip) log2_alpha_div_u = np.log2(alpha/u) value = int(np.ceil(log2_alpha_div_u / (2 * m))) return max(value, 0)
27,210
30.494213
93
py
scipy
scipy-main/scipy/sparse/linalg/_interface.py
"""Abstract linear algebra library. This module defines a class hierarchy that implements a kind of "lazy" matrix representation, called the ``LinearOperator``. It can be used to do linear algebra with extremely large sparse or structured matrices, without representing those explicitly in memory. Such matrices can be added, multiplied, transposed, etc. As a motivating example, suppose you want have a matrix where almost all of the elements have the value one. The standard sparse matrix representation skips the storage of zeros, but not ones. By contrast, a LinearOperator is able to represent such matrices efficiently. First, we need a compact way to represent an all-ones matrix:: >>> import numpy as np >>> class Ones(LinearOperator): ... def __init__(self, shape): ... super().__init__(dtype=None, shape=shape) ... def _matvec(self, x): ... return np.repeat(x.sum(), self.shape[0]) Instances of this class emulate ``np.ones(shape)``, but using a constant amount of storage, independent of ``shape``. The ``_matvec`` method specifies how this linear operator multiplies with (operates on) a vector. We can now add this operator to a sparse matrix that stores only offsets from one:: >>> from scipy.sparse import csr_matrix >>> offsets = csr_matrix([[1, 0, 2], [0, -1, 0], [0, 0, 3]]) >>> A = aslinearoperator(offsets) + Ones(offsets.shape) >>> A.dot([1, 2, 3]) array([13, 4, 15]) The result is the same as that given by its dense, explicitly-stored counterpart:: >>> (np.ones(A.shape, A.dtype) + offsets.toarray()).dot([1, 2, 3]) array([13, 4, 15]) Several algorithms in the ``scipy.sparse`` library are able to operate on ``LinearOperator`` instances. """ import warnings import numpy as np from scipy.sparse import issparse from scipy.sparse._sputils import isshape, isintlike, asmatrix, is_pydata_spmatrix __all__ = ['LinearOperator', 'aslinearoperator'] class LinearOperator: """Common interface for performing matrix vector products Many iterative methods (e.g. 
cg, gmres) do not need to know the individual entries of a matrix to solve a linear system A*x=b. Such solvers only require the computation of matrix vector products, A*v where v is a dense vector. This class serves as an abstract interface between iterative solvers and matrix-like objects. To construct a concrete LinearOperator, either pass appropriate callables to the constructor of this class, or subclass it. A subclass must implement either one of the methods ``_matvec`` and ``_matmat``, and the attributes/properties ``shape`` (pair of integers) and ``dtype`` (may be None). It may call the ``__init__`` on this class to have these attributes validated. Implementing ``_matvec`` automatically implements ``_matmat`` (using a naive algorithm) and vice-versa. Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint`` to implement the Hermitian adjoint (conjugate transpose). As with ``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or ``_adjoint`` implements the other automatically. Implementing ``_adjoint`` is preferable; ``_rmatvec`` is mostly there for backwards compatibility. Parameters ---------- shape : tuple Matrix dimensions (M, N). matvec : callable f(v) Returns returns A * v. rmatvec : callable f(v) Returns A^H * v, where A^H is the conjugate transpose of A. matmat : callable f(V) Returns A * V, where V is a dense matrix with dimensions (N, K). dtype : dtype Data type of the matrix. rmatmat : callable f(V) Returns A^H * V, where V is a dense matrix with dimensions (M, K). Attributes ---------- args : tuple For linear operators describing products etc. of other linear operators, the operands of the binary operation. ndim : int Number of dimensions (this is always 2) See Also -------- aslinearoperator : Construct LinearOperators Notes ----- The user-defined matvec() function must properly handle the case where v has shape (N,) as well as the (N,1) case. The shape of the return type is handled internally by LinearOperator. 
LinearOperator instances can also be multiplied, added with each other and exponentiated, all lazily: the result of these operations is always a new, composite LinearOperator, that defers linear operations to the original operators and combines the results. More details regarding how to subclass a LinearOperator and several examples of concrete LinearOperator instances can be found in the external project `PyLops <https://pylops.readthedocs.io>`_. Examples -------- >>> import numpy as np >>> from scipy.sparse.linalg import LinearOperator >>> def mv(v): ... return np.array([2*v[0], 3*v[1]]) ... >>> A = LinearOperator((2,2), matvec=mv) >>> A <2x2 _CustomLinearOperator with dtype=float64> >>> A.matvec(np.ones(2)) array([ 2., 3.]) >>> A * np.ones(2) array([ 2., 3.]) """ ndim = 2 # Necessary for right matmul with numpy arrays. __array_ufunc__ = None def __new__(cls, *args, **kwargs): if cls is LinearOperator: # Operate as _CustomLinearOperator factory. return super().__new__(_CustomLinearOperator) else: obj = super().__new__(cls) if (type(obj)._matvec == LinearOperator._matvec and type(obj)._matmat == LinearOperator._matmat): warnings.warn("LinearOperator subclass should implement" " at least one of _matvec and _matmat.", category=RuntimeWarning, stacklevel=2) return obj def __init__(self, dtype, shape): """Initialize this LinearOperator. To be called by subclasses. ``dtype`` may be None; ``shape`` should be convertible to a length-2 tuple. """ if dtype is not None: dtype = np.dtype(dtype) shape = tuple(shape) if not isshape(shape): raise ValueError(f"invalid shape {shape!r} (must be 2-d)") self.dtype = dtype self.shape = shape def _init_dtype(self): """Called from subclasses at the end of the __init__ routine. """ if self.dtype is None: v = np.zeros(self.shape[-1]) self.dtype = np.asarray(self.matvec(v)).dtype def _matmat(self, X): """Default matrix-matrix multiplication handler. 
Falls back on the user-defined _matvec method, so defining that will define matrix multiplication (though in a very suboptimal way). """ return np.hstack([self.matvec(col.reshape(-1,1)) for col in X.T]) def _matvec(self, x): """Default matrix-vector multiplication handler. If self is a linear operator of shape (M, N), then this method will be called on a shape (N,) or (N, 1) ndarray, and should return a shape (M,) or (M, 1) ndarray. This default implementation falls back on _matmat, so defining that will define matrix-vector multiplication as well. """ return self.matmat(x.reshape(-1, 1)) def matvec(self, x): """Matrix-vector multiplication. Performs the operation y=A*x where A is an MxN linear operator and x is a column vector or 1-d array. Parameters ---------- x : {matrix, ndarray} An array with shape (N,) or (N,1). Returns ------- y : {matrix, ndarray} A matrix or ndarray with shape (M,) or (M,1) depending on the type and shape of the x argument. Notes ----- This matvec wraps the user-specified matvec routine or overridden _matvec method to ensure that y has the correct shape and type. """ x = np.asanyarray(x) M,N = self.shape if x.shape != (N,) and x.shape != (N,1): raise ValueError('dimension mismatch') y = self._matvec(x) if isinstance(x, np.matrix): y = asmatrix(y) else: y = np.asarray(y) if x.ndim == 1: y = y.reshape(M) elif x.ndim == 2: y = y.reshape(M,1) else: raise ValueError('invalid shape returned by user-defined matvec()') return y def rmatvec(self, x): """Adjoint matrix-vector multiplication. Performs the operation y = A^H * x where A is an MxN linear operator and x is a column vector or 1-d array. Parameters ---------- x : {matrix, ndarray} An array with shape (M,) or (M,1). Returns ------- y : {matrix, ndarray} A matrix or ndarray with shape (N,) or (N,1) depending on the type and shape of the x argument. Notes ----- This rmatvec wraps the user-specified rmatvec routine or overridden _rmatvec method to ensure that y has the correct shape and type. 
""" x = np.asanyarray(x) M,N = self.shape if x.shape != (M,) and x.shape != (M,1): raise ValueError('dimension mismatch') y = self._rmatvec(x) if isinstance(x, np.matrix): y = asmatrix(y) else: y = np.asarray(y) if x.ndim == 1: y = y.reshape(N) elif x.ndim == 2: y = y.reshape(N,1) else: raise ValueError('invalid shape returned by user-defined rmatvec()') return y def _rmatvec(self, x): """Default implementation of _rmatvec; defers to adjoint.""" if type(self)._adjoint == LinearOperator._adjoint: # _adjoint not overridden, prevent infinite recursion raise NotImplementedError else: return self.H.matvec(x) def matmat(self, X): """Matrix-matrix multiplication. Performs the operation y=A*X where A is an MxN linear operator and X dense N*K matrix or ndarray. Parameters ---------- X : {matrix, ndarray} An array with shape (N,K). Returns ------- Y : {matrix, ndarray} A matrix or ndarray with shape (M,K) depending on the type of the X argument. Notes ----- This matmat wraps any user-specified matmat routine or overridden _matmat method to ensure that y has the correct type. """ if not (issparse(X) or is_pydata_spmatrix(X)): X = np.asanyarray(X) if X.ndim != 2: raise ValueError(f'expected 2-d ndarray or matrix, not {X.ndim}-d') if X.shape[0] != self.shape[1]: raise ValueError(f'dimension mismatch: {self.shape}, {X.shape}') try: Y = self._matmat(X) except Exception as e: if issparse(X) or is_pydata_spmatrix(X): raise TypeError( "Unable to multiply a LinearOperator with a sparse matrix." " Wrap the matrix in aslinearoperator first." ) from e raise if isinstance(Y, np.matrix): Y = asmatrix(Y) return Y def rmatmat(self, X): """Adjoint matrix-matrix multiplication. Performs the operation y = A^H * x where A is an MxN linear operator and x is a column vector or 1-d array, or 2-d array. The default implementation defers to the adjoint. Parameters ---------- X : {matrix, ndarray} A matrix or 2D array. 
Returns ------- Y : {matrix, ndarray} A matrix or 2D array depending on the type of the input. Notes ----- This rmatmat wraps the user-specified rmatmat routine. """ if not (issparse(X) or is_pydata_spmatrix(X)): X = np.asanyarray(X) if X.ndim != 2: raise ValueError('expected 2-d ndarray or matrix, not %d-d' % X.ndim) if X.shape[0] != self.shape[0]: raise ValueError(f'dimension mismatch: {self.shape}, {X.shape}') try: Y = self._rmatmat(X) except Exception as e: if issparse(X) or is_pydata_spmatrix(X): raise TypeError( "Unable to multiply a LinearOperator with a sparse matrix." " Wrap the matrix in aslinearoperator() first." ) from e raise if isinstance(Y, np.matrix): Y = asmatrix(Y) return Y def _rmatmat(self, X): """Default implementation of _rmatmat defers to rmatvec or adjoint.""" if type(self)._adjoint == LinearOperator._adjoint: return np.hstack([self.rmatvec(col.reshape(-1, 1)) for col in X.T]) else: return self.H.matmat(X) def __call__(self, x): return self*x def __mul__(self, x): return self.dot(x) def __truediv__(self, other): if not np.isscalar(other): raise ValueError("Can only divide a linear operator by a scalar.") return _ScaledLinearOperator(self, 1.0/other) def dot(self, x): """Matrix-matrix or matrix-vector multiplication. Parameters ---------- x : array_like 1-d or 2-d array, representing a vector or matrix. Returns ------- Ax : array 1-d or 2-d array (depending on the shape of x) that represents the result of applying this linear operator on x. """ if isinstance(x, LinearOperator): return _ProductLinearOperator(self, x) elif np.isscalar(x): return _ScaledLinearOperator(self, x) else: if not issparse(x) and not is_pydata_spmatrix(x): # Sparse matrices shouldn't be converted to numpy arrays. 
x = np.asarray(x) if x.ndim == 1 or x.ndim == 2 and x.shape[1] == 1: return self.matvec(x) elif x.ndim == 2: return self.matmat(x) else: raise ValueError('expected 1-d or 2-d array or matrix, got %r' % x) def __matmul__(self, other): if np.isscalar(other): raise ValueError("Scalar operands are not allowed, " "use '*' instead") return self.__mul__(other) def __rmatmul__(self, other): if np.isscalar(other): raise ValueError("Scalar operands are not allowed, " "use '*' instead") return self.__rmul__(other) def __rmul__(self, x): if np.isscalar(x): return _ScaledLinearOperator(self, x) else: return self._rdot(x) def _rdot(self, x): """Matrix-matrix or matrix-vector multiplication from the right. Parameters ---------- x : array_like 1-d or 2-d array, representing a vector or matrix. Returns ------- xA : array 1-d or 2-d array (depending on the shape of x) that represents the result of applying this linear operator on x from the right. Notes ----- This is copied from dot to implement right multiplication. """ if isinstance(x, LinearOperator): return _ProductLinearOperator(x, self) elif np.isscalar(x): return _ScaledLinearOperator(self, x) else: if not issparse(x) and not is_pydata_spmatrix(x): # Sparse matrices shouldn't be converted to numpy arrays. x = np.asarray(x) # We use transpose instead of rmatvec/rmatmat to avoid # unnecessary complex conjugation if possible. 
if x.ndim == 1 or x.ndim == 2 and x.shape[0] == 1: return self.T.matvec(x.T).T elif x.ndim == 2: return self.T.matmat(x.T).T else: raise ValueError('expected 1-d or 2-d array or matrix, got %r' % x) def __pow__(self, p): if np.isscalar(p): return _PowerLinearOperator(self, p) else: return NotImplemented def __add__(self, x): if isinstance(x, LinearOperator): return _SumLinearOperator(self, x) else: return NotImplemented def __neg__(self): return _ScaledLinearOperator(self, -1) def __sub__(self, x): return self.__add__(-x) def __repr__(self): M,N = self.shape if self.dtype is None: dt = 'unspecified dtype' else: dt = 'dtype=' + str(self.dtype) return '<%dx%d %s with %s>' % (M, N, self.__class__.__name__, dt) def adjoint(self): """Hermitian adjoint. Returns the Hermitian adjoint of self, aka the Hermitian conjugate or Hermitian transpose. For a complex matrix, the Hermitian adjoint is equal to the conjugate transpose. Can be abbreviated self.H instead of self.adjoint(). Returns ------- A_H : LinearOperator Hermitian adjoint of self. """ return self._adjoint() H = property(adjoint) def transpose(self): """Transpose this linear operator. Returns a LinearOperator that represents the transpose of this one. Can be abbreviated self.T instead of self.transpose(). 
""" return self._transpose() T = property(transpose) def _adjoint(self): """Default implementation of _adjoint; defers to rmatvec.""" return _AdjointLinearOperator(self) def _transpose(self): """ Default implementation of _transpose; defers to rmatvec + conj""" return _TransposedLinearOperator(self) class _CustomLinearOperator(LinearOperator): """Linear operator defined in terms of user-specified operations.""" def __init__(self, shape, matvec, rmatvec=None, matmat=None, dtype=None, rmatmat=None): super().__init__(dtype, shape) self.args = () self.__matvec_impl = matvec self.__rmatvec_impl = rmatvec self.__rmatmat_impl = rmatmat self.__matmat_impl = matmat self._init_dtype() def _matmat(self, X): if self.__matmat_impl is not None: return self.__matmat_impl(X) else: return super()._matmat(X) def _matvec(self, x): return self.__matvec_impl(x) def _rmatvec(self, x): func = self.__rmatvec_impl if func is None: raise NotImplementedError("rmatvec is not defined") return self.__rmatvec_impl(x) def _rmatmat(self, X): if self.__rmatmat_impl is not None: return self.__rmatmat_impl(X) else: return super()._rmatmat(X) def _adjoint(self): return _CustomLinearOperator(shape=(self.shape[1], self.shape[0]), matvec=self.__rmatvec_impl, rmatvec=self.__matvec_impl, matmat=self.__rmatmat_impl, rmatmat=self.__matmat_impl, dtype=self.dtype) class _AdjointLinearOperator(LinearOperator): """Adjoint of arbitrary Linear Operator""" def __init__(self, A): shape = (A.shape[1], A.shape[0]) super().__init__(dtype=A.dtype, shape=shape) self.A = A self.args = (A,) def _matvec(self, x): return self.A._rmatvec(x) def _rmatvec(self, x): return self.A._matvec(x) def _matmat(self, x): return self.A._rmatmat(x) def _rmatmat(self, x): return self.A._matmat(x) class _TransposedLinearOperator(LinearOperator): """Transposition of arbitrary Linear Operator""" def __init__(self, A): shape = (A.shape[1], A.shape[0]) super().__init__(dtype=A.dtype, shape=shape) self.A = A self.args = (A,) def _matvec(self, x): 
# NB. np.conj works also on sparse matrices return np.conj(self.A._rmatvec(np.conj(x))) def _rmatvec(self, x): return np.conj(self.A._matvec(np.conj(x))) def _matmat(self, x): # NB. np.conj works also on sparse matrices return np.conj(self.A._rmatmat(np.conj(x))) def _rmatmat(self, x): return np.conj(self.A._matmat(np.conj(x))) def _get_dtype(operators, dtypes=None): if dtypes is None: dtypes = [] for obj in operators: if obj is not None and hasattr(obj, 'dtype'): dtypes.append(obj.dtype) return np.result_type(*dtypes) class _SumLinearOperator(LinearOperator): def __init__(self, A, B): if not isinstance(A, LinearOperator) or \ not isinstance(B, LinearOperator): raise ValueError('both operands have to be a LinearOperator') if A.shape != B.shape: raise ValueError(f'cannot add {A} and {B}: shape mismatch') self.args = (A, B) super().__init__(_get_dtype([A, B]), A.shape) def _matvec(self, x): return self.args[0].matvec(x) + self.args[1].matvec(x) def _rmatvec(self, x): return self.args[0].rmatvec(x) + self.args[1].rmatvec(x) def _rmatmat(self, x): return self.args[0].rmatmat(x) + self.args[1].rmatmat(x) def _matmat(self, x): return self.args[0].matmat(x) + self.args[1].matmat(x) def _adjoint(self): A, B = self.args return A.H + B.H class _ProductLinearOperator(LinearOperator): def __init__(self, A, B): if not isinstance(A, LinearOperator) or \ not isinstance(B, LinearOperator): raise ValueError('both operands have to be a LinearOperator') if A.shape[1] != B.shape[0]: raise ValueError(f'cannot multiply {A} and {B}: shape mismatch') super().__init__(_get_dtype([A, B]), (A.shape[0], B.shape[1])) self.args = (A, B) def _matvec(self, x): return self.args[0].matvec(self.args[1].matvec(x)) def _rmatvec(self, x): return self.args[1].rmatvec(self.args[0].rmatvec(x)) def _rmatmat(self, x): return self.args[1].rmatmat(self.args[0].rmatmat(x)) def _matmat(self, x): return self.args[0].matmat(self.args[1].matmat(x)) def _adjoint(self): A, B = self.args return B.H * A.H class 
_ScaledLinearOperator(LinearOperator): def __init__(self, A, alpha): if not isinstance(A, LinearOperator): raise ValueError('LinearOperator expected as A') if not np.isscalar(alpha): raise ValueError('scalar expected as alpha') if isinstance(A, _ScaledLinearOperator): A, alpha_original = A.args # Avoid in-place multiplication so that we don't accidentally mutate # the original prefactor. alpha = alpha * alpha_original dtype = _get_dtype([A], [type(alpha)]) super().__init__(dtype, A.shape) self.args = (A, alpha) def _matvec(self, x): return self.args[1] * self.args[0].matvec(x) def _rmatvec(self, x): return np.conj(self.args[1]) * self.args[0].rmatvec(x) def _rmatmat(self, x): return np.conj(self.args[1]) * self.args[0].rmatmat(x) def _matmat(self, x): return self.args[1] * self.args[0].matmat(x) def _adjoint(self): A, alpha = self.args return A.H * np.conj(alpha) class _PowerLinearOperator(LinearOperator): def __init__(self, A, p): if not isinstance(A, LinearOperator): raise ValueError('LinearOperator expected as A') if A.shape[0] != A.shape[1]: raise ValueError('square LinearOperator expected, got %r' % A) if not isintlike(p) or p < 0: raise ValueError('non-negative integer expected as p') super().__init__(_get_dtype([A]), A.shape) self.args = (A, p) def _power(self, fun, x): res = np.array(x, copy=True) for i in range(self.args[1]): res = fun(res) return res def _matvec(self, x): return self._power(self.args[0].matvec, x) def _rmatvec(self, x): return self._power(self.args[0].rmatvec, x) def _rmatmat(self, x): return self._power(self.args[0].rmatmat, x) def _matmat(self, x): return self._power(self.args[0].matmat, x) def _adjoint(self): A, p = self.args return A.H ** p class MatrixLinearOperator(LinearOperator): def __init__(self, A): super().__init__(A.dtype, A.shape) self.A = A self.__adj = None self.args = (A,) def _matmat(self, X): return self.A.dot(X) def _adjoint(self): if self.__adj is None: self.__adj = _AdjointMatrixOperator(self) return self.__adj class 
_AdjointMatrixOperator(MatrixLinearOperator): def __init__(self, adjoint): self.A = adjoint.A.T.conj() self.__adjoint = adjoint self.args = (adjoint,) self.shape = adjoint.shape[1], adjoint.shape[0] @property def dtype(self): return self.__adjoint.dtype def _adjoint(self): return self.__adjoint class IdentityOperator(LinearOperator): def __init__(self, shape, dtype=None): super().__init__(dtype, shape) def _matvec(self, x): return x def _rmatvec(self, x): return x def _rmatmat(self, x): return x def _matmat(self, x): return x def _adjoint(self): return self def aslinearoperator(A): """Return A as a LinearOperator. 'A' may be any of the following types: - ndarray - matrix - sparse matrix (e.g. csr_matrix, lil_matrix, etc.) - LinearOperator - An object with .shape and .matvec attributes See the LinearOperator documentation for additional information. Notes ----- If 'A' has no .dtype attribute, the data type is determined by calling :func:`LinearOperator.matvec()` - set the .dtype attribute to prevent this call upon the linear operator creation. Examples -------- >>> import numpy as np >>> from scipy.sparse.linalg import aslinearoperator >>> M = np.array([[1,2,3],[4,5,6]], dtype=np.int32) >>> aslinearoperator(M) <2x3 MatrixLinearOperator with dtype=int32> """ if isinstance(A, LinearOperator): return A elif isinstance(A, np.ndarray) or isinstance(A, np.matrix): if A.ndim > 2: raise ValueError('array must have ndim <= 2') A = np.atleast_2d(np.asarray(A)) return MatrixLinearOperator(A) elif issparse(A) or is_pydata_spmatrix(A): return MatrixLinearOperator(A) else: if hasattr(A, 'shape') and hasattr(A, 'matvec'): rmatvec = None rmatmat = None dtype = None if hasattr(A, 'rmatvec'): rmatvec = A.rmatvec if hasattr(A, 'rmatmat'): rmatmat = A.rmatmat if hasattr(A, 'dtype'): dtype = A.dtype return LinearOperator(A.shape, A.matvec, rmatvec=rmatvec, rmatmat=rmatmat, dtype=dtype) else: raise TypeError('type not understood')
27,845
30.112849
82
py
scipy
scipy-main/scipy/sparse/linalg/_dsolve/setup.py
from os.path import join, dirname import sys import glob def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration from numpy.distutils.system_info import get_info from scipy._build_utils import numpy_nodepr_api config = Configuration('_dsolve',parent_package,top_path) config.add_data_dir('tests') lapack_opt = get_info('lapack_opt',notfound_action=2) if sys.platform == 'win32': superlu_defs = [('NO_TIMER',1)] else: superlu_defs = [] superlu_defs.append(('USE_VENDOR_BLAS',1)) superlu_src = join(dirname(__file__), 'SuperLU', 'SRC') sources = sorted(glob.glob(join(superlu_src, '*.c'))) headers = list(glob.glob(join(superlu_src, '*.h'))) config.add_library('superlu_src', sources=sources, macros=superlu_defs, include_dirs=[superlu_src], ) # Extension ext_sources = ['_superlumodule.c', '_superlu_utils.c', '_superluobject.c'] config.add_extension('_superlu', sources=ext_sources, libraries=['superlu_src'], depends=(sources + headers), extra_info=lapack_opt, **numpy_nodepr_api ) # Add license files config.add_data_files('SuperLU/License.txt') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
1,614
28.907407
61
py
scipy
scipy-main/scipy/sparse/linalg/_dsolve/linsolve.py
from warnings import warn import numpy as np from numpy import asarray from scipy.sparse import (issparse, SparseEfficiencyWarning, csc_matrix, csr_matrix) from scipy.sparse._sputils import is_pydata_spmatrix from scipy.linalg import LinAlgError import copy from . import _superlu noScikit = False try: import scikits.umfpack as umfpack except ImportError: noScikit = True useUmfpack = not noScikit __all__ = ['use_solver', 'spsolve', 'splu', 'spilu', 'factorized', 'MatrixRankWarning', 'spsolve_triangular'] class MatrixRankWarning(UserWarning): pass def use_solver(**kwargs): """ Select default sparse direct solver to be used. Parameters ---------- useUmfpack : bool, optional Use UMFPACK [1]_, [2]_, [3]_, [4]_. over SuperLU. Has effect only if ``scikits.umfpack`` is installed. Default: True assumeSortedIndices : bool, optional Allow UMFPACK to skip the step of sorting indices for a CSR/CSC matrix. Has effect only if useUmfpack is True and ``scikits.umfpack`` is installed. Default: False Notes ----- The default sparse solver is UMFPACK when available (``scikits.umfpack`` is installed). This can be changed by passing useUmfpack = False, which then causes the always present SuperLU based solver to be used. UMFPACK requires a CSR/CSC matrix to have sorted column/row indices. If sure that the matrix fulfills this, pass ``assumeSortedIndices=True`` to gain some speed. References ---------- .. [1] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern multifrontal method with a column pre-ordering strategy, ACM Trans. on Mathematical Software, 30(2), 2004, pp. 196--199. https://dl.acm.org/doi/abs/10.1145/992200.992206 .. [2] T. A. Davis, A column pre-ordering strategy for the unsymmetric-pattern multifrontal method, ACM Trans. on Mathematical Software, 30(2), 2004, pp. 165--195. https://dl.acm.org/doi/abs/10.1145/992200.992205 .. [3] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal method for unsymmetric sparse matrices, ACM Trans. 
on Mathematical Software, 25(1), 1999, pp. 1--19. https://doi.org/10.1145/305658.287640 .. [4] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal method for sparse LU factorization, SIAM J. Matrix Analysis and Computations, 18(1), 1997, pp. 140--158. https://doi.org/10.1137/S0895479894246905T. Examples -------- >>> import numpy as np >>> from scipy.sparse.linalg import use_solver, spsolve >>> from scipy.sparse import csc_matrix >>> R = np.random.randn(5, 5) >>> A = csc_matrix(R) >>> b = np.random.randn(5) >>> use_solver(useUmfpack=False) # enforce superLU over UMFPACK >>> x = spsolve(A, b) >>> np.allclose(A.dot(x), b) True >>> use_solver(useUmfpack=True) # reset umfPack usage to default """ if 'useUmfpack' in kwargs: globals()['useUmfpack'] = kwargs['useUmfpack'] if useUmfpack and 'assumeSortedIndices' in kwargs: umfpack.configure(assumeSortedIndices=kwargs['assumeSortedIndices']) def _get_umf_family(A): """Get umfpack family string given the sparse matrix dtype.""" _families = { (np.float64, np.int32): 'di', (np.complex128, np.int32): 'zi', (np.float64, np.int64): 'dl', (np.complex128, np.int64): 'zl' } # A.dtype.name can only be "float64" or # "complex128" in control flow f_type = getattr(np, A.dtype.name) # control flow may allow for more index # types to get through here i_type = getattr(np, A.indices.dtype.name) try: family = _families[(f_type, i_type)] except KeyError as e: msg = 'only float64 or complex128 matrices with int32 or int64' \ ' indices are supported! (got: matrix: %s, indices: %s)' \ % (f_type, i_type) raise ValueError(msg) from e # See gh-8278. Considered converting only if # A.shape[0]*A.shape[1] > np.iinfo(np.int32).max, # but that didn't always fix the issue. 
family = family[0] + "l" A_new = copy.copy(A) A_new.indptr = np.array(A.indptr, copy=False, dtype=np.int64) A_new.indices = np.array(A.indices, copy=False, dtype=np.int64) return family, A_new def _safe_downcast_indices(A): # check for safe downcasting max_value = np.iinfo(np.intc).max if A.indptr[-1] > max_value: # indptr[-1] is max b/c indptr always sorted raise ValueError("indptr values too large for SuperLU") if max(*A.shape) > max_value: # only check large enough arrays if np.any(A.indices > max_value): raise ValueError("indices values too large for SuperLU") indices = A.indices.astype(np.intc, copy=False) indptr = A.indptr.astype(np.intc, copy=False) return indices, indptr def spsolve(A, b, permc_spec=None, use_umfpack=True): """Solve the sparse linear system Ax=b, where b may be a vector or a matrix. Parameters ---------- A : ndarray or sparse matrix The square matrix A will be converted into CSC or CSR form b : ndarray or sparse matrix The matrix or vector representing the right hand side of the equation. If a vector, b.shape must be (n,) or (n, 1). permc_spec : str, optional How to permute the columns of the matrix for sparsity preservation. (default: 'COLAMD') - ``NATURAL``: natural ordering. - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. - ``COLAMD``: approximate minimum degree column ordering [1]_, [2]_. use_umfpack : bool, optional if True (default) then use UMFPACK for the solution [3]_, [4]_, [5]_, [6]_ . This is only referenced if b is a vector and ``scikits.umfpack`` is installed. Returns ------- x : ndarray or sparse matrix the solution of the sparse linear equation. If b is a vector, then x is a vector of size A.shape[1] If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1]) Notes ----- For solving the matrix expression AX = B, this solver assumes the resulting matrix X is sparse, as is often the case for very sparse inputs. 
If the resulting X is dense, the construction of this sparse result will be relatively expensive. In that case, consider converting A to a dense matrix and using scipy.linalg.solve or its variants. References ---------- .. [1] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, Algorithm 836: COLAMD, an approximate column minimum degree ordering algorithm, ACM Trans. on Mathematical Software, 30(3), 2004, pp. 377--380. :doi:`10.1145/1024074.1024080` .. [2] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, A column approximate minimum degree ordering algorithm, ACM Trans. on Mathematical Software, 30(3), 2004, pp. 353--376. :doi:`10.1145/1024074.1024079` .. [3] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern multifrontal method with a column pre-ordering strategy, ACM Trans. on Mathematical Software, 30(2), 2004, pp. 196--199. https://dl.acm.org/doi/abs/10.1145/992200.992206 .. [4] T. A. Davis, A column pre-ordering strategy for the unsymmetric-pattern multifrontal method, ACM Trans. on Mathematical Software, 30(2), 2004, pp. 165--195. https://dl.acm.org/doi/abs/10.1145/992200.992205 .. [5] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal method for unsymmetric sparse matrices, ACM Trans. on Mathematical Software, 25(1), 1999, pp. 1--19. https://doi.org/10.1145/305658.287640 .. [6] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal method for sparse LU factorization, SIAM J. Matrix Analysis and Computations, 18(1), 1997, pp. 140--158. https://doi.org/10.1137/S0895479894246905T. 
Examples -------- >>> import numpy as np >>> from scipy.sparse import csc_matrix >>> from scipy.sparse.linalg import spsolve >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float) >>> B = csc_matrix([[2, 0], [-1, 0], [2, 0]], dtype=float) >>> x = spsolve(A, B) >>> np.allclose(A.dot(x).toarray(), B.toarray()) True """ if is_pydata_spmatrix(A): A = A.to_scipy_sparse().tocsc() if not (issparse(A) and A.format in ("csc", "csr")): A = csc_matrix(A) warn('spsolve requires A be CSC or CSR matrix format', SparseEfficiencyWarning) # b is a vector only if b have shape (n,) or (n, 1) b_is_sparse = issparse(b) or is_pydata_spmatrix(b) if not b_is_sparse: b = asarray(b) b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1)) # sum duplicates for non-canonical format A.sum_duplicates() A = A._asfptype() # upcast to a floating point format result_dtype = np.promote_types(A.dtype, b.dtype) if A.dtype != result_dtype: A = A.astype(result_dtype) if b.dtype != result_dtype: b = b.astype(result_dtype) # validate input shapes M, N = A.shape if (M != N): raise ValueError(f"matrix must be square (has shape {(M, N)})") if M != b.shape[0]: raise ValueError("matrix - rhs dimension mismatch (%s - %s)" % (A.shape, b.shape[0])) use_umfpack = use_umfpack and useUmfpack if b_is_vector and use_umfpack: if b_is_sparse: b_vec = b.toarray() else: b_vec = b b_vec = asarray(b_vec, dtype=A.dtype).ravel() if noScikit: raise RuntimeError('Scikits.umfpack not installed.') if A.dtype.char not in 'dD': raise ValueError("convert matrix data to double, please, using" " .astype(), or set linsolve.useUmfpack = False") umf_family, A = _get_umf_family(A) umf = umfpack.UmfpackContext(umf_family) x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec, autoTranspose=True) else: if b_is_vector and b_is_sparse: b = b.toarray() b_is_sparse = False if not b_is_sparse: if A.format == "csc": flag = 1 # CSC format else: flag = 0 # CSR format indices = A.indices.astype(np.intc, copy=False) indptr = 
A.indptr.astype(np.intc, copy=False) options = dict(ColPerm=permc_spec) x, info = _superlu.gssv(N, A.nnz, A.data, indices, indptr, b, flag, options=options) if info != 0: warn("Matrix is exactly singular", MatrixRankWarning) x.fill(np.nan) if b_is_vector: x = x.ravel() else: # b is sparse Afactsolve = factorized(A) if not (b.format == "csc" or is_pydata_spmatrix(b)): warn('spsolve is more efficient when sparse b ' 'is in the CSC matrix format', SparseEfficiencyWarning) b = csc_matrix(b) # Create a sparse output matrix by repeatedly applying # the sparse factorization to solve columns of b. data_segs = [] row_segs = [] col_segs = [] for j in range(b.shape[1]): # TODO: replace this with # bj = b[:, j].toarray().ravel() # once 1D sparse arrays are supported. # That is a slightly faster code path. bj = b[:, [j]].toarray().ravel() xj = Afactsolve(bj) w = np.flatnonzero(xj) segment_length = w.shape[0] row_segs.append(w) col_segs.append(np.full(segment_length, j, dtype=int)) data_segs.append(np.asarray(xj[w], dtype=A.dtype)) sparse_data = np.concatenate(data_segs) sparse_row = np.concatenate(row_segs) sparse_col = np.concatenate(col_segs) x = A.__class__((sparse_data, (sparse_row, sparse_col)), shape=b.shape, dtype=A.dtype) if is_pydata_spmatrix(b): x = b.__class__(x) return x def splu(A, permc_spec=None, diag_pivot_thresh=None, relax=None, panel_size=None, options=dict()): """ Compute the LU decomposition of a sparse, square matrix. Parameters ---------- A : sparse matrix Sparse matrix to factorize. Most efficient when provided in CSC format. Other formats will be converted to CSC before factorization. permc_spec : str, optional How to permute the columns of the matrix for sparsity preservation. (default: 'COLAMD') - ``NATURAL``: natural ordering. - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. 
- ``COLAMD``: approximate minimum degree column ordering diag_pivot_thresh : float, optional Threshold used for a diagonal entry to be an acceptable pivot. See SuperLU user's guide for details [1]_ relax : int, optional Expert option for customizing the degree of relaxing supernodes. See SuperLU user's guide for details [1]_ panel_size : int, optional Expert option for customizing the panel size. See SuperLU user's guide for details [1]_ options : dict, optional Dictionary containing additional expert options to SuperLU. See SuperLU user guide [1]_ (section 2.4 on the 'Options' argument) for more details. For example, you can specify ``options=dict(Equil=False, IterRefine='SINGLE'))`` to turn equilibration off and perform a single iterative refinement. Returns ------- invA : scipy.sparse.linalg.SuperLU Object, which has a ``solve`` method. See also -------- spilu : incomplete LU decomposition Notes ----- This function uses the SuperLU library. References ---------- .. [1] SuperLU https://portal.nersc.gov/project/sparse/superlu/ Examples -------- >>> import numpy as np >>> from scipy.sparse import csc_matrix >>> from scipy.sparse.linalg import splu >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float) >>> B = splu(A) >>> x = np.array([1., 2., 3.], dtype=float) >>> B.solve(x) array([ 1. , -3. , -1.5]) >>> A.dot(B.solve(x)) array([ 1., 2., 3.]) >>> B.solve(A.dot(x)) array([ 1., 2., 3.]) """ if is_pydata_spmatrix(A): def csc_construct_func(*a, cls=type(A)): return cls(csc_matrix(*a)) A = A.to_scipy_sparse().tocsc() else: csc_construct_func = csc_matrix if not (issparse(A) and A.format == "csc"): A = csc_matrix(A) warn('splu converted its input to CSC format', SparseEfficiencyWarning) # sum duplicates for non-canonical format A.sum_duplicates() A = A._asfptype() # upcast to a floating point format M, N = A.shape if (M != N): raise ValueError("can only factor square matrices") # is this true? 
indices, indptr = _safe_downcast_indices(A) _options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec, PanelSize=panel_size, Relax=relax) if options is not None: _options.update(options) # Ensure that no column permutations are applied if (_options["ColPerm"] == "NATURAL"): _options["SymmetricMode"] = True return _superlu.gstrf(N, A.nnz, A.data, indices, indptr, csc_construct_func=csc_construct_func, ilu=False, options=_options) def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None, diag_pivot_thresh=None, relax=None, panel_size=None, options=None): """ Compute an incomplete LU decomposition for a sparse, square matrix. The resulting object is an approximation to the inverse of `A`. Parameters ---------- A : (N, N) array_like Sparse matrix to factorize. Most efficient when provided in CSC format. Other formats will be converted to CSC before factorization. drop_tol : float, optional Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition. (default: 1e-4) fill_factor : float, optional Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10) drop_rule : str, optional Comma-separated string of drop rules to use. Available rules: ``basic``, ``prows``, ``column``, ``area``, ``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``) See SuperLU documentation for details. Remaining other options Same as for `splu` Returns ------- invA_approx : scipy.sparse.linalg.SuperLU Object, which has a ``solve`` method. See also -------- splu : complete LU decomposition Notes ----- To improve the better approximation to the inverse, you may need to increase `fill_factor` AND decrease `drop_tol`. This function uses the SuperLU library. Examples -------- >>> import numpy as np >>> from scipy.sparse import csc_matrix >>> from scipy.sparse.linalg import spilu >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float) >>> B = spilu(A) >>> x = np.array([1., 2., 3.], dtype=float) >>> B.solve(x) array([ 1. 
, -3. , -1.5]) >>> A.dot(B.solve(x)) array([ 1., 2., 3.]) >>> B.solve(A.dot(x)) array([ 1., 2., 3.]) """ if is_pydata_spmatrix(A): def csc_construct_func(*a, cls=type(A)): return cls(csc_matrix(*a)) A = A.to_scipy_sparse().tocsc() else: csc_construct_func = csc_matrix if not (issparse(A) and A.format == "csc"): A = csc_matrix(A) warn('spilu converted its input to CSC format', SparseEfficiencyWarning) # sum duplicates for non-canonical format A.sum_duplicates() A = A._asfptype() # upcast to a floating point format M, N = A.shape if (M != N): raise ValueError("can only factor square matrices") # is this true? indices, indptr = _safe_downcast_indices(A) _options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol, ILU_FillFactor=fill_factor, DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec, PanelSize=panel_size, Relax=relax) if options is not None: _options.update(options) # Ensure that no column permutations are applied if (_options["ColPerm"] == "NATURAL"): _options["SymmetricMode"] = True return _superlu.gstrf(N, A.nnz, A.data, indices, indptr, csc_construct_func=csc_construct_func, ilu=True, options=_options) def factorized(A): """ Return a function for solving a sparse linear system, with A pre-factorized. Parameters ---------- A : (N, N) array_like Input. A in CSC format is most efficient. A CSR format matrix will be converted to CSC before factorization. Returns ------- solve : callable To solve the linear system of equations given in `A`, the `solve` callable should be passed an ndarray of shape (N,). Examples -------- >>> import numpy as np >>> from scipy.sparse.linalg import factorized >>> A = np.array([[ 3. , 2. , -1. ], ... [ 2. , -2. , 4. ], ... [-1. , 0.5, -1. ]]) >>> solve = factorized(A) # Makes LU decomposition. >>> rhs1 = np.array([1, -2, 0]) >>> solve(rhs1) # Uses the LU factors. 
array([ 1., -2., -2.]) """ if is_pydata_spmatrix(A): A = A.to_scipy_sparse().tocsc() if useUmfpack: if noScikit: raise RuntimeError('Scikits.umfpack not installed.') if not (issparse(A) and A.format == "csc"): A = csc_matrix(A) warn('splu converted its input to CSC format', SparseEfficiencyWarning) A = A._asfptype() # upcast to a floating point format if A.dtype.char not in 'dD': raise ValueError("convert matrix data to double, please, using" " .astype(), or set linsolve.useUmfpack = False") umf_family, A = _get_umf_family(A) umf = umfpack.UmfpackContext(umf_family) # Make LU decomposition. umf.numeric(A) def solve(b): with np.errstate(divide="ignore", invalid="ignore"): # Ignoring warnings with numpy >= 1.23.0, see gh-16523 result = umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True) return result return solve else: return splu(A).solve def spsolve_triangular(A, b, lower=True, overwrite_A=False, overwrite_b=False, unit_diagonal=False): """ Solve the equation ``A x = b`` for `x`, assuming A is a triangular matrix. Parameters ---------- A : (M, M) sparse matrix A sparse square triangular matrix. Should be in CSR format. b : (M,) or (M, N) array_like Right-hand side matrix in ``A x = b`` lower : bool, optional Whether `A` is a lower or upper triangular matrix. Default is lower triangular matrix. overwrite_A : bool, optional Allow changing `A`. The indices of `A` are going to be sorted and zero entries are going to be removed. Enabling gives a performance gain. Default is False. overwrite_b : bool, optional Allow overwriting data in `b`. Enabling gives a performance gain. Default is False. If `overwrite_b` is True, it should be ensured that `b` has an appropriate dtype to be able to store the result. unit_diagonal : bool, optional If True, diagonal elements of `a` are assumed to be 1 and will not be referenced. .. versionadded:: 1.4.0 Returns ------- x : (M,) or (M, N) ndarray Solution to the system ``A x = b``. Shape of return matches shape of `b`. 
Raises ------ LinAlgError If `A` is singular or not triangular. ValueError If shape of `A` or shape of `b` do not match the requirements. Notes ----- .. versionadded:: 0.19.0 Examples -------- >>> import numpy as np >>> from scipy.sparse import csr_matrix >>> from scipy.sparse.linalg import spsolve_triangular >>> A = csr_matrix([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float) >>> B = np.array([[2, 0], [-1, 0], [2, 0]], dtype=float) >>> x = spsolve_triangular(A, B) >>> np.allclose(A.dot(x), B) True """ if is_pydata_spmatrix(A): A = A.to_scipy_sparse().tocsr() # Check the input for correct type and format. if not (issparse(A) and A.format == "csr"): warn('CSR matrix format is required. Converting to CSR matrix.', SparseEfficiencyWarning) A = csr_matrix(A) elif not overwrite_A: A = A.copy() if A.shape[0] != A.shape[1]: raise ValueError( f'A must be a square matrix but its shape is {A.shape}.') # sum duplicates for non-canonical format A.sum_duplicates() b = np.asanyarray(b) if b.ndim not in [1, 2]: raise ValueError( f'b must have 1 or 2 dims but its shape is {b.shape}.') if A.shape[0] != b.shape[0]: raise ValueError( 'The size of the dimensions of A must be equal to ' 'the size of the first dimension of b but the shape of A is ' '{} and the shape of b is {}.'.format(A.shape, b.shape)) # Init x as (a copy of) b. x_dtype = np.result_type(A.data, b, np.float64) if overwrite_b: if np.can_cast(b.dtype, x_dtype, casting='same_kind'): x = b else: raise ValueError( 'Cannot overwrite b (dtype {}) with result ' 'of type {}.'.format(b.dtype, x_dtype)) else: x = b.astype(x_dtype, copy=True) # Choose forward or backward order. if lower: row_indices = range(len(b)) else: row_indices = range(len(b) - 1, -1, -1) # Fill x iteratively. for i in row_indices: # Get indices for i-th row. 
indptr_start = A.indptr[i] indptr_stop = A.indptr[i + 1] if lower: A_diagonal_index_row_i = indptr_stop - 1 A_off_diagonal_indices_row_i = slice(indptr_start, indptr_stop - 1) else: A_diagonal_index_row_i = indptr_start A_off_diagonal_indices_row_i = slice(indptr_start + 1, indptr_stop) # Check regularity and triangularity of A. if not unit_diagonal and (indptr_stop <= indptr_start or A.indices[A_diagonal_index_row_i] < i): raise LinAlgError( f'A is singular: diagonal {i} is zero.') if not unit_diagonal and A.indices[A_diagonal_index_row_i] > i: raise LinAlgError( 'A is not triangular: A[{}, {}] is nonzero.' ''.format(i, A.indices[A_diagonal_index_row_i])) # Incorporate off-diagonal entries. A_column_indices_in_row_i = A.indices[A_off_diagonal_indices_row_i] A_values_in_row_i = A.data[A_off_diagonal_indices_row_i] x[i] -= np.dot(x[A_column_indices_in_row_i].T, A_values_in_row_i) # Compute i-th entry of x. if not unit_diagonal: x[i] /= A.data[A_diagonal_index_row_i] return x
26,180
34.236878
80
py
scipy
scipy-main/scipy/sparse/linalg/_dsolve/_add_newdocs.py
from numpy.lib import add_newdoc add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', """ LU factorization of a sparse matrix. Factorization is represented as:: Pr @ A @ Pc = L @ U To construct these `SuperLU` objects, call the `splu` and `spilu` functions. Attributes ---------- shape nnz perm_c perm_r L U Methods ------- solve Notes ----- .. versionadded:: 0.14.0 Examples -------- The LU decomposition can be used to solve matrix equations. Consider: >>> import numpy as np >>> from scipy.sparse import csc_matrix, linalg as sla >>> A = csc_matrix([[1,2,0,4],[1,0,0,1],[1,0,2,1],[2,2,1,0.]]) This can be solved for a given right-hand side: >>> lu = sla.splu(A) >>> b = np.array([1, 2, 3, 4]) >>> x = lu.solve(b) >>> A.dot(x) array([ 1., 2., 3., 4.]) The ``lu`` object also contains an explicit representation of the decomposition. The permutations are represented as mappings of indices: >>> lu.perm_r array([0, 2, 1, 3], dtype=int32) >>> lu.perm_c array([2, 0, 1, 3], dtype=int32) The L and U factors are sparse matrices in CSC format: >>> lu.L.A array([[ 1. , 0. , 0. , 0. ], [ 0. , 1. , 0. , 0. ], [ 0. , 0. , 1. , 0. ], [ 1. , 0.5, 0.5, 1. ]]) >>> lu.U.A array([[ 2., 0., 1., 4.], [ 0., 2., 1., 1.], [ 0., 0., 1., 1.], [ 0., 0., 0., -5.]]) The permutation matrices can be constructed: >>> Pr = csc_matrix((np.ones(4), (lu.perm_r, np.arange(4)))) >>> Pc = csc_matrix((np.ones(4), (np.arange(4), lu.perm_c))) We can reassemble the original matrix: >>> (Pr.T @ (lu.L @ lu.U) @ Pc.T).A array([[ 1., 2., 0., 4.], [ 1., 0., 0., 1.], [ 1., 0., 2., 1.], [ 2., 2., 1., 0.]]) """) add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('solve', """ solve(rhs[, trans]) Solves linear system of equations with one or several right-hand sides. 
Parameters ---------- rhs : ndarray, shape (n,) or (n, k) Right hand side(s) of equation trans : {'N', 'T', 'H'}, optional Type of system to solve:: 'N': A @ x == rhs (default) 'T': A^T @ x == rhs 'H': A^H @ x == rhs i.e., normal, transposed, and hermitian conjugate. Returns ------- x : ndarray, shape ``rhs.shape`` Solution vector(s) """)) add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('L', """ Lower triangular factor with unit diagonal as a `scipy.sparse.csc_matrix`. .. versionadded:: 0.14.0 """)) add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('U', """ Upper triangular factor as a `scipy.sparse.csc_matrix`. .. versionadded:: 0.14.0 """)) add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('shape', """ Shape of the original matrix as a tuple of ints. """)) add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('nnz', """ Number of nonzero elements in the matrix. """)) add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_c', """ Permutation Pc represented as an array of indices. The column permutation matrix can be reconstructed via: >>> Pc = np.zeros((n, n)) >>> Pc[np.arange(n), perm_c] = 1 """)) add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_r', """ Permutation Pr represented as an array of indices. The row permutation matrix can be reconstructed via: >>> Pr = np.zeros((n, n)) >>> Pr[perm_r, np.arange(n)] = 1 """))
3,795
23.810458
75
py
scipy
scipy-main/scipy/sparse/linalg/_dsolve/__init__.py
""" Linear Solvers ============== The default solver is SuperLU (included in the scipy distribution), which can solve real or complex linear systems in both single and double precisions. It is automatically replaced by UMFPACK, if available. Note that UMFPACK works in double precision only, so switch it off by:: >>> use_solver(useUmfpack=False) to solve in the single precision. See also use_solver documentation. Example session:: >>> from scipy.sparse import csc_matrix, spdiags >>> from numpy import array >>> from scipy.sparse.linalg import spsolve, use_solver >>> >>> print("Inverting a sparse linear system:") >>> print("The sparse matrix (constructed from diagonals):") >>> a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5) >>> b = array([1, 2, 3, 4, 5]) >>> print("Solve: single precision complex:") >>> use_solver( useUmfpack = False ) >>> a = a.astype('F') >>> x = spsolve(a, b) >>> print(x) >>> print("Error: ", a@x-b) >>> >>> print("Solve: double precision complex:") >>> use_solver( useUmfpack = True ) >>> a = a.astype('D') >>> x = spsolve(a, b) >>> print(x) >>> print("Error: ", a@x-b) >>> >>> print("Solve: double precision:") >>> a = a.astype('d') >>> x = spsolve(a, b) >>> print(x) >>> print("Error: ", a@x-b) >>> >>> print("Solve: single precision:") >>> use_solver( useUmfpack = False ) >>> a = a.astype('f') >>> x = spsolve(a, b.astype('f')) >>> print(x) >>> print("Error: ", a@x-b) """ #import umfpack #__doc__ = '\n\n'.join( (__doc__, umfpack.__doc__) ) #del umfpack from .linsolve import * from ._superlu import SuperLU from . import _add_newdocs from . import linsolve __all__ = [ 'MatrixRankWarning', 'SuperLU', 'factorized', 'spilu', 'splu', 'spsolve', 'spsolve_triangular', 'use_solver' ] from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
1,991
26.666667
70
py
scipy
scipy-main/scipy/sparse/linalg/_dsolve/tests/test_linsolve.py
import sys import threading import numpy as np from numpy import array, finfo, arange, eye, all, unique, ones, dot import numpy.random as random from numpy.testing import ( assert_array_almost_equal, assert_almost_equal, assert_equal, assert_array_equal, assert_, assert_allclose, assert_warns, suppress_warnings) import pytest from pytest import raises as assert_raises import scipy.linalg from scipy.linalg import norm, inv from scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix, csr_matrix, identity, issparse, dok_matrix, lil_matrix, bsr_matrix) from scipy.sparse.linalg import SuperLU from scipy.sparse.linalg._dsolve import (spsolve, use_solver, splu, spilu, MatrixRankWarning, _superlu, spsolve_triangular, factorized) import scipy.sparse from scipy._lib._testutils import check_free_memory sup_sparse_efficiency = suppress_warnings() sup_sparse_efficiency.filter(SparseEfficiencyWarning) # scikits.umfpack is not a SciPy dependency but it is optionally used in # dsolve, so check whether it's available try: import scikits.umfpack as umfpack has_umfpack = True except ImportError: has_umfpack = False def toarray(a): if issparse(a): return a.toarray() else: return a def setup_bug_8278(): N = 2 ** 6 h = 1/N Ah1D = scipy.sparse.diags([-1, 2, -1], [-1, 0, 1], shape=(N-1, N-1))/(h**2) eyeN = scipy.sparse.eye(N - 1) A = (scipy.sparse.kron(eyeN, scipy.sparse.kron(eyeN, Ah1D)) + scipy.sparse.kron(eyeN, scipy.sparse.kron(Ah1D, eyeN)) + scipy.sparse.kron(Ah1D, scipy.sparse.kron(eyeN, eyeN))) b = np.random.rand((N-1)**3) return A, b class TestFactorized: def setup_method(self): n = 5 d = arange(n) + 1 self.n = n self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n).tocsc() random.seed(1234) def _check_singular(self): A = csc_matrix((5,5), dtype='d') b = ones(5) assert_array_almost_equal(0. 
* b, factorized(A)(b)) def _check_non_singular(self): # Make a diagonal dominant, to make sure it is not singular n = 5 a = csc_matrix(random.rand(n, n)) b = ones(n) expected = splu(a).solve(b) assert_array_almost_equal(factorized(a)(b), expected) def test_singular_without_umfpack(self): use_solver(useUmfpack=False) with assert_raises(RuntimeError, match="Factor is exactly singular"): self._check_singular() @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_singular_with_umfpack(self): use_solver(useUmfpack=True) with suppress_warnings() as sup: sup.filter(RuntimeWarning, "divide by zero encountered in double_scalars") assert_warns(umfpack.UmfpackWarning, self._check_singular) def test_non_singular_without_umfpack(self): use_solver(useUmfpack=False) self._check_non_singular() @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_non_singular_with_umfpack(self): use_solver(useUmfpack=True) self._check_non_singular() def test_cannot_factorize_nonsquare_matrix_without_umfpack(self): use_solver(useUmfpack=False) msg = "can only factor square matrices" with assert_raises(ValueError, match=msg): factorized(self.A[:, :4]) @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_factorizes_nonsquare_matrix_with_umfpack(self): use_solver(useUmfpack=True) # does not raise factorized(self.A[:,:4]) def test_call_with_incorrectly_sized_matrix_without_umfpack(self): use_solver(useUmfpack=False) solve = factorized(self.A) b = random.rand(4) B = random.rand(4, 3) BB = random.rand(self.n, 3, 9) with assert_raises(ValueError, match="is of incompatible size"): solve(b) with assert_raises(ValueError, match="is of incompatible size"): solve(B) with assert_raises(ValueError, match="object too deep for desired array"): solve(BB) @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_call_with_incorrectly_sized_matrix_with_umfpack(self): use_solver(useUmfpack=True) solve = factorized(self.A) b = 
random.rand(4) B = random.rand(4, 3) BB = random.rand(self.n, 3, 9) # does not raise solve(b) msg = "object too deep for desired array" with assert_raises(ValueError, match=msg): solve(B) with assert_raises(ValueError, match=msg): solve(BB) def test_call_with_cast_to_complex_without_umfpack(self): use_solver(useUmfpack=False) solve = factorized(self.A) b = random.rand(4) for t in [np.complex64, np.complex128]: with assert_raises(TypeError, match="Cannot cast array data"): solve(b.astype(t)) @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_call_with_cast_to_complex_with_umfpack(self): use_solver(useUmfpack=True) solve = factorized(self.A) b = random.rand(4) for t in [np.complex64, np.complex128]: assert_warns(np.ComplexWarning, solve, b.astype(t)) @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_assume_sorted_indices_flag(self): # a sparse matrix with unsorted indices unsorted_inds = np.array([2, 0, 1, 0]) data = np.array([10, 16, 5, 0.4]) indptr = np.array([0, 1, 2, 4]) A = csc_matrix((data, unsorted_inds, indptr), (3, 3)) b = ones(3) # should raise when incorrectly assuming indices are sorted use_solver(useUmfpack=True, assumeSortedIndices=True) with assert_raises(RuntimeError, match="UMFPACK_ERROR_invalid_matrix"): factorized(A) # should sort indices and succeed when not assuming indices are sorted use_solver(useUmfpack=True, assumeSortedIndices=False) expected = splu(A.copy()).solve(b) assert_equal(A.has_sorted_indices, 0) assert_array_almost_equal(factorized(A)(b), expected) @pytest.mark.slow @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_bug_8278(self): check_free_memory(8000) use_solver(useUmfpack=True) A, b = setup_bug_8278() A = A.tocsc() f = factorized(A) x = f(b) assert_array_almost_equal(A @ x, b) class TestLinsolve: def setup_method(self): use_solver(useUmfpack=False) def test_singular(self): A = csc_matrix((5,5), dtype='d') b = array([1, 2, 3, 4, 5],dtype='d') with 
suppress_warnings() as sup: sup.filter(MatrixRankWarning, "Matrix is exactly singular") x = spsolve(A, b) assert_(not np.isfinite(x).any()) def test_singular_gh_3312(self): # "Bad" test case that leads SuperLU to call LAPACK with invalid # arguments. Check that it fails moderately gracefully. ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32) v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296]) A = csc_matrix((v, ij.T), shape=(20, 20)) b = np.arange(20) try: # should either raise a runtime error or return value # appropriate for singular input (which yields the warning) with suppress_warnings() as sup: sup.filter(MatrixRankWarning, "Matrix is exactly singular") x = spsolve(A, b) assert not np.isfinite(x).any() except RuntimeError: pass @pytest.mark.parametrize('format', ['csc', 'csr']) @pytest.mark.parametrize('idx_dtype', [np.int32, np.int64]) def test_twodiags(self, format: str, idx_dtype: np.dtype): A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5, format=format) b = array([1, 2, 3, 4, 5]) # condition number of A cond_A = norm(A.toarray(), 2) * norm(inv(A.toarray()), 2) for t in ['f','d','F','D']: eps = finfo(t).eps # floating point epsilon b = b.astype(t) Asp = A.astype(t) Asp.indices = Asp.indices.astype(idx_dtype, copy=False) Asp.indptr = Asp.indptr.astype(idx_dtype, copy=False) x = spsolve(Asp, b) assert_(norm(b - Asp@x) < 10 * cond_A * eps) def test_bvector_smoketest(self): Adense = array([[0., 1., 1.], [1., 0., 1.], [0., 0., 1.]]) As = csc_matrix(Adense) random.seed(1234) x = random.randn(3) b = As@x x2 = spsolve(As, b) assert_array_almost_equal(x, x2) def test_bmatrix_smoketest(self): Adense = array([[0., 1., 1.], [1., 0., 1.], [0., 0., 1.]]) As = csc_matrix(Adense) random.seed(1234) x = random.randn(3, 4) Bdense = As.dot(x) Bs = csc_matrix(Bdense) x2 = spsolve(As, Bs) assert_array_almost_equal(x, x2.toarray()) @sup_sparse_efficiency def test_non_square(self): # A is not square. 
A = ones((3, 4)) b = ones((4, 1)) assert_raises(ValueError, spsolve, A, b) # A2 and b2 have incompatible shapes. A2 = csc_matrix(eye(3)) b2 = array([1.0, 2.0]) assert_raises(ValueError, spsolve, A2, b2) @sup_sparse_efficiency def test_example_comparison(self): row = array([0,0,1,2,2,2]) col = array([0,2,2,0,1,2]) data = array([1,2,3,-4,5,6]) sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float) M = sM.toarray() row = array([0,0,1,1,0,0]) col = array([0,2,1,1,0,0]) data = array([1,1,1,1,1,1]) sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float) N = sN.toarray() sX = spsolve(sM, sN) X = scipy.linalg.solve(M, N) assert_array_almost_equal(X, sX.toarray()) @sup_sparse_efficiency @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_shape_compatibility(self): use_solver(useUmfpack=True) A = csc_matrix([[1., 0], [0, 2]]) bs = [ [1, 6], array([1, 6]), [[1], [6]], array([[1], [6]]), csc_matrix([[1], [6]]), csr_matrix([[1], [6]]), dok_matrix([[1], [6]]), bsr_matrix([[1], [6]]), array([[1., 2., 3.], [6., 8., 10.]]), csc_matrix([[1., 2., 3.], [6., 8., 10.]]), csr_matrix([[1., 2., 3.], [6., 8., 10.]]), dok_matrix([[1., 2., 3.], [6., 8., 10.]]), bsr_matrix([[1., 2., 3.], [6., 8., 10.]]), ] for b in bs: x = np.linalg.solve(A.toarray(), toarray(b)) for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]: x1 = spsolve(spmattype(A), b, use_umfpack=True) x2 = spsolve(spmattype(A), b, use_umfpack=False) # check solution if x.ndim == 2 and x.shape[1] == 1: # interprets also these as "vectors" x = x.ravel() assert_array_almost_equal(toarray(x1), x, err_msg=repr((b, spmattype, 1))) assert_array_almost_equal(toarray(x2), x, err_msg=repr((b, spmattype, 2))) # dense vs. 
sparse output ("vectors" are always dense) if issparse(b) and x.ndim > 1: assert_(issparse(x1), repr((b, spmattype, 1))) assert_(issparse(x2), repr((b, spmattype, 2))) else: assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1))) assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2))) # check output shape if x.ndim == 1: # "vector" assert_equal(x1.shape, (A.shape[1],)) assert_equal(x2.shape, (A.shape[1],)) else: # "matrix" assert_equal(x1.shape, x.shape) assert_equal(x2.shape, x.shape) A = csc_matrix((3, 3)) b = csc_matrix((1, 3)) assert_raises(ValueError, spsolve, A, b) @sup_sparse_efficiency def test_ndarray_support(self): A = array([[1., 2.], [2., 0.]]) x = array([[1., 1.], [0.5, -0.5]]) b = array([[2., 0.], [2., 2.]]) assert_array_almost_equal(x, spsolve(A, b)) def test_gssv_badinput(self): N = 10 d = arange(N) + 1.0 A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N) for spmatrix in (csc_matrix, csr_matrix): A = spmatrix(A) b = np.arange(N) def not_c_contig(x): return x.repeat(2)[::2] def not_1dim(x): return x[:,None] def bad_type(x): return x.astype(bool) def too_short(x): return x[:-1] badops = [not_c_contig, not_1dim, bad_type, too_short] for badop in badops: msg = f"{spmatrix!r} {badop!r}" # Not C-contiguous assert_raises((ValueError, TypeError), _superlu.gssv, N, A.nnz, badop(A.data), A.indices, A.indptr, b, int(spmatrix == csc_matrix), err_msg=msg) assert_raises((ValueError, TypeError), _superlu.gssv, N, A.nnz, A.data, badop(A.indices), A.indptr, b, int(spmatrix == csc_matrix), err_msg=msg) assert_raises((ValueError, TypeError), _superlu.gssv, N, A.nnz, A.data, A.indices, badop(A.indptr), b, int(spmatrix == csc_matrix), err_msg=msg) def test_sparsity_preservation(self): ident = csc_matrix([ [1, 0, 0], [0, 1, 0], [0, 0, 1]]) b = csc_matrix([ [0, 1], [1, 0], [0, 0]]) x = spsolve(ident, b) assert_equal(ident.nnz, 3) assert_equal(b.nnz, 2) assert_equal(x.nnz, 2) assert_allclose(x.A, b.A, atol=1e-12, rtol=1e-12) def test_dtype_cast(self): A_real = 
scipy.sparse.csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]]) A_complex = scipy.sparse.csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5 + 1j]]) b_real = np.array([1,1,1]) b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1]) x = spsolve(A_real, b_real) assert_(np.issubdtype(x.dtype, np.floating)) x = spsolve(A_real, b_complex) assert_(np.issubdtype(x.dtype, np.complexfloating)) x = spsolve(A_complex, b_real) assert_(np.issubdtype(x.dtype, np.complexfloating)) x = spsolve(A_complex, b_complex) assert_(np.issubdtype(x.dtype, np.complexfloating)) @pytest.mark.slow @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_bug_8278(self): check_free_memory(8000) use_solver(useUmfpack=True) A, b = setup_bug_8278() x = spsolve(A, b) assert_array_almost_equal(A @ x, b) class TestSplu: def setup_method(self): use_solver(useUmfpack=False) n = 40 d = arange(n) + 1 self.n = n self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n, format='csc') random.seed(1234) def _smoketest(self, spxlu, check, dtype, idx_dtype): if np.issubdtype(dtype, np.complexfloating): A = self.A + 1j*self.A.T else: A = self.A A = A.astype(dtype) A.indices = A.indices.astype(idx_dtype, copy=False) A.indptr = A.indptr.astype(idx_dtype, copy=False) lu = spxlu(A) rng = random.RandomState(1234) # Input shapes for k in [None, 1, 2, self.n, self.n+2]: msg = f"k={k!r}" if k is None: b = rng.rand(self.n) else: b = rng.rand(self.n, k) if np.issubdtype(dtype, np.complexfloating): b = b + 1j*rng.rand(*b.shape) b = b.astype(dtype) x = lu.solve(b) check(A, b, x, msg) x = lu.solve(b, 'T') check(A.T, b, x, msg) x = lu.solve(b, 'H') check(A.T.conj(), b, x, msg) @sup_sparse_efficiency def test_splu_smoketest(self): self._internal_test_splu_smoketest() def _internal_test_splu_smoketest(self): # Check that splu works at all def check(A, b, x, msg=""): eps = np.finfo(A.dtype).eps r = A @ x assert_(abs(r - b).max() < 1e3*eps, msg) for dtype in [np.float32, np.float64, np.complex64, np.complex128]: for idx_dtype in 
[np.int32, np.int64]: self._smoketest(splu, check, dtype, idx_dtype) @sup_sparse_efficiency def test_spilu_smoketest(self): self._internal_test_spilu_smoketest() def _internal_test_spilu_smoketest(self): errors = [] def check(A, b, x, msg=""): r = A @ x err = abs(r - b).max() assert_(err < 1e-2, msg) if b.dtype in (np.float64, np.complex128): errors.append(err) for dtype in [np.float32, np.float64, np.complex64, np.complex128]: for idx_dtype in [np.int32, np.int64]: self._smoketest(spilu, check, dtype, idx_dtype) assert_(max(errors) > 1e-5) @sup_sparse_efficiency def test_spilu_drop_rule(self): # Test passing in the drop_rule argument to spilu. A = identity(2) rules = [ b'basic,area'.decode('ascii'), # unicode b'basic,area', # ascii [b'basic', b'area'.decode('ascii')] ] for rule in rules: # Argument should be accepted assert_(isinstance(spilu(A, drop_rule=rule), SuperLU)) def test_splu_nnz0(self): A = csc_matrix((5,5), dtype='d') assert_raises(RuntimeError, splu, A) def test_spilu_nnz0(self): A = csc_matrix((5,5), dtype='d') assert_raises(RuntimeError, spilu, A) def test_splu_basic(self): # Test basic splu functionality. n = 30 rng = random.RandomState(12) a = rng.rand(n, n) a[a < 0.95] = 0 # First test with a singular matrix a[:, 0] = 0 a_ = csc_matrix(a) # Matrix is exactly singular assert_raises(RuntimeError, splu, a_) # Make a diagonal dominant, to make sure it is not singular a += 4*eye(n) a_ = csc_matrix(a) lu = splu(a_) b = ones(n) x = lu.solve(b) assert_almost_equal(dot(a, x), b) def test_splu_perm(self): # Test the permutation vectors exposed by splu. n = 30 a = random.random((n, n)) a[a < 0.95] = 0 # Make a diagonal dominant, to make sure it is not singular a += 4*eye(n) a_ = csc_matrix(a) lu = splu(a_) # Check that the permutation indices do belong to [0, n-1]. 
for perm in (lu.perm_r, lu.perm_c): assert_(all(perm > -1)) assert_(all(perm < n)) assert_equal(len(unique(perm)), len(perm)) # Now make a symmetric, and test that the two permutation vectors are # the same # Note: a += a.T relies on undefined behavior. a = a + a.T a_ = csc_matrix(a) lu = splu(a_) assert_array_equal(lu.perm_r, lu.perm_c) @pytest.mark.parametrize("splu_fun, rtol", [(splu, 1e-7), (spilu, 1e-1)]) def test_natural_permc(self, splu_fun, rtol): # Test that the "NATURAL" permc_spec does not permute the matrix np.random.seed(42) n = 500 p = 0.01 A = scipy.sparse.random(n, n, p) x = np.random.rand(n) # Make A diagonal dominant to make sure it is not singular A += (n+1)*scipy.sparse.identity(n) A_ = csc_matrix(A) b = A_ @ x # without permc_spec, permutation is not identity lu = splu_fun(A_) assert_(np.any(lu.perm_c != np.arange(n))) # with permc_spec="NATURAL", permutation is identity lu = splu_fun(A_, permc_spec="NATURAL") assert_array_equal(lu.perm_c, np.arange(n)) # Also, lu decomposition is valid x2 = lu.solve(b) assert_allclose(x, x2, rtol=rtol) @pytest.mark.skipif(not hasattr(sys, 'getrefcount'), reason="no sys.getrefcount") def test_lu_refcount(self): # Test that we are keeping track of the reference count with splu. 
n = 30 a = random.random((n, n)) a[a < 0.95] = 0 # Make a diagonal dominant, to make sure it is not singular a += 4*eye(n) a_ = csc_matrix(a) lu = splu(a_) # And now test that we don't have a refcount bug rc = sys.getrefcount(lu) for attr in ('perm_r', 'perm_c'): perm = getattr(lu, attr) assert_equal(sys.getrefcount(lu), rc + 1) del perm assert_equal(sys.getrefcount(lu), rc) def test_bad_inputs(self): A = self.A.tocsc() assert_raises(ValueError, splu, A[:,:4]) assert_raises(ValueError, spilu, A[:,:4]) for lu in [splu(A), spilu(A)]: b = random.rand(42) B = random.rand(42, 3) BB = random.rand(self.n, 3, 9) assert_raises(ValueError, lu.solve, b) assert_raises(ValueError, lu.solve, B) assert_raises(ValueError, lu.solve, BB) assert_raises(TypeError, lu.solve, b.astype(np.complex64)) assert_raises(TypeError, lu.solve, b.astype(np.complex128)) @sup_sparse_efficiency def test_superlu_dlamch_i386_nan(self): # SuperLU 4.3 calls some functions returning floats without # declaring them. On i386@linux call convention, this fails to # clear floating point registers after call. As a result, NaN # can appear in the next floating point operation made. # # Here's a test case that triggered the issue. 
n = 8 d = np.arange(n) + 1 A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n) A = A.astype(np.float32) spilu(A) A = A + 1j*A B = A.A assert_(not np.isnan(B).any()) @sup_sparse_efficiency def test_lu_attr(self): def check(dtype, complex_2=False): A = self.A.astype(dtype) if complex_2: A = A + 1j*A.T n = A.shape[0] lu = splu(A) # Check that the decomposition is as advertized Pc = np.zeros((n, n)) Pc[np.arange(n), lu.perm_c] = 1 Pr = np.zeros((n, n)) Pr[lu.perm_r, np.arange(n)] = 1 Ad = A.toarray() lhs = Pr.dot(Ad).dot(Pc) rhs = (lu.L @ lu.U).toarray() eps = np.finfo(dtype).eps assert_allclose(lhs, rhs, atol=100*eps) check(np.float32) check(np.float64) check(np.complex64) check(np.complex128) check(np.complex64, True) check(np.complex128, True) @pytest.mark.slow @sup_sparse_efficiency def test_threads_parallel(self): oks = [] def worker(): try: self.test_splu_basic() self._internal_test_splu_smoketest() self._internal_test_spilu_smoketest() oks.append(True) except Exception: pass threads = [threading.Thread(target=worker) for k in range(20)] for t in threads: t.start() for t in threads: t.join() assert_equal(len(oks), 20) class TestSpsolveTriangular: def setup_method(self): use_solver(useUmfpack=False) def test_zero_diagonal(self): n = 5 rng = np.random.default_rng(43876432987) A = rng.standard_normal((n, n)) b = np.arange(n) A = scipy.sparse.tril(A, k=0, format='csr') x = spsolve_triangular(A, b, unit_diagonal=True, lower=True) A.setdiag(1) assert_allclose(A.dot(x), b) # Regression test from gh-15199 A = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]], dtype=np.float64) b = np.array([1., 2., 3.]) with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "CSR matrix format is") spsolve_triangular(A, b, unit_diagonal=True) def test_singular(self): n = 5 A = csr_matrix((n, n)) b = np.arange(n) for lower in (True, False): assert_raises(scipy.linalg.LinAlgError, spsolve_triangular, A, b, lower=lower) @sup_sparse_efficiency def test_bad_shape(self): # A is not square. 
A = np.zeros((3, 4)) b = ones((4, 1)) assert_raises(ValueError, spsolve_triangular, A, b) # A2 and b2 have incompatible shapes. A2 = csr_matrix(eye(3)) b2 = array([1.0, 2.0]) assert_raises(ValueError, spsolve_triangular, A2, b2) @sup_sparse_efficiency def test_input_types(self): A = array([[1., 0.], [1., 2.]]) b = array([[2., 0.], [2., 2.]]) for matrix_type in (array, csc_matrix, csr_matrix): x = spsolve_triangular(matrix_type(A), b, lower=True) assert_array_almost_equal(A.dot(x), b) @pytest.mark.slow @pytest.mark.timeout(120) # prerelease_deps_coverage_64bit_blas job @sup_sparse_efficiency def test_random(self): def random_triangle_matrix(n, lower=True): A = scipy.sparse.random(n, n, density=0.1, format='coo') if lower: A = scipy.sparse.tril(A) else: A = scipy.sparse.triu(A) A = A.tocsr(copy=False) for i in range(n): A[i, i] = np.random.rand() + 1 return A np.random.seed(1234) for lower in (True, False): for n in (10, 10**2, 10**3): A = random_triangle_matrix(n, lower=lower) for m in (1, 10): for b in (np.random.rand(n, m), np.random.randint(-9, 9, (n, m)), np.random.randint(-9, 9, (n, m)) + np.random.randint(-9, 9, (n, m)) * 1j): x = spsolve_triangular(A, b, lower=lower) assert_array_almost_equal(A.dot(x), b) x = spsolve_triangular(A, b, lower=lower, unit_diagonal=True) A.setdiag(1) assert_array_almost_equal(A.dot(x), b)
27,633
33.456359
90
py
scipy
scipy-main/scipy/sparse/linalg/_dsolve/tests/__init__.py
0
0
0
py
scipy
scipy-main/scipy/sparse/linalg/tests/test_norm.py
"""Test functions for the sparse.linalg.norm module """ import pytest import numpy as np from numpy.linalg import norm as npnorm from numpy.testing import assert_allclose, assert_equal from pytest import raises as assert_raises import scipy.sparse from scipy.sparse.linalg import norm as spnorm # https://github.com/scipy/scipy/issues/16031 def test_sparray_norm(): row = np.array([0, 0, 1, 1]) col = np.array([0, 1, 2, 3]) data = np.array([4, 5, 7, 9]) test_arr = scipy.sparse.coo_array((data, (row, col)), shape=(2, 4)) test_mat = scipy.sparse.coo_matrix((data, (row, col)), shape=(2, 4)) assert_equal(spnorm(test_arr, ord=1, axis=0), np.array([4, 5, 7, 9])) assert_equal(spnorm(test_mat, ord=1, axis=0), np.array([4, 5, 7, 9])) assert_equal(spnorm(test_arr, ord=1, axis=1), np.array([9, 16])) assert_equal(spnorm(test_mat, ord=1, axis=1), np.array([9, 16])) class TestNorm: def setup_method(self): a = np.arange(9) - 4 b = a.reshape((3, 3)) self.b = scipy.sparse.csr_matrix(b) def test_matrix_norm(self): # Frobenius norm is the default assert_allclose(spnorm(self.b), 7.745966692414834) assert_allclose(spnorm(self.b, 'fro'), 7.745966692414834) assert_allclose(spnorm(self.b, np.inf), 9) assert_allclose(spnorm(self.b, -np.inf), 2) assert_allclose(spnorm(self.b, 1), 7) assert_allclose(spnorm(self.b, -1), 6) # Only floating or complex floating dtype supported by svds. 
with pytest.warns(UserWarning, match="The problem size"): assert_allclose(spnorm(self.b.astype(np.float64), 2), 7.348469228349534) # _multi_svd_norm is not implemented for sparse matrix assert_raises(NotImplementedError, spnorm, self.b, -2) def test_matrix_norm_axis(self): for m, axis in ((self.b, None), (self.b, (0, 1)), (self.b.T, (1, 0))): assert_allclose(spnorm(m, axis=axis), 7.745966692414834) assert_allclose(spnorm(m, 'fro', axis=axis), 7.745966692414834) assert_allclose(spnorm(m, np.inf, axis=axis), 9) assert_allclose(spnorm(m, -np.inf, axis=axis), 2) assert_allclose(spnorm(m, 1, axis=axis), 7) assert_allclose(spnorm(m, -1, axis=axis), 6) def test_vector_norm(self): v = [4.5825756949558398, 4.2426406871192848, 4.5825756949558398] for m, a in (self.b, 0), (self.b.T, 1): for axis in a, (a, ), a-2, (a-2, ): assert_allclose(spnorm(m, 1, axis=axis), [7, 6, 7]) assert_allclose(spnorm(m, np.inf, axis=axis), [4, 3, 4]) assert_allclose(spnorm(m, axis=axis), v) assert_allclose(spnorm(m, ord=2, axis=axis), v) assert_allclose(spnorm(m, ord=None, axis=axis), v) def test_norm_exceptions(self): m = self.b assert_raises(TypeError, spnorm, m, None, 1.5) assert_raises(TypeError, spnorm, m, None, [2]) assert_raises(ValueError, spnorm, m, None, ()) assert_raises(ValueError, spnorm, m, None, (0, 1, 2)) assert_raises(ValueError, spnorm, m, None, (0, 0)) assert_raises(ValueError, spnorm, m, None, (0, 2)) assert_raises(ValueError, spnorm, m, None, (-3, 0)) assert_raises(ValueError, spnorm, m, None, 2) assert_raises(ValueError, spnorm, m, None, -3) assert_raises(ValueError, spnorm, m, 'plate_of_shrimp', 0) assert_raises(ValueError, spnorm, m, 'plate_of_shrimp', (0, 1)) class TestVsNumpyNorm: _sparse_types = ( scipy.sparse.bsr_matrix, scipy.sparse.coo_matrix, scipy.sparse.csc_matrix, scipy.sparse.csr_matrix, scipy.sparse.dia_matrix, scipy.sparse.dok_matrix, scipy.sparse.lil_matrix, ) _test_matrices = ( (np.arange(9) - 4).reshape((3, 3)), [ [1, 2, 3], [-1, 1, 4]], [ [1, 0, 3], [-1, 1, 
4j]], ) def test_sparse_matrix_norms(self): for sparse_type in self._sparse_types: for M in self._test_matrices: S = sparse_type(M) assert_allclose(spnorm(S), npnorm(M)) assert_allclose(spnorm(S, 'fro'), npnorm(M, 'fro')) assert_allclose(spnorm(S, np.inf), npnorm(M, np.inf)) assert_allclose(spnorm(S, -np.inf), npnorm(M, -np.inf)) assert_allclose(spnorm(S, 1), npnorm(M, 1)) assert_allclose(spnorm(S, -1), npnorm(M, -1)) def test_sparse_matrix_norms_with_axis(self): for sparse_type in self._sparse_types: for M in self._test_matrices: S = sparse_type(M) for axis in None, (0, 1), (1, 0): assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis)) for ord in 'fro', np.inf, -np.inf, 1, -1: assert_allclose(spnorm(S, ord, axis=axis), npnorm(M, ord, axis=axis)) # Some numpy matrix norms are allergic to negative axes. for axis in (-2, -1), (-1, -2), (1, -2): assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis)) assert_allclose(spnorm(S, 'f', axis=axis), npnorm(M, 'f', axis=axis)) assert_allclose(spnorm(S, 'fro', axis=axis), npnorm(M, 'fro', axis=axis)) def test_sparse_vector_norms(self): for sparse_type in self._sparse_types: for M in self._test_matrices: S = sparse_type(M) for axis in (0, 1, -1, -2, (0, ), (1, ), (-1, ), (-2, )): assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis)) for ord in None, 2, np.inf, -np.inf, 1, 0.5, 0.42: assert_allclose(spnorm(S, ord, axis=axis), npnorm(M, ord, axis=axis))
6,163
42.408451
79
py
scipy
scipy-main/scipy/sparse/linalg/tests/test_matfuncs.py
# # Created by: Pearu Peterson, March 2002 # """ Test functions for scipy.linalg._matfuncs module """ import math import numpy as np from numpy import array, eye, exp, random from numpy.linalg import matrix_power from numpy.testing import ( assert_allclose, assert_, assert_array_almost_equal, assert_equal, assert_array_almost_equal_nulp, suppress_warnings) from scipy.sparse import csc_matrix, SparseEfficiencyWarning from scipy.sparse._construct import eye as speye from scipy.sparse.linalg._matfuncs import (expm, _expm, ProductOperator, MatrixPowerOperator, _onenorm_matrix_power_nnm) from scipy.sparse._sputils import matrix from scipy.linalg import logm from scipy.special import factorial, binom import scipy.sparse import scipy.sparse.linalg def _burkardt_13_power(n, p): """ A helper function for testing matrix functions. Parameters ---------- n : integer greater than 1 Order of the square matrix to be returned. p : non-negative integer Power of the matrix. Returns ------- out : ndarray representing a square matrix A Forsythe matrix of order n, raised to the power p. """ # Input validation. if n != int(n) or n < 2: raise ValueError('n must be an integer greater than 1') n = int(n) if p != int(p) or p < 0: raise ValueError('p must be a non-negative integer') p = int(p) # Construct the matrix explicitly. 
a, b = divmod(p, n) large = np.power(10.0, -n*a) small = large * np.power(10.0, -n) return np.diag([large]*(n-b), b) + np.diag([small]*b, b-n) def test_onenorm_matrix_power_nnm(): np.random.seed(1234) for n in range(1, 5): for p in range(5): M = np.random.random((n, n)) Mp = np.linalg.matrix_power(M, p) observed = _onenorm_matrix_power_nnm(M, p) expected = np.linalg.norm(Mp, 1) assert_allclose(observed, expected) class TestExpM: def test_zero_ndarray(self): a = array([[0.,0],[0,0]]) assert_array_almost_equal(expm(a),[[1,0],[0,1]]) def test_zero_sparse(self): a = csc_matrix([[0.,0],[0,0]]) assert_array_almost_equal(expm(a).toarray(),[[1,0],[0,1]]) def test_zero_matrix(self): a = matrix([[0.,0],[0,0]]) assert_array_almost_equal(expm(a),[[1,0],[0,1]]) def test_misc_types(self): A = expm(np.array([[1]])) assert_allclose(expm(((1,),)), A) assert_allclose(expm([[1]]), A) assert_allclose(expm(matrix([[1]])), A) assert_allclose(expm(np.array([[1]])), A) assert_allclose(expm(csc_matrix([[1]])).A, A) B = expm(np.array([[1j]])) assert_allclose(expm(((1j,),)), B) assert_allclose(expm([[1j]]), B) assert_allclose(expm(matrix([[1j]])), B) assert_allclose(expm(csc_matrix([[1j]])).A, B) def test_bidiagonal_sparse(self): A = csc_matrix([ [1, 3, 0], [0, 1, 5], [0, 0, 2]], dtype=float) e1 = math.exp(1) e2 = math.exp(2) expected = np.array([ [e1, 3*e1, 15*(e2 - 2*e1)], [0, e1, 5*(e2 - e1)], [0, 0, e2]], dtype=float) observed = expm(A).toarray() assert_array_almost_equal(observed, expected) def test_padecases_dtype_float(self): for dtype in [np.float32, np.float64]: for scale in [1e-2, 1e-1, 5e-1, 1, 10]: A = scale * eye(3, dtype=dtype) observed = expm(A) expected = exp(scale, dtype=dtype) * eye(3, dtype=dtype) assert_array_almost_equal_nulp(observed, expected, nulp=100) def test_padecases_dtype_complex(self): for dtype in [np.complex64, np.complex128]: for scale in [1e-2, 1e-1, 5e-1, 1, 10]: A = scale * eye(3, dtype=dtype) observed = expm(A) expected = exp(scale, dtype=dtype) * eye(3, 
dtype=dtype) assert_array_almost_equal_nulp(observed, expected, nulp=100) def test_padecases_dtype_sparse_float(self): # float32 and complex64 lead to errors in spsolve/UMFpack dtype = np.float64 for scale in [1e-2, 1e-1, 5e-1, 1, 10]: a = scale * speye(3, 3, dtype=dtype, format='csc') e = exp(scale, dtype=dtype) * eye(3, dtype=dtype) with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "Changing the sparsity structure of a csc_matrix is expensive.") exact_onenorm = _expm(a, use_exact_onenorm=True).toarray() inexact_onenorm = _expm(a, use_exact_onenorm=False).toarray() assert_array_almost_equal_nulp(exact_onenorm, e, nulp=100) assert_array_almost_equal_nulp(inexact_onenorm, e, nulp=100) def test_padecases_dtype_sparse_complex(self): # float32 and complex64 lead to errors in spsolve/UMFpack dtype = np.complex128 for scale in [1e-2, 1e-1, 5e-1, 1, 10]: a = scale * speye(3, 3, dtype=dtype, format='csc') e = exp(scale) * eye(3, dtype=dtype) with suppress_warnings() as sup: sup.filter(SparseEfficiencyWarning, "Changing the sparsity structure of a csc_matrix is expensive.") assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100) def test_logm_consistency(self): random.seed(1234) for dtype in [np.float64, np.complex128]: for n in range(1, 10): for scale in [1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2]: # make logm(A) be of a given scale A = (eye(n) + random.rand(n, n) * scale).astype(dtype) if np.iscomplexobj(A): A = A + 1j * random.rand(n, n) * scale assert_array_almost_equal(expm(logm(A)), A) def test_integer_matrix(self): Q = np.array([ [-3, 1, 1, 1], [1, -3, 1, 1], [1, 1, -3, 1], [1, 1, 1, -3]]) assert_allclose(expm(Q), expm(1.0 * Q)) def test_integer_matrix_2(self): # Check for integer overflows Q = np.array([[-500, 500, 0, 0], [0, -550, 360, 190], [0, 630, -630, 0], [0, 0, 0, 0]], dtype=np.int16) assert_allclose(expm(Q), expm(1.0 * Q)) Q = csc_matrix(Q) assert_allclose(expm(Q).A, expm(1.0 * Q).A) def test_triangularity_perturbation(self): # Experiment 
(1) of # Awad H. Al-Mohy and Nicholas J. Higham (2012) # Improved Inverse Scaling and Squaring Algorithms # for the Matrix Logarithm. A = np.array([ [3.2346e-1, 3e4, 3e4, 3e4], [0, 3.0089e-1, 3e4, 3e4], [0, 0, 3.221e-1, 3e4], [0, 0, 0, 3.0744e-1]], dtype=float) A_logm = np.array([ [-1.12867982029050462e+00, 9.61418377142025565e+04, -4.52485573953179264e+09, 2.92496941103871812e+14], [0.00000000000000000e+00, -1.20101052953082288e+00, 9.63469687211303099e+04, -4.68104828911105442e+09], [0.00000000000000000e+00, 0.00000000000000000e+00, -1.13289322264498393e+00, 9.53249183094775653e+04], [0.00000000000000000e+00, 0.00000000000000000e+00, 0.00000000000000000e+00, -1.17947533272554850e+00]], dtype=float) assert_allclose(expm(A_logm), A, rtol=1e-4) # Perturb the upper triangular matrix by tiny amounts, # so that it becomes technically not upper triangular. random.seed(1234) tiny = 1e-17 A_logm_perturbed = A_logm.copy() A_logm_perturbed[1, 0] = tiny with suppress_warnings() as sup: sup.filter(RuntimeWarning, "Ill-conditioned.*") A_expm_logm_perturbed = expm(A_logm_perturbed) rtol = 1e-4 atol = 100 * tiny assert_(not np.allclose(A_expm_logm_perturbed, A, rtol=rtol, atol=atol)) def test_burkardt_1(self): # This matrix is diagonal. # The calculation of the matrix exponential is simple. # # This is the first of a series of matrix exponential tests # collected by John Burkardt from the following sources. # # Alan Laub, # Review of "Linear System Theory" by Joao Hespanha, # SIAM Review, # Volume 52, Number 4, December 2010, pages 779--781. # # Cleve Moler and Charles Van Loan, # Nineteen Dubious Ways to Compute the Exponential of a Matrix, # Twenty-Five Years Later, # SIAM Review, # Volume 45, Number 1, March 2003, pages 3--49. # # Cleve Moler, # Cleve's Corner: A Balancing Act for the Matrix Exponential, # 23 July 2012. 
# # Robert Ward, # Numerical computation of the matrix exponential # with accuracy estimate, # SIAM Journal on Numerical Analysis, # Volume 14, Number 4, September 1977, pages 600--610. exp1 = np.exp(1) exp2 = np.exp(2) A = np.array([ [1, 0], [0, 2], ], dtype=float) desired = np.array([ [exp1, 0], [0, exp2], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_2(self): # This matrix is symmetric. # The calculation of the matrix exponential is straightforward. A = np.array([ [1, 3], [3, 2], ], dtype=float) desired = np.array([ [39.322809708033859, 46.166301438885753], [46.166301438885768, 54.711576854329110], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_3(self): # This example is due to Laub. # This matrix is ill-suited for the Taylor series approach. # As powers of A are computed, the entries blow up too quickly. exp1 = np.exp(1) exp39 = np.exp(39) A = np.array([ [0, 1], [-39, -40], ], dtype=float) desired = np.array([ [ 39/(38*exp1) - 1/(38*exp39), -np.expm1(-38) / (38*exp1)], [ 39*np.expm1(-38) / (38*exp1), -1/(38*exp1) + 39/(38*exp39)], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_4(self): # This example is due to Moler and Van Loan. # The example will cause problems for the series summation approach, # as well as for diagonal Pade approximations. A = np.array([ [-49, 24], [-64, 31], ], dtype=float) U = np.array([[3, 1], [4, 2]], dtype=float) V = np.array([[1, -1/2], [-2, 3/2]], dtype=float) w = np.array([-17, -1], dtype=float) desired = np.dot(U * np.exp(w), V) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_5(self): # This example is due to Moler and Van Loan. # This matrix is strictly upper triangular # All powers of A are zero beyond some (low) limit. # This example will cause problems for Pade approximations. 
A = np.array([ [0, 6, 0, 0], [0, 0, 6, 0], [0, 0, 0, 6], [0, 0, 0, 0], ], dtype=float) desired = np.array([ [1, 6, 18, 36], [0, 1, 6, 18], [0, 0, 1, 6], [0, 0, 0, 1], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_6(self): # This example is due to Moler and Van Loan. # This matrix does not have a complete set of eigenvectors. # That means the eigenvector approach will fail. exp1 = np.exp(1) A = np.array([ [1, 1], [0, 1], ], dtype=float) desired = np.array([ [exp1, exp1], [0, exp1], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_7(self): # This example is due to Moler and Van Loan. # This matrix is very close to example 5. # Mathematically, it has a complete set of eigenvectors. # Numerically, however, the calculation will be suspect. exp1 = np.exp(1) eps = np.spacing(1) A = np.array([ [1 + eps, 1], [0, 1 - eps], ], dtype=float) desired = np.array([ [exp1, exp1], [0, exp1], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_8(self): # This matrix was an example in Wikipedia. exp4 = np.exp(4) exp16 = np.exp(16) A = np.array([ [21, 17, 6], [-5, -1, -6], [4, 4, 16], ], dtype=float) desired = np.array([ [13*exp16 - exp4, 13*exp16 - 5*exp4, 2*exp16 - 2*exp4], [-9*exp16 + exp4, -9*exp16 + 5*exp4, -2*exp16 + 2*exp4], [16*exp16, 16*exp16, 4*exp16], ], dtype=float) * 0.25 actual = expm(A) assert_allclose(actual, desired) def test_burkardt_9(self): # This matrix is due to the NAG Library. # It is an example for function F01ECF. A = np.array([ [1, 2, 2, 2], [3, 1, 1, 2], [3, 2, 1, 2], [3, 3, 3, 1], ], dtype=float) desired = np.array([ [740.7038, 610.8500, 542.2743, 549.1753], [731.2510, 603.5524, 535.0884, 542.2743], [823.7630, 679.4257, 603.5524, 610.8500], [998.4355, 823.7630, 731.2510, 740.7038], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_10(self): # This is Ward's example #1. # It is defective and nonderogatory. 
A = np.array([ [4, 2, 0], [1, 4, 1], [1, 1, 4], ], dtype=float) assert_allclose(sorted(scipy.linalg.eigvals(A)), (3, 3, 6)) desired = np.array([ [147.8666224463699, 183.7651386463682, 71.79703239999647], [127.7810855231823, 183.7651386463682, 91.88256932318415], [127.7810855231824, 163.6796017231806, 111.9681062463718], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_11(self): # This is Ward's example #2. # It is a symmetric matrix. A = np.array([ [29.87942128909879, 0.7815750847907159, -2.289519314033932], [0.7815750847907159, 25.72656945571064, 8.680737820540137], [-2.289519314033932, 8.680737820540137, 34.39400925519054], ], dtype=float) assert_allclose(scipy.linalg.eigvalsh(A), (20, 30, 40)) desired = np.array([ [ 5.496313853692378E+15, -1.823188097200898E+16, -3.047577080858001E+16], [ -1.823188097200899E+16, 6.060522870222108E+16, 1.012918429302482E+17], [ -3.047577080858001E+16, 1.012918429302482E+17, 1.692944112408493E+17], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_12(self): # This is Ward's example #3. # Ward's algorithm has difficulty estimating the accuracy # of its results. A = np.array([ [-131, 19, 18], [-390, 56, 54], [-387, 57, 52], ], dtype=float) assert_allclose(sorted(scipy.linalg.eigvals(A)), (-20, -2, -1)) desired = np.array([ [-1.509644158793135, 0.3678794391096522, 0.1353352811751005], [-5.632570799891469, 1.471517758499875, 0.4060058435250609], [-4.934938326088363, 1.103638317328798, 0.5413411267617766], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_13(self): # This is Ward's example #4. # This is a version of the Forsythe matrix. # The eigenvector problem is badly conditioned. # Ward's algorithm has difficulty esimating the accuracy # of its results for this problem. # # Check the construction of one instance of this family of matrices. 
A4_actual = _burkardt_13_power(4, 1) A4_desired = [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [1e-4, 0, 0, 0]] assert_allclose(A4_actual, A4_desired) # Check the expm for a few instances. for n in (2, 3, 4, 10): # Approximate expm using Taylor series. # This works well for this matrix family # because each matrix in the summation, # even before dividing by the factorial, # is entrywise positive with max entry 10**(-floor(p/n)*n). k = max(1, int(np.ceil(16/n))) desired = np.zeros((n, n), dtype=float) for p in range(n*k): Ap = _burkardt_13_power(n, p) assert_equal(np.min(Ap), 0) assert_allclose(np.max(Ap), np.power(10, -np.floor(p/n)*n)) desired += Ap / factorial(p) actual = expm(_burkardt_13_power(n, 1)) assert_allclose(actual, desired) def test_burkardt_14(self): # This is Moler's example. # This badly scaled matrix caused problems for MATLAB's expm(). A = np.array([ [0, 1e-8, 0], [-(2e10 + 4e8/6.), -3, 2e10], [200./3., 0, -200./3.], ], dtype=float) desired = np.array([ [0.446849468283175, 1.54044157383952e-09, 0.462811453558774], [-5743067.77947947, -0.0152830038686819, -4526542.71278401], [0.447722977849494, 1.54270484519591e-09, 0.463480648837651], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_pascal(self): # Test pascal triangle. 
# Nilpotent exponential, used to trigger a failure (gh-8029) for scale in [1.0, 1e-3, 1e-6]: for n in range(0, 80, 3): sc = scale ** np.arange(n, -1, -1) if np.any(sc < 1e-300): break A = np.diag(np.arange(1, n + 1), -1) * scale B = expm(A) got = B expected = binom(np.arange(n + 1)[:,None], np.arange(n + 1)[None,:]) * sc[None,:] / sc[:,None] atol = 1e-13 * abs(expected).max() assert_allclose(got, expected, atol=atol) def test_matrix_input(self): # Large np.matrix inputs should work, gh-5546 A = np.zeros((200, 200)) A[-1,0] = 1 B0 = expm(A) with suppress_warnings() as sup: sup.filter(DeprecationWarning, "the matrix subclass.*") sup.filter(PendingDeprecationWarning, "the matrix subclass.*") B = expm(np.matrix(A)) assert_allclose(B, B0) def test_exp_sinch_overflow(self): # Check overflow in intermediate steps is fixed (gh-11839) L = np.array([[1.0, -0.5, -0.5, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, -0.5, -0.5, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, -0.5, -0.5], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]) E0 = expm(-L) E1 = expm(-2**11 * L) E2 = E0 for j in range(11): E2 = E2 @ E2 assert_allclose(E1, E2) class TestOperators: def test_product_operator(self): random.seed(1234) n = 5 k = 2 nsamples = 10 for i in range(nsamples): A = np.random.randn(n, n) B = np.random.randn(n, n) C = np.random.randn(n, n) D = np.random.randn(n, k) op = ProductOperator(A, B, C) assert_allclose(op.matmat(D), A.dot(B).dot(C).dot(D)) assert_allclose(op.T.matmat(D), (A.dot(B).dot(C)).T.dot(D)) def test_matrix_power_operator(self): random.seed(1234) n = 5 k = 2 p = 3 nsamples = 10 for i in range(nsamples): A = np.random.randn(n, n) B = np.random.randn(n, k) op = MatrixPowerOperator(A, p) assert_allclose(op.matmat(B), matrix_power(A, p).dot(B)) assert_allclose(op.T.matmat(B), matrix_power(A, p).T.dot(B))
21,280
35.565292
91
py
scipy
scipy-main/scipy/sparse/linalg/tests/test_interface.py
"""Test functions for the sparse.linalg._interface module """ from functools import partial from itertools import product import operator from pytest import raises as assert_raises, warns from numpy.testing import assert_, assert_equal import numpy as np import scipy.sparse as sparse import scipy.sparse.linalg._interface as interface from scipy.sparse._sputils import matrix class TestLinearOperator: def setup_method(self): self.A = np.array([[1,2,3], [4,5,6]]) self.B = np.array([[1,2], [3,4], [5,6]]) self.C = np.array([[1,2], [3,4]]) def test_matvec(self): def get_matvecs(A): return [{ 'shape': A.shape, 'matvec': lambda x: np.dot(A, x).reshape(A.shape[0]), 'rmatvec': lambda x: np.dot(A.T.conj(), x).reshape(A.shape[1]) }, { 'shape': A.shape, 'matvec': lambda x: np.dot(A, x), 'rmatvec': lambda x: np.dot(A.T.conj(), x), 'rmatmat': lambda x: np.dot(A.T.conj(), x), 'matmat': lambda x: np.dot(A, x) }] for matvecs in get_matvecs(self.A): A = interface.LinearOperator(**matvecs) assert_(A.args == ()) assert_equal(A.matvec(np.array([1,2,3])), [14,32]) assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]]) assert_equal(A * np.array([1,2,3]), [14,32]) assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]]) assert_equal(A.dot(np.array([1,2,3])), [14,32]) assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]]) assert_equal(A.matvec(matrix([[1],[2],[3]])), [[14],[32]]) assert_equal(A * matrix([[1],[2],[3]]), [[14],[32]]) assert_equal(A.dot(matrix([[1],[2],[3]])), [[14],[32]]) assert_equal((2*A)*[1,1,1], [12,30]) assert_equal((2 * A).rmatvec([1, 1]), [10, 14, 18]) assert_equal((2*A).H.matvec([1,1]), [10, 14, 18]) assert_equal((2*A)*[[1],[1],[1]], [[12],[30]]) assert_equal((2 * A).matmat([[1], [1], [1]]), [[12], [30]]) assert_equal((A*2)*[1,1,1], [12,30]) assert_equal((A*2)*[[1],[1],[1]], [[12],[30]]) assert_equal((2j*A)*[1,1,1], [12j,30j]) assert_equal((A+A)*[1,1,1], [12, 30]) assert_equal((A + A).rmatvec([1, 1]), [10, 14, 18]) assert_equal((A+A).H.matvec([1,1]), [10, 14, 
18]) assert_equal((A+A)*[[1],[1],[1]], [[12], [30]]) assert_equal((A+A).matmat([[1],[1],[1]]), [[12], [30]]) assert_equal((-A)*[1,1,1], [-6,-15]) assert_equal((-A)*[[1],[1],[1]], [[-6],[-15]]) assert_equal((A-A)*[1,1,1], [0,0]) assert_equal((A - A) * [[1], [1], [1]], [[0], [0]]) X = np.array([[1, 2], [3, 4]]) # A_asarray = np.array([[1, 2, 3], [4, 5, 6]]) assert_equal((2 * A).rmatmat(X), np.dot((2 * self.A).T, X)) assert_equal((A * 2).rmatmat(X), np.dot((self.A * 2).T, X)) assert_equal((2j * A).rmatmat(X), np.dot((2j * self.A).T.conj(), X)) assert_equal((A * 2j).rmatmat(X), np.dot((self.A * 2j).T.conj(), X)) assert_equal((A + A).rmatmat(X), np.dot((self.A + self.A).T, X)) assert_equal((A + 2j * A).rmatmat(X), np.dot((self.A + 2j * self.A).T.conj(), X)) assert_equal((-A).rmatmat(X), np.dot((-self.A).T, X)) assert_equal((A - A).rmatmat(X), np.dot((self.A - self.A).T, X)) assert_equal((2j * A).rmatmat(2j * X), np.dot((2j * self.A).T.conj(), 2j * X)) z = A+A assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is A) z = 2*A assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] == 2) assert_(isinstance(A.matvec([1, 2, 3]), np.ndarray)) assert_(isinstance(A.matvec(np.array([[1],[2],[3]])), np.ndarray)) assert_(isinstance(A * np.array([1,2,3]), np.ndarray)) assert_(isinstance(A * np.array([[1],[2],[3]]), np.ndarray)) assert_(isinstance(A.dot(np.array([1,2,3])), np.ndarray)) assert_(isinstance(A.dot(np.array([[1],[2],[3]])), np.ndarray)) assert_(isinstance(A.matvec(matrix([[1],[2],[3]])), np.ndarray)) assert_(isinstance(A * matrix([[1],[2],[3]]), np.ndarray)) assert_(isinstance(A.dot(matrix([[1],[2],[3]])), np.ndarray)) assert_(isinstance(2*A, interface._ScaledLinearOperator)) assert_(isinstance(2j*A, interface._ScaledLinearOperator)) assert_(isinstance(A+A, interface._SumLinearOperator)) assert_(isinstance(-A, interface._ScaledLinearOperator)) assert_(isinstance(A-A, interface._SumLinearOperator)) assert_(isinstance(A/2, interface._ScaledLinearOperator)) 
assert_(isinstance(A/2j, interface._ScaledLinearOperator)) assert_(((A * 3) / 3).args[0] is A) # check for simplification # Test that prefactor is of _ScaledLinearOperator is not mutated # when the operator is multiplied by a number result = A @ np.array([1, 2, 3]) B = A * 3 C = A / 5 assert_equal(A @ np.array([1, 2, 3]), result) assert_((2j*A).dtype == np.complex_) # Test division by non-scalar msg = "Can only divide a linear operator by a scalar." with assert_raises(ValueError, match=msg): A / np.array([1, 2]) assert_raises(ValueError, A.matvec, np.array([1,2])) assert_raises(ValueError, A.matvec, np.array([1,2,3,4])) assert_raises(ValueError, A.matvec, np.array([[1],[2]])) assert_raises(ValueError, A.matvec, np.array([[1],[2],[3],[4]])) assert_raises(ValueError, lambda: A*A) assert_raises(ValueError, lambda: A**2) for matvecsA, matvecsB in product(get_matvecs(self.A), get_matvecs(self.B)): A = interface.LinearOperator(**matvecsA) B = interface.LinearOperator(**matvecsB) # AtimesB = np.array([[22, 28], [49, 64]]) AtimesB = self.A.dot(self.B) X = np.array([[1, 2], [3, 4]]) assert_equal((A * B).rmatmat(X), np.dot((AtimesB).T, X)) assert_equal((2j * A * B).rmatmat(X), np.dot((2j * AtimesB).T.conj(), X)) assert_equal((A*B)*[1,1], [50,113]) assert_equal((A*B)*[[1],[1]], [[50],[113]]) assert_equal((A*B).matmat([[1],[1]]), [[50],[113]]) assert_equal((A * B).rmatvec([1, 1]), [71, 92]) assert_equal((A * B).H.matvec([1, 1]), [71, 92]) assert_(isinstance(A*B, interface._ProductLinearOperator)) assert_raises(ValueError, lambda: A+B) assert_raises(ValueError, lambda: A**2) z = A*B assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is B) for matvecsC in get_matvecs(self.C): C = interface.LinearOperator(**matvecsC) X = np.array([[1, 2], [3, 4]]) assert_equal(C.rmatmat(X), np.dot((self.C).T, X)) assert_equal((C**2).rmatmat(X), np.dot((np.dot(self.C, self.C)).T, X)) assert_equal((C**2)*[1,1], [17,37]) assert_equal((C**2).rmatvec([1, 1]), [22, 32]) 
assert_equal((C**2).H.matvec([1, 1]), [22, 32]) assert_equal((C**2).matmat([[1],[1]]), [[17],[37]]) assert_(isinstance(C**2, interface._PowerLinearOperator)) def test_matmul(self): D = {'shape': self.A.shape, 'matvec': lambda x: np.dot(self.A, x).reshape(self.A.shape[0]), 'rmatvec': lambda x: np.dot(self.A.T.conj(), x).reshape(self.A.shape[1]), 'rmatmat': lambda x: np.dot(self.A.T.conj(), x), 'matmat': lambda x: np.dot(self.A, x)} A = interface.LinearOperator(**D) B = np.array([[1 + 1j, 2, 3], [4, 5, 6], [7, 8, 9]]) b = B[0] assert_equal(operator.matmul(A, b), A * b) assert_equal(operator.matmul(A, b.reshape(-1, 1)), A * b.reshape(-1, 1)) assert_equal(operator.matmul(A, B), A * B) assert_equal(operator.matmul(b, A.H), b * A.H) assert_equal(operator.matmul(b.reshape(1, -1), A.H), b.reshape(1, -1) * A.H) assert_equal(operator.matmul(B, A.H), B * A.H) assert_raises(ValueError, operator.matmul, A, 2) assert_raises(ValueError, operator.matmul, 2, A) class TestAsLinearOperator: def setup_method(self): self.cases = [] def make_cases(original, dtype): cases = [] cases.append((matrix(original, dtype=dtype), original)) cases.append((np.array(original, dtype=dtype), original)) cases.append((sparse.csr_matrix(original, dtype=dtype), original)) # Test default implementations of _adjoint and _rmatvec, which # refer to each other. 
def mv(x, dtype): y = original.dot(x) if len(x.shape) == 2: y = y.reshape(-1, 1) return y def rmv(x, dtype): return original.T.conj().dot(x) class BaseMatlike(interface.LinearOperator): args = () def __init__(self, dtype): self.dtype = np.dtype(dtype) self.shape = original.shape def _matvec(self, x): return mv(x, self.dtype) class HasRmatvec(BaseMatlike): args = () def _rmatvec(self,x): return rmv(x, self.dtype) class HasAdjoint(BaseMatlike): args = () def _adjoint(self): shape = self.shape[1], self.shape[0] matvec = partial(rmv, dtype=self.dtype) rmatvec = partial(mv, dtype=self.dtype) return interface.LinearOperator(matvec=matvec, rmatvec=rmatvec, dtype=self.dtype, shape=shape) class HasRmatmat(HasRmatvec): def _matmat(self, x): return original.dot(x) def _rmatmat(self, x): return original.T.conj().dot(x) cases.append((HasRmatvec(dtype), original)) cases.append((HasAdjoint(dtype), original)) cases.append((HasRmatmat(dtype), original)) return cases original = np.array([[1,2,3], [4,5,6]]) self.cases += make_cases(original, np.int32) self.cases += make_cases(original, np.float32) self.cases += make_cases(original, np.float64) self.cases += [(interface.aslinearoperator(M).T, A.T) for M, A in make_cases(original.T, np.float64)] self.cases += [(interface.aslinearoperator(M).H, A.T.conj()) for M, A in make_cases(original.T, np.float64)] original = np.array([[1, 2j, 3j], [4j, 5j, 6]]) self.cases += make_cases(original, np.complex_) self.cases += [(interface.aslinearoperator(M).T, A.T) for M, A in make_cases(original.T, np.complex_)] self.cases += [(interface.aslinearoperator(M).H, A.T.conj()) for M, A in make_cases(original.T, np.complex_)] def test_basic(self): for M, A_array in self.cases: A = interface.aslinearoperator(M) M,N = A.shape xs = [np.array([1, 2, 3]), np.array([[1], [2], [3]])] ys = [np.array([1, 2]), np.array([[1], [2]])] if A.dtype == np.complex_: xs += [np.array([1, 2j, 3j]), np.array([[1], [2j], [3j]])] ys += [np.array([1, 2j]), np.array([[1], [2j]])] 
x2 = np.array([[1, 4], [2, 5], [3, 6]]) for x in xs: assert_equal(A.matvec(x), A_array.dot(x)) assert_equal(A * x, A_array.dot(x)) assert_equal(A.matmat(x2), A_array.dot(x2)) assert_equal(A * x2, A_array.dot(x2)) for y in ys: assert_equal(A.rmatvec(y), A_array.T.conj().dot(y)) assert_equal(A.T.matvec(y), A_array.T.dot(y)) assert_equal(A.H.matvec(y), A_array.T.conj().dot(y)) for y in ys: if y.ndim < 2: continue assert_equal(A.rmatmat(y), A_array.T.conj().dot(y)) assert_equal(A.T.matmat(y), A_array.T.dot(y)) assert_equal(A.H.matmat(y), A_array.T.conj().dot(y)) if hasattr(M,'dtype'): assert_equal(A.dtype, M.dtype) assert_(hasattr(A, 'args')) def test_dot(self): for M, A_array in self.cases: A = interface.aslinearoperator(M) M,N = A.shape x0 = np.array([1, 2, 3]) x1 = np.array([[1], [2], [3]]) x2 = np.array([[1, 4], [2, 5], [3, 6]]) assert_equal(A.dot(x0), A_array.dot(x0)) assert_equal(A.dot(x1), A_array.dot(x1)) assert_equal(A.dot(x2), A_array.dot(x2)) def test_repr(): A = interface.LinearOperator(shape=(1, 1), matvec=lambda x: 1) repr_A = repr(A) assert_('unspecified dtype' not in repr_A, repr_A) def test_identity(): ident = interface.IdentityOperator((3, 3)) assert_equal(ident * [1, 2, 3], [1, 2, 3]) assert_equal(ident.dot(np.arange(9).reshape(3, 3)).ravel(), np.arange(9)) assert_raises(ValueError, ident.matvec, [1, 2, 3, 4]) def test_attributes(): A = interface.aslinearoperator(np.arange(16).reshape(4, 4)) def always_four_ones(x): x = np.asarray(x) assert_(x.shape == (3,) or x.shape == (3, 1)) return np.ones(4) B = interface.LinearOperator(shape=(4, 3), matvec=always_four_ones) for op in [A, B, A * B, A.H, A + A, B + B, A**4]: assert_(hasattr(op, "dtype")) assert_(hasattr(op, "shape")) assert_(hasattr(op, "_matvec")) def matvec(x): """ Needed for test_pickle as local functions are not pickleable """ return np.zeros(3) def test_pickle(): import pickle for protocol in range(pickle.HIGHEST_PROTOCOL + 1): A = interface.LinearOperator((3, 3), matvec) s = pickle.dumps(A, 
protocol=protocol) B = pickle.loads(s) for k in A.__dict__: assert_equal(getattr(A, k), getattr(B, k)) def test_inheritance(): class Empty(interface.LinearOperator): pass with warns(RuntimeWarning, match="should implement at least"): assert_raises(TypeError, Empty) class Identity(interface.LinearOperator): def __init__(self, n): super().__init__(dtype=None, shape=(n, n)) def _matvec(self, x): return x id3 = Identity(3) assert_equal(id3.matvec([1, 2, 3]), [1, 2, 3]) assert_raises(NotImplementedError, id3.rmatvec, [4, 5, 6]) class MatmatOnly(interface.LinearOperator): def __init__(self, A): super().__init__(A.dtype, A.shape) self.A = A def _matmat(self, x): return self.A.dot(x) mm = MatmatOnly(np.random.randn(5, 3)) assert_equal(mm.matvec(np.random.randn(3)).shape, (5,)) def test_dtypes_of_operator_sum(): # gh-6078 mat_complex = np.random.rand(2,2) + 1j * np.random.rand(2,2) mat_real = np.random.rand(2,2) complex_operator = interface.aslinearoperator(mat_complex) real_operator = interface.aslinearoperator(mat_real) sum_complex = complex_operator + complex_operator sum_real = real_operator + real_operator assert_equal(sum_real.dtype, np.float64) assert_equal(sum_complex.dtype, np.complex128) def test_no_double_init(): call_count = [0] def matvec(v): call_count[0] += 1 return v # It should call matvec exactly once (in order to determine the # operator dtype) interface.LinearOperator((2, 2), matvec=matvec) assert_equal(call_count[0], 1) def test_adjoint_conjugate(): X = np.array([[1j]]) A = interface.aslinearoperator(X) B = 1j * A Y = 1j * X v = np.array([1]) assert_equal(B.dot(v), Y.dot(v)) assert_equal(B.H.dot(v), Y.T.conj().dot(v)) def test_ndim(): X = np.array([[1]]) A = interface.aslinearoperator(X) assert_equal(A.ndim, 2) def test_transpose_noconjugate(): X = np.array([[1j]]) A = interface.aslinearoperator(X) B = 1j * A Y = 1j * X v = np.array([1]) assert_equal(B.dot(v), Y.dot(v)) assert_equal(B.T.dot(v), Y.T.dot(v)) def test_sparse_matmat_exception(): A = 
interface.LinearOperator((2, 2), matvec=lambda x: x) B = sparse.identity(2) msg = "Unable to multiply a LinearOperator with a sparse matrix." with assert_raises(TypeError, match=msg): A @ B with assert_raises(TypeError, match=msg): B @ A with assert_raises(ValueError): A @ np.identity(4) with assert_raises(ValueError): np.identity(4) @ A
17,943
36.228216
84
py
scipy
scipy-main/scipy/sparse/linalg/tests/test_pydata_sparse.py
import pytest import numpy as np import scipy.sparse as sp import scipy.sparse.linalg as splin from numpy.testing import assert_allclose, assert_equal try: import sparse except Exception: sparse = None pytestmark = pytest.mark.skipif(sparse is None, reason="pydata/sparse not installed") msg = "pydata/sparse (0.8) does not implement necessary operations" sparse_params = (pytest.param("COO"), pytest.param("DOK", marks=[pytest.mark.xfail(reason=msg)])) scipy_sparse_classes = [ sp.bsr_matrix, sp.csr_matrix, sp.coo_matrix, sp.csc_matrix, sp.dia_matrix, sp.dok_matrix ] @pytest.fixture(params=sparse_params) def sparse_cls(request): return getattr(sparse, request.param) @pytest.fixture(params=scipy_sparse_classes) def sp_sparse_cls(request): return request.param @pytest.fixture def same_matrix(sparse_cls, sp_sparse_cls): np.random.seed(1234) A_dense = np.random.rand(9, 9) return sp_sparse_cls(A_dense), sparse_cls(A_dense) @pytest.fixture def matrices(sparse_cls): np.random.seed(1234) A_dense = np.random.rand(9, 9) A_dense = A_dense @ A_dense.T A_sparse = sparse_cls(A_dense) b = np.random.rand(9) return A_dense, A_sparse, b def test_isolve_gmres(matrices): # Several of the iterative solvers use the same # isolve.utils.make_system wrapper code, so test just one of them. 
A_dense, A_sparse, b = matrices x, info = splin.gmres(A_sparse, b, atol=1e-15) assert info == 0 assert isinstance(x, np.ndarray) assert_allclose(A_sparse @ x, b) def test_lsmr(matrices): A_dense, A_sparse, b = matrices res0 = splin.lsmr(A_dense, b) res = splin.lsmr(A_sparse, b) assert_allclose(res[0], res0[0], atol=1.8e-5) # test issue 17012 def test_lsmr_output_shape(): x = splin.lsmr(A=np.ones((10, 1)), b=np.zeros(10), x0=np.ones(1))[0] assert_equal(x.shape, (1,)) def test_lsqr(matrices): A_dense, A_sparse, b = matrices res0 = splin.lsqr(A_dense, b) res = splin.lsqr(A_sparse, b) assert_allclose(res[0], res0[0], atol=1e-5) def test_eigs(matrices): A_dense, A_sparse, v0 = matrices M_dense = np.diag(v0**2) M_sparse = A_sparse.__class__(M_dense) w_dense, v_dense = splin.eigs(A_dense, k=3, v0=v0) w, v = splin.eigs(A_sparse, k=3, v0=v0) assert_allclose(w, w_dense) assert_allclose(v, v_dense) for M in [M_sparse, M_dense]: w_dense, v_dense = splin.eigs(A_dense, M=M_dense, k=3, v0=v0) w, v = splin.eigs(A_sparse, M=M, k=3, v0=v0) assert_allclose(w, w_dense) assert_allclose(v, v_dense) w_dense, v_dense = splin.eigsh(A_dense, M=M_dense, k=3, v0=v0) w, v = splin.eigsh(A_sparse, M=M, k=3, v0=v0) assert_allclose(w, w_dense) assert_allclose(v, v_dense) def test_svds(matrices): A_dense, A_sparse, v0 = matrices u0, s0, vt0 = splin.svds(A_dense, k=2, v0=v0) u, s, vt = splin.svds(A_sparse, k=2, v0=v0) assert_allclose(s, s0) assert_allclose(u, u0) assert_allclose(vt, vt0) def test_lobpcg(matrices): A_dense, A_sparse, x = matrices X = x[:,None] w_dense, v_dense = splin.lobpcg(A_dense, X) w, v = splin.lobpcg(A_sparse, X) assert_allclose(w, w_dense) assert_allclose(v, v_dense) def test_spsolve(matrices): A_dense, A_sparse, b = matrices b2 = np.random.rand(len(b), 3) x0 = splin.spsolve(sp.csc_matrix(A_dense), b) x = splin.spsolve(A_sparse, b) assert isinstance(x, np.ndarray) assert_allclose(x, x0) x0 = splin.spsolve(sp.csc_matrix(A_dense), b) x = splin.spsolve(A_sparse, b, 
use_umfpack=True) assert isinstance(x, np.ndarray) assert_allclose(x, x0) x0 = splin.spsolve(sp.csc_matrix(A_dense), b2) x = splin.spsolve(A_sparse, b2) assert isinstance(x, np.ndarray) assert_allclose(x, x0) x0 = splin.spsolve(sp.csc_matrix(A_dense), sp.csc_matrix(A_dense)) x = splin.spsolve(A_sparse, A_sparse) assert isinstance(x, type(A_sparse)) assert_allclose(x.toarray(), x0.toarray()) def test_splu(matrices): A_dense, A_sparse, b = matrices n = len(b) sparse_cls = type(A_sparse) lu = splin.splu(A_sparse) assert isinstance(lu.L, sparse_cls) assert isinstance(lu.U, sparse_cls) Pr = sparse_cls(sp.csc_matrix((np.ones(n), (lu.perm_r, np.arange(n))))) Pc = sparse_cls(sp.csc_matrix((np.ones(n), (np.arange(n), lu.perm_c)))) A2 = Pr.T @ lu.L @ lu.U @ Pc.T assert_allclose(A2.toarray(), A_sparse.toarray()) z = lu.solve(A_sparse.toarray()) assert_allclose(z, np.eye(n), atol=1e-10) def test_spilu(matrices): A_dense, A_sparse, b = matrices sparse_cls = type(A_sparse) lu = splin.spilu(A_sparse) assert isinstance(lu.L, sparse_cls) assert isinstance(lu.U, sparse_cls) z = lu.solve(A_sparse.toarray()) assert_allclose(z, np.eye(len(b)), atol=1e-3) def test_spsolve_triangular(matrices): A_dense, A_sparse, b = matrices A_sparse = sparse.tril(A_sparse) x = splin.spsolve_triangular(A_sparse, b) assert_allclose(A_sparse @ x, b) def test_onenormest(matrices): A_dense, A_sparse, b = matrices est0 = splin.onenormest(A_dense) est = splin.onenormest(A_sparse) assert_allclose(est, est0) def test_inv(matrices): A_dense, A_sparse, b = matrices x0 = splin.inv(sp.csc_matrix(A_dense)) x = splin.inv(A_sparse) assert_allclose(x.toarray(), x0.toarray()) def test_expm(matrices): A_dense, A_sparse, b = matrices x0 = splin.expm(sp.csc_matrix(A_dense)) x = splin.expm(A_sparse) assert_allclose(x.toarray(), x0.toarray()) def test_expm_multiply(matrices): A_dense, A_sparse, b = matrices x0 = splin.expm_multiply(A_dense, b) x = splin.expm_multiply(A_sparse, b) assert_allclose(x, x0) def 
test_eq(same_matrix): sp_sparse, pd_sparse = same_matrix assert (sp_sparse == pd_sparse).all() def test_ne(same_matrix): sp_sparse, pd_sparse = same_matrix assert not (sp_sparse != pd_sparse).any()
6,124
24.309917
76
py