repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
scipy
|
scipy-main/scipy/sparse/linalg/tests/test_onenormest.py
|
"""Test functions for the sparse.linalg._onenormest module
"""
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_
import pytest
import scipy.linalg
import scipy.sparse.linalg
from scipy.sparse.linalg._onenormest import _onenormest_core, _algorithm_2_2
class MatrixProductOperator(scipy.sparse.linalg.LinearOperator):
    """
    LinearOperator representing the product A @ B without forming it.

    This is purely for onenormest testing.
    """
    def __init__(self, A, B):
        # Both operands must be 2-D with compatible inner dimensions.
        if not (A.ndim == 2 and B.ndim == 2):
            raise ValueError('expected ndarrays representing matrices')
        if B.shape[0] != A.shape[1]:
            raise ValueError('incompatible shapes')
        self.A = A
        self.B = B
        self.ndim = 2
        self.shape = (A.shape[0], B.shape[1])

    def _matvec(self, x):
        # (A @ B) @ x, evaluated as A @ (B @ x) to avoid the dense product.
        return self.A.dot(self.B.dot(x))

    def _rmatvec(self, x):
        # x @ (A @ B), evaluated as (x @ A) @ B.
        return x.dot(self.A).dot(self.B)

    def _matmat(self, X):
        # (A @ B) @ X, evaluated as A @ (B @ X).
        return self.A.dot(self.B.dot(X))

    @property
    def T(self):
        # (A @ B).T == B.T @ A.T
        return MatrixProductOperator(self.B.T, self.A.T)
class TestOnenormest:
    """Tests of the private 1-norm estimator core and its public wrapper.

    The ``table_*`` test names refer to tables of expected statistics for
    the estimation algorithm; the checked numeric ranges below encode those
    expectations.  Most tests are stochastic and marked ``xslow``.
    """

    @pytest.mark.xslow
    def test_onenormest_table_3_t_2(self):
        # This will take multiple seconds if your computer is slow like mine.
        # It is stochastic, so the tolerance could be too strict.
        np.random.seed(1234)
        t = 2
        n = 100
        itmax = 5
        nsamples = 5000
        observed = []
        expected = []
        nmult_list = []
        nresample_list = []
        for i in range(nsamples):
            # Estimate the 1-norm of the inverse of a random normal matrix
            # and compare it with the exact 1-norm.
            A = scipy.linalg.inv(np.random.randn(n, n))
            est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
            observed.append(est)
            expected.append(scipy.linalg.norm(A, 1))
            nmult_list.append(nmults)
            nresample_list.append(nresamples)
        observed = np.array(observed, dtype=float)
        expected = np.array(expected, dtype=float)
        relative_errors = np.abs(observed - expected) / expected

        # check the mean underestimation ratio
        underestimation_ratio = observed / expected
        assert_(0.99 < np.mean(underestimation_ratio) < 1.0)

        # check the max and mean required column resamples
        assert_equal(np.max(nresample_list), 2)
        assert_(0.05 < np.mean(nresample_list) < 0.2)

        # check the proportion of norms computed exactly correctly
        nexact = np.count_nonzero(relative_errors < 1e-14)
        proportion_exact = nexact / float(nsamples)
        assert_(0.9 < proportion_exact < 0.95)

        # check the average number of matrix*vector multiplications
        assert_(3.5 < np.mean(nmult_list) < 4.5)

    @pytest.mark.xslow
    def test_onenormest_table_4_t_7(self):
        # This will take multiple seconds if your computer is slow like mine.
        # It is stochastic, so the tolerance could be too strict.
        np.random.seed(1234)
        t = 7
        n = 100
        itmax = 5
        nsamples = 5000
        observed = []
        expected = []
        nmult_list = []
        nresample_list = []
        for i in range(nsamples):
            # Integer matrix with entries drawn uniformly from {-1, 0, 1}.
            A = np.random.randint(-1, 2, size=(n, n))
            est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
            observed.append(est)
            expected.append(scipy.linalg.norm(A, 1))
            nmult_list.append(nmults)
            nresample_list.append(nresamples)
        observed = np.array(observed, dtype=float)
        expected = np.array(expected, dtype=float)
        relative_errors = np.abs(observed - expected) / expected

        # check the mean underestimation ratio
        underestimation_ratio = observed / expected
        assert_(0.90 < np.mean(underestimation_ratio) < 0.99)

        # check the required column resamples
        assert_equal(np.max(nresample_list), 0)

        # check the proportion of norms computed exactly correctly
        nexact = np.count_nonzero(relative_errors < 1e-14)
        proportion_exact = nexact / float(nsamples)
        assert_(0.15 < proportion_exact < 0.25)

        # check the average number of matrix*vector multiplications
        assert_(3.5 < np.mean(nmult_list) < 4.5)

    def test_onenormest_table_5_t_1(self):
        # "note that there is no randomness and hence only one estimate for t=1"
        t = 1
        n = 100
        itmax = 5
        alpha = 1 - 1e-6
        # Two constructions of the same bidiagonal-inverse matrix; the
        # assert_allclose below confirms they agree.
        A = -scipy.linalg.inv(np.identity(n) + alpha*np.eye(n, k=1))
        first_col = np.array([1] + [0]*(n-1))
        first_row = np.array([(-alpha)**i for i in range(n)])
        B = -scipy.linalg.toeplitz(first_col, first_row)
        assert_allclose(A, B)
        est, v, w, nmults, nresamples = _onenormest_core(B, B.T, t, itmax)
        exact_value = scipy.linalg.norm(B, 1)
        underest_ratio = est / exact_value
        # This particular matrix is a known hard case: the estimate comes
        # out at about 5% of the exact norm.
        assert_allclose(underest_ratio, 0.05, rtol=1e-4)
        assert_equal(nmults, 11)
        assert_equal(nresamples, 0)
        # check the non-underscored version of onenormest
        est_plain = scipy.sparse.linalg.onenormest(B, t=t, itmax=itmax)
        assert_allclose(est, est_plain)

    @pytest.mark.xslow
    def test_onenormest_table_6_t_1(self):
        #TODO this test seems to give estimates that match the table,
        #TODO even though no attempt has been made to deal with
        #TODO complex numbers in the one-norm estimation.
        # This will take multiple seconds if your computer is slow like mine.
        # It is stochastic, so the tolerance could be too strict.
        np.random.seed(1234)
        t = 1
        n = 100
        itmax = 5
        nsamples = 5000
        observed = []
        expected = []
        nmult_list = []
        nresample_list = []
        for i in range(nsamples):
            # Inverse of a random complex-valued matrix.
            A_inv = np.random.rand(n, n) + 1j * np.random.rand(n, n)
            A = scipy.linalg.inv(A_inv)
            est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
            observed.append(est)
            expected.append(scipy.linalg.norm(A, 1))
            nmult_list.append(nmults)
            nresample_list.append(nresamples)
        observed = np.array(observed, dtype=float)
        expected = np.array(expected, dtype=float)
        relative_errors = np.abs(observed - expected) / expected

        # check the mean underestimation ratio
        underestimation_ratio = observed / expected
        underestimation_ratio_mean = np.mean(underestimation_ratio)
        assert_(0.90 < underestimation_ratio_mean < 0.99)

        # check the required column resamples
        max_nresamples = np.max(nresample_list)
        assert_equal(max_nresamples, 0)

        # check the proportion of norms computed exactly correctly
        nexact = np.count_nonzero(relative_errors < 1e-14)
        proportion_exact = nexact / float(nsamples)
        assert_(0.7 < proportion_exact < 0.8)

        # check the average number of matrix*vector multiplications
        mean_nmult = np.mean(nmult_list)
        assert_(4 < mean_nmult < 5)

    def _help_product_norm_slow(self, A, B):
        # for profiling: exact 1-norm of the explicitly formed product A @ B
        C = np.dot(A, B)
        return scipy.linalg.norm(C, 1)

    def _help_product_norm_fast(self, A, B):
        # for profiling: estimated 1-norm of A @ B through a LinearOperator,
        # without ever forming the dense product
        t = 2
        itmax = 5
        D = MatrixProductOperator(A, B)
        est, v, w, nmults, nresamples = _onenormest_core(D, D.T, t, itmax)
        return est

    @pytest.mark.slow
    def test_onenormest_linear_operator(self):
        # Define a matrix through its product A B.
        # Depending on the shapes of A and B,
        # it could be easy to multiply this product by a small matrix,
        # but it could be annoying to look at all of
        # the entries of the product explicitly.
        np.random.seed(1234)
        n = 6000
        k = 3
        A = np.random.randn(n, k)
        B = np.random.randn(k, n)
        fast_estimate = self._help_product_norm_fast(A, B)
        exact_value = self._help_product_norm_slow(A, B)
        # estimate never exceeds the exact norm, and is at most a
        # factor of 3 below it
        assert_(fast_estimate <= exact_value <= 3*fast_estimate,
                f'fast: {fast_estimate:g}\nexact:{exact_value:g}')

    def test_returns(self):
        # The compute_v / compute_w flags must yield consistent extras.
        np.random.seed(1234)
        A = scipy.sparse.rand(50, 50, 0.1)
        s0 = scipy.linalg.norm(A.toarray(), 1)
        s1, v = scipy.sparse.linalg.onenormest(A, compute_v=True)
        s2, w = scipy.sparse.linalg.onenormest(A, compute_w=True)
        s3, v2, w2 = scipy.sparse.linalg.onenormest(A, compute_w=True, compute_v=True)

        assert_allclose(s1, s0, rtol=1e-9)
        # v attains the norm: ||A v||_1 == estimate * ||v||_1
        assert_allclose(np.linalg.norm(A.dot(v), 1), s0*np.linalg.norm(v, 1),
                        rtol=1e-9)
        # w is the corresponding output vector A v
        assert_allclose(A.dot(v), w, rtol=1e-9)
class TestAlgorithm_2_2:
    """Smoke tests of the private ``_algorithm_2_2`` helper."""

    def test_randn_inv(self):
        # Run the helper on many random problems; this only checks that it
        # executes without error over a range of t and n.
        np.random.seed(1234)
        n = 20
        nsamples = 100
        for _ in range(nsamples):
            # Choose integer t uniformly between 1 and 3 inclusive.
            t = np.random.randint(1, 4)
            # Choose n uniformly between 10 and 40 inclusive.
            n = np.random.randint(10, 41)
            # Sample the inverse of a matrix with random normal entries.
            A = scipy.linalg.inv(np.random.randn(n, n))
            # Compute the 1-norm bounds.
            g, ind = _algorithm_2_2(A, A.T, t)
| 9,227
| 35.474308
| 88
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/tests/test_propack.py
|
import os
import pytest
import sys
import numpy as np
from numpy.testing import assert_allclose
from pytest import raises as assert_raises
from scipy.sparse.linalg._svdp import _svdp
from scipy.sparse import csr_matrix, csc_matrix
# dtype_flavour to tolerance
# (single-precision flavours get looser tolerances than double precision)
TOLS = {
    np.float32: 1e-4,
    np.float64: 1e-8,
    np.complex64: 1e-4,
    np.complex128: 1e-8,
}
def is_complex_type(dtype):
    """Return True when *dtype* is a complex floating-point type."""
    kind = np.dtype(dtype).kind
    return kind == "c"
def is_32bit():
    """Return True on a 32-bit Python build.

    On 32-bit builds ``sys.maxsize`` is usually ``2**31 - 1``.
    """
    return not sys.maxsize > 2**32
def is_windows():
    """Return True when running on a Windows platform."""
    # equivalent to: 'win32' in sys.platform
    return sys.platform.find('win32') != -1
# Build the dtype parametrization for the tests below, attaching skip/slow
# marks to the complex flavours depending on the platform.
_dtypes = []
for dtype_flavour in TOLS.keys():
    marks = []
    if is_complex_type(dtype_flavour):
        if is_32bit():
            # PROPACK has issues w/ complex on 32-bit; see gh-14433
            marks = [pytest.mark.skip]
        elif is_windows() and np.dtype(dtype_flavour).itemsize == 16:
            # windows crashes for complex128 (so don't xfail); see gh-15108
            marks = [pytest.mark.skip]
        else:
            marks = [pytest.mark.slow]  # type: ignore[list-item]
    _dtypes.append(pytest.param(dtype_flavour, marks=marks,
                                id=dtype_flavour.__name__))
_dtypes = tuple(_dtypes)  # type: ignore[assignment]
def generate_matrix(constructor, n, m, f,
                    dtype=float, rseed=0, **kwargs):
    """Generate a random sparse matrix"""
    rng = np.random.RandomState(rseed)
    # Real entries uniform on [-5, 5); complex dtypes also get an
    # independent imaginary part on the same range.
    real_part = -5 + 10 * rng.rand(n, m)
    if is_complex_type(dtype):
        imag_part = -5j + 10j * rng.rand(n, m)
        M = (real_part + imag_part).astype(dtype)
    else:
        M = real_part.astype(dtype)
    # Zero out entries whose real part exceeds the threshold, giving a
    # fraction of roughly f nonzero entries.
    threshold = 10 * f - 5
    M[M.real > threshold] = 0
    return constructor(M, **kwargs)
def assert_orthogonal(u1, u2, rtol, atol):
    """Check that the first k rows of u1 and u2 are orthogonal"""
    gram = np.abs(np.dot(u1.conj().T, u2))
    expected = np.eye(u1.shape[1], u2.shape[1])
    assert_allclose(gram, expected, rtol=rtol, atol=atol)
def check_svdp(n, m, constructor, dtype, k, irl_mode, which, f=0.8):
    """Compare ``_svdp``'s top-k singular triplets against ``np.linalg.svd``.

    Builds a random n-by-m matrix (sparsity fraction controlled by ``f``),
    wraps it with *constructor*, and checks the singular values and the
    orthogonality of the singular vectors at the dtype-dependent tolerance
    from ``TOLS``.
    """
    tol = TOLS[dtype]

    M = generate_matrix(np.asarray, n, m, f, dtype)
    Msp = constructor(M)

    u1, sigma1, vt1 = np.linalg.svd(M, full_matrices=False)
    u2, sigma2, vt2, _ = _svdp(Msp, k=k, which=which, irl_mode=irl_mode,
                               tol=tol)

    # check the which
    if which.upper() == 'SM':
        # 'SM' asks for the smallest singular values; rotate the reference
        # decomposition so its smallest k entries line up with _svdp's output.
        u1 = np.roll(u1, k, 1)
        vt1 = np.roll(vt1, k, 0)
        sigma1 = np.roll(sigma1, k)

    # check that singular values agree
    assert_allclose(sigma1[:k], sigma2, rtol=tol, atol=tol)

    # check that singular vectors are orthogonal
    assert_orthogonal(u1, u2, rtol=tol, atol=tol)
    assert_orthogonal(vt1.T, vt2.T, rtol=tol, atol=tol)
@pytest.mark.parametrize('ctor', (np.array, csr_matrix, csc_matrix))
@pytest.mark.parametrize('dtype', _dtypes)
@pytest.mark.parametrize('irl', (True, False))
@pytest.mark.parametrize('which', ('LM', 'SM'))
def test_svdp(ctor, dtype, irl, which):
    # Exercise check_svdp across input container, dtype, IRL mode and `which`.
    np.random.seed(0)
    n, m, k = 10, 20, 3
    if which == 'SM' and not irl:
        # 'SM' is only supported in IRL mode; a ValueError is expected.
        message = "`which`='SM' requires irl_mode=True"
        with assert_raises(ValueError, match=message):
            check_svdp(n, m, ctor, dtype, k, irl, which)
    else:
        if is_32bit() and is_complex_type(dtype):
            # complex PROPACK is unavailable on 32-bit builds; see gh-14433
            message = 'PROPACK complex-valued SVD methods not available '
            with assert_raises(TypeError, match=message):
                check_svdp(n, m, ctor, dtype, k, irl, which)
        else:
            check_svdp(n, m, ctor, dtype, k, irl, which)
@pytest.mark.parametrize('dtype', _dtypes)
@pytest.mark.parametrize('irl', (False, True))
@pytest.mark.timeout(120)  # True, complex64 > 60 s: prerel deps cov 64bit blas
def test_examples(dtype, irl):
    """Compare ``_svdp`` against ``np.linalg.svd`` on PROPACK example data."""
    # Note: atol for complex64 bumped from 1e-4 to 1e-3 due to test failures
    # with BLIS, Netlib, and MKL+AVX512 - see
    # https://github.com/conda-forge/scipy-feedstock/pull/198#issuecomment-999180432
    atol = {
        np.float32: 1.3e-4,
        np.float64: 1e-9,
        np.complex64: 1e-3,
        np.complex128: 1e-9,
    }[dtype]

    path_prefix = os.path.dirname(__file__)
    # Test matrices from `illc1850.coord` and `mhd1280b.cua` distributed with
    # PROPACK 2.1: http://sun.stanford.edu/~rmunk/PROPACK/
    relative_path = "propack_test_data.npz"
    filename = os.path.join(path_prefix, relative_path)
    data = np.load(filename, allow_pickle=True)

    if is_complex_type(dtype):
        A = data['A_complex'].item().astype(dtype)
    else:
        A = data['A_real'].item().astype(dtype)

    k = 200
    u, s, vh, _ = _svdp(A, k, irl_mode=irl, random_state=0)

    # complex example matrix has many repeated singular values, so check only
    # beginning non-repeated singular vectors to avoid permutations
    sv_check = 27 if is_complex_type(dtype) else k
    u = u[:, :sv_check]
    vh = vh[:sv_check, :]
    s = s[:sv_check]

    # Check orthogonality of singular vectors
    assert_allclose(np.eye(u.shape[1]), u.conj().T @ u, atol=atol)
    assert_allclose(np.eye(vh.shape[0]), vh @ vh.conj().T, atol=atol)

    # Ensure the norm of the difference between the np.linalg.svd and
    # PROPACK reconstructed matrices is small
    u3, s3, vh3 = np.linalg.svd(A.todense())
    u3 = u3[:, :sv_check]
    s3 = s3[:sv_check]
    vh3 = vh3[:sv_check, :]
    A3 = u3 @ np.diag(s3) @ vh3
    recon = u @ np.diag(s) @ vh
    assert_allclose(np.linalg.norm(A3 - recon), 0, atol=atol)
@pytest.mark.parametrize('shifts', (None, -10, 0, 1, 10, 70))
@pytest.mark.parametrize('dtype', _dtypes[:2])
def test_shifts(shifts, dtype):
    # Invalid shift values must raise ValueError; valid ones must run.
    np.random.seed(0)
    n, k = 70, 10
    A = np.random.random((n, n))
    # shifts must be non-negative and leave room for k vectors
    if shifts is not None and ((shifts < 0) or (k > min(n-1-shifts, n))):
        with pytest.raises(ValueError):
            _svdp(A, k, shifts=shifts, kmax=5*k, irl_mode=True)
    else:
        _svdp(A, k, shifts=shifts, kmax=5*k, irl_mode=True)
@pytest.mark.slow
@pytest.mark.xfail()
def test_shifts_accuracy():
    # Known-failing accuracy check: results with different shift counts
    # should agree but currently do not for which='SM'.
    np.random.seed(0)
    n, k = 70, 10
    A = np.random.random((n, n)).astype(np.double)
    u1, s1, vt1, _ = _svdp(A, k, shifts=None, which='SM', irl_mode=True)
    u2, s2, vt2, _ = _svdp(A, k, shifts=32, which='SM', irl_mode=True)
    # shifts <= 32 doesn't agree with shifts > 32
    # Does agree when which='LM' instead of 'SM'
    assert_allclose(s1, s2)
| 6,284
| 32.430851
| 84
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/sparse/linalg/tests/test_expm_multiply.py
|
"""Test functions for the sparse.linalg._expm_multiply module."""
from functools import partial
from itertools import product
import numpy as np
import pytest
from numpy.testing import (assert_allclose, assert_, assert_equal,
suppress_warnings)
from scipy.sparse import SparseEfficiencyWarning
from scipy.sparse.linalg import aslinearoperator
import scipy.linalg
from scipy.sparse.linalg import expm as sp_expm
from scipy.sparse.linalg._expm_multiply import (_theta, _compute_p_max,
_onenormest_matrix_power, expm_multiply, _expm_multiply_simple,
_expm_multiply_interval)
# dtypes for which looser tolerances are used in the dtype tests below
IMPRECISE = {np.single, np.csingle}
REAL_DTYPES = {np.intc, np.int_, np.longlong,
               np.single, np.double, np.longdouble}
COMPLEX_DTYPES = {np.csingle, np.cdouble, np.clongdouble}
# use sorted tuple to ensure fixed order of tests
# NOTE: `^` is symmetric difference; since the two sets are disjoint this
# equals their union.
DTYPES = tuple(sorted(REAL_DTYPES ^ COMPLEX_DTYPES, key=str))
def estimated(func):
    """Wrap *func* so the call must emit the trace-estimation UserWarning.

    Estimating the trace might impact performance, hence the warning; all
    results still have to be correct.
    """
    def wrapped(*args, **kwds):
        expected_message = "Trace of LinearOperator not available"
        with pytest.warns(UserWarning, match=expected_message):
            return func(*args, **kwds)
    return wrapped
def less_than_or_close(a, b):
    """Return True when ``a`` is less than, or approximately equal to, ``b``."""
    if np.allclose(a, b):
        return True
    return a < b
class TestExpmActionSimple:
    """
    These tests do not consider the case of multiple time steps in one call.
    """

    def test_theta_monotonicity(self):
        # The _theta table must be strictly increasing in m.
        pairs = sorted(_theta.items())
        for (m_a, theta_a), (m_b, theta_b) in zip(pairs[:-1], pairs[1:]):
            assert_(theta_a < theta_b)

    def test_p_max_default(self):
        m_max = 55
        expected_p_max = 8
        observed_p_max = _compute_p_max(m_max)
        assert_equal(observed_p_max, expected_p_max)

    def test_p_max_range(self):
        for m_max in range(1, 55+1):
            p_max = _compute_p_max(m_max)
            # p_max is the largest p satisfying p*(p-1) <= m_max + 1
            assert_(p_max*(p_max - 1) <= m_max + 1)
            p_too_big = p_max + 1
            assert_(p_too_big*(p_too_big - 1) > m_max + 1)

    def test_onenormest_matrix_power(self):
        np.random.seed(1234)
        n = 40
        nsamples = 10
        for i in range(nsamples):
            A = scipy.linalg.inv(np.random.randn(n, n))
            for p in range(4):
                # Accumulate M = A**p incrementally (p == 0 gives identity).
                if not p:
                    M = np.identity(n)
                else:
                    M = np.dot(M, A)
                # NOTE: local `estimated` shadows the module-level decorator
                # of the same name within this loop.
                estimated = _onenormest_matrix_power(A, p)
                exact = np.linalg.norm(M, 1)
                # estimate never exceeds the exact norm and is within a
                # factor of 3 below it
                assert_(less_than_or_close(estimated, exact))
                assert_(less_than_or_close(exact, 3*estimated))

    def test_expm_multiply(self):
        np.random.seed(1234)
        n = 40
        k = 3
        nsamples = 10
        for i in range(nsamples):
            A = scipy.linalg.inv(np.random.randn(n, n))
            B = np.random.randn(n, k)
            observed = expm_multiply(A, B)
            expected = np.dot(sp_expm(A), B)
            assert_allclose(observed, expected)
            # LinearOperator input with unknown trace must warn (and still
            # give the correct result).
            observed = estimated(expm_multiply)(aslinearoperator(A), B)
            assert_allclose(observed, expected)
            # Supplying the trace explicitly avoids the warning.
            traceA = np.trace(A)
            observed = expm_multiply(aslinearoperator(A), B, traceA=traceA)
            assert_allclose(observed, expected)

    def test_matrix_vector_multiply(self):
        np.random.seed(1234)
        n = 40
        nsamples = 10
        for i in range(nsamples):
            A = scipy.linalg.inv(np.random.randn(n, n))
            v = np.random.randn(n)
            observed = expm_multiply(A, v)
            expected = np.dot(sp_expm(A), v)
            assert_allclose(observed, expected)
            observed = estimated(expm_multiply)(aslinearoperator(A), v)
            assert_allclose(observed, expected)

    def test_scaled_expm_multiply(self):
        np.random.seed(1234)
        n = 40
        k = 3
        nsamples = 10
        for i, t in product(range(nsamples), [0.2, 1.0, 1.5]):
            # silence invalid-value floating point warnings for this sample
            with np.errstate(invalid='ignore'):
                A = scipy.linalg.inv(np.random.randn(n, n))
                B = np.random.randn(n, k)
                observed = _expm_multiply_simple(A, B, t=t)
                expected = np.dot(sp_expm(t*A), B)
                assert_allclose(observed, expected)
                observed = estimated(_expm_multiply_simple)(
                    aslinearoperator(A), B, t=t
                )
                assert_allclose(observed, expected)

    def test_scaled_expm_multiply_single_timepoint(self):
        np.random.seed(1234)
        t = 0.1
        n = 5
        k = 2
        A = np.random.randn(n, n)
        B = np.random.randn(n, k)
        observed = _expm_multiply_simple(A, B, t=t)
        expected = sp_expm(t*A).dot(B)
        assert_allclose(observed, expected)
        observed = estimated(_expm_multiply_simple)(
            aslinearoperator(A), B, t=t
        )
        assert_allclose(observed, expected)

    def test_sparse_expm_multiply(self):
        np.random.seed(1234)
        n = 40
        k = 3
        nsamples = 10
        for i in range(nsamples):
            A = scipy.sparse.rand(n, n, density=0.05)
            B = np.random.randn(n, k)
            observed = expm_multiply(A, B)
            # the dense reference computation emits sparse-efficiency
            # warnings; filter them out
            with suppress_warnings() as sup:
                sup.filter(SparseEfficiencyWarning,
                           "splu converted its input to CSC format")
                sup.filter(SparseEfficiencyWarning,
                           "spsolve is more efficient when sparse b is in the"
                           " CSC matrix format")
                expected = sp_expm(A).dot(B)
            assert_allclose(observed, expected)
            observed = estimated(expm_multiply)(aslinearoperator(A), B)
            assert_allclose(observed, expected)

    def test_complex(self):
        A = np.array([
            [1j, 1j],
            [0, 1j]], dtype=complex)
        B = np.array([1j, 1j])
        observed = expm_multiply(A, B)
        # hand-computed action of expm(A) on B for this 2x2 upper
        # triangular complex matrix
        expected = np.array([
            1j * np.exp(1j) + 1j * (1j*np.cos(1) - np.sin(1)),
            1j * np.exp(1j)], dtype=complex)
        assert_allclose(observed, expected)
        observed = estimated(expm_multiply)(aslinearoperator(A), B)
        assert_allclose(observed, expected)
class TestExpmActionInterval:
    """Tests of expm_multiply evaluated over an interval of time points."""

    def test_sparse_expm_multiply_interval(self):
        np.random.seed(1234)
        start = 0.1
        stop = 3.2
        n = 40
        k = 3
        endpoint = True
        for num in (14, 13, 2):
            A = scipy.sparse.rand(n, n, density=0.05)
            B = np.random.randn(n, k)
            v = np.random.randn(n)
            # check both the matrix and the vector right-hand side
            for target in (B, v):
                X = expm_multiply(A, target, start=start, stop=stop,
                                  num=num, endpoint=endpoint)
                samples = np.linspace(start=start, stop=stop,
                                      num=num, endpoint=endpoint)
                with suppress_warnings() as sup:
                    sup.filter(SparseEfficiencyWarning,
                               "splu converted its input to CSC format")
                    sup.filter(SparseEfficiencyWarning,
                               "spsolve is more efficient when sparse b is in"
                               " the CSC matrix format")
                    # each slice of X is expm(t*A) @ target at the matching t
                    for solution, t in zip(X, samples):
                        assert_allclose(solution, sp_expm(t*A).dot(target))

    def test_expm_multiply_interval_vector(self):
        np.random.seed(1234)
        interval = {'start': 0.1, 'stop': 3.2, 'endpoint': True}
        for num, n in product([14, 13, 2], [1, 2, 5, 20, 40]):
            A = scipy.linalg.inv(np.random.randn(n, n))
            v = np.random.randn(n)
            samples = np.linspace(num=num, **interval)
            X = expm_multiply(A, v, num=num, **interval)
            for solution, t in zip(X, samples):
                assert_allclose(solution, sp_expm(t*A).dot(v))
            # test for linear operator with unknown trace -> estimate trace
            Xguess = estimated(expm_multiply)(aslinearoperator(A), v,
                                              num=num, **interval)
            # test for linear operator with given trace
            Xgiven = expm_multiply(aslinearoperator(A), v, num=num, **interval,
                                   traceA=np.trace(A))
            # test robustness for linear operator with wrong trace
            Xwrong = expm_multiply(aslinearoperator(A), v, num=num, **interval,
                                   traceA=np.trace(A)*5)
            for sol_guess, sol_given, sol_wrong, t in zip(Xguess, Xgiven,
                                                          Xwrong, samples):
                correct = sp_expm(t*A).dot(v)
                assert_allclose(sol_guess, correct)
                assert_allclose(sol_given, correct)
                assert_allclose(sol_wrong, correct)

    def test_expm_multiply_interval_matrix(self):
        np.random.seed(1234)
        interval = {'start': 0.1, 'stop': 3.2, 'endpoint': True}
        for num, n, k in product([14, 13, 2], [1, 2, 5, 20, 40], [1, 2]):
            A = scipy.linalg.inv(np.random.randn(n, n))
            B = np.random.randn(n, k)
            samples = np.linspace(num=num, **interval)
            X = expm_multiply(A, B, num=num, **interval)
            for solution, t in zip(X, samples):
                assert_allclose(solution, sp_expm(t*A).dot(B))
            X = estimated(expm_multiply)(aslinearoperator(A), B, num=num,
                                         **interval)
            for solution, t in zip(X, samples):
                assert_allclose(solution, sp_expm(t*A).dot(B))

    def test_sparse_expm_multiply_interval_dtypes(self):
        # Diagonal matrices have a closed-form matrix exponential, which
        # makes the dtype combinations easy to verify exactly.
        # Test A & B int
        A = scipy.sparse.diags(np.arange(5),format='csr', dtype=int)
        B = np.ones(5, dtype=int)
        Aexpm = scipy.sparse.diags(np.exp(np.arange(5)),format='csr')
        assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B))

        # Test A complex, B int
        A = scipy.sparse.diags(-1j*np.arange(5),format='csr', dtype=complex)
        B = np.ones(5, dtype=int)
        Aexpm = scipy.sparse.diags(np.exp(-1j*np.arange(5)),format='csr')
        assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B))

        # Test A int, B complex
        A = scipy.sparse.diags(np.arange(5),format='csr', dtype=int)
        B = np.full(5, 1j, dtype=complex)
        Aexpm = scipy.sparse.diags(np.exp(np.arange(5)),format='csr')
        assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B))

    def test_expm_multiply_interval_status_0(self):
        self._help_test_specific_expm_interval_status(0)

    def test_expm_multiply_interval_status_1(self):
        self._help_test_specific_expm_interval_status(1)

    def test_expm_multiply_interval_status_2(self):
        self._help_test_specific_expm_interval_status(2)

    def _help_test_specific_expm_interval_status(self, target_status):
        # Search random problems until one produces the requested internal
        # status code, then fully verify the solution for that problem.
        np.random.seed(1234)
        start = 0.1
        stop = 3.2
        num = 13  # NOTE: immediately overwritten by the loop below
        endpoint = True
        n = 5
        k = 2
        nrepeats = 10
        nsuccesses = 0
        for num in [14, 13, 2] * nrepeats:
            A = np.random.randn(n, n)
            B = np.random.randn(n, k)
            status = _expm_multiply_interval(A, B,
                    start=start, stop=stop, num=num, endpoint=endpoint,
                    status_only=True)
            if status == target_status:
                X, status = _expm_multiply_interval(A, B,
                        start=start, stop=stop, num=num, endpoint=endpoint,
                        status_only=False)
                assert_equal(X.shape, (num, n, k))
                samples = np.linspace(start=start, stop=stop,
                                      num=num, endpoint=endpoint)
                for solution, t in zip(X, samples):
                    assert_allclose(solution, sp_expm(t*A).dot(B))
                nsuccesses += 1
        if not nsuccesses:
            msg = 'failed to find a status-' + str(target_status) + ' interval'
            raise Exception(msg)
@pytest.mark.parametrize("dtype_a", DTYPES)
@pytest.mark.parametrize("dtype_b", DTYPES)
@pytest.mark.parametrize("b_is_matrix", [False, True])
def test_expm_multiply_dtype(dtype_a, dtype_b, b_is_matrix):
"""Make sure `expm_multiply` handles all numerical dtypes correctly."""
assert_allclose_ = (partial(assert_allclose, rtol=1.2e-3, atol=1e-5)
if {dtype_a, dtype_b} & IMPRECISE else assert_allclose)
rng = np.random.default_rng(1234)
# test data
n = 7
b_shape = (n, 3) if b_is_matrix else (n, )
if dtype_a in REAL_DTYPES:
A = scipy.linalg.inv(rng.random([n, n])).astype(dtype_a)
else:
A = scipy.linalg.inv(
rng.random([n, n]) + 1j*rng.random([n, n])
).astype(dtype_a)
if dtype_b in REAL_DTYPES:
B = (2*rng.random(b_shape)).astype(dtype_b)
else:
B = (rng.random(b_shape) + 1j*rng.random(b_shape)).astype(dtype_b)
# single application
sol_mat = expm_multiply(A, B)
sol_op = estimated(expm_multiply)(aslinearoperator(A), B)
direct_sol = np.dot(sp_expm(A), B)
assert_allclose_(sol_mat, direct_sol)
assert_allclose_(sol_op, direct_sol)
sol_op = expm_multiply(aslinearoperator(A), B, traceA=np.trace(A))
assert_allclose_(sol_op, direct_sol)
# for time points
interval = {'start': 0.1, 'stop': 3.2, 'num': 13, 'endpoint': True}
samples = np.linspace(**interval)
X_mat = expm_multiply(A, B, **interval)
X_op = estimated(expm_multiply)(aslinearoperator(A), B, **interval)
for sol_mat, sol_op, t in zip(X_mat, X_op, samples):
direct_sol = sp_expm(t*A).dot(B)
assert_allclose_(sol_mat, direct_sol)
assert_allclose_(sol_op, direct_sol)
| 13,919
| 39.231214
| 79
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_isolve/lsqr.py
|
"""Sparse Equations and Least Squares.
The original Fortran code was written by C. C. Paige and M. A. Saunders as
described in
C. C. Paige and M. A. Saunders, LSQR: An algorithm for sparse linear
equations and sparse least squares, TOMS 8(1), 43--71 (1982).
C. C. Paige and M. A. Saunders, Algorithm 583; LSQR: Sparse linear
equations and least-squares problems, TOMS 8(2), 195--209 (1982).
It is licensed under the following BSD license:
Copyright (c) 2006, Systems Optimization Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Stanford University nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The Fortran code was translated to Python for use in CVXOPT by Jeffery
Kline with contributions by Mridul Aanjaneya and Bob Myhill.
Adapted for SciPy by Stefan van der Walt.
"""
__all__ = ['lsqr']
import numpy as np
from math import sqrt
from scipy.sparse.linalg._interface import aslinearoperator
eps = np.finfo(np.float64).eps
def _sym_ortho(a, b):
"""
Stable implementation of Givens rotation.
Notes
-----
The routine 'SymOrtho' was added for numerical stability. This is
recommended by S.-C. Choi in [1]_. It removes the unpleasant potential of
``1/eps`` in some important places (see, for example text following
"Compute the next plane rotation Qk" in minres.py).
References
----------
.. [1] S.-C. Choi, "Iterative Methods for Singular Linear Equations
and Least-Squares Problems", Dissertation,
http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf
"""
if b == 0:
return np.sign(a), 0, abs(a)
elif a == 0:
return 0, np.sign(b), abs(b)
elif abs(b) > abs(a):
tau = a / b
s = np.sign(b) / sqrt(1 + tau * tau)
c = s * tau
r = b / s
else:
tau = b / a
c = np.sign(a) / sqrt(1+tau*tau)
s = c * tau
r = a / c
return c, s, r
def lsqr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
iter_lim=None, show=False, calc_var=False, x0=None):
"""Find the least-squares solution to a large, sparse, linear system
of equations.
The function solves ``Ax = b`` or ``min ||Ax - b||^2`` or
``min ||Ax - b||^2 + d^2 ||x - x0||^2``.
The matrix A may be square or rectangular (over-determined or
under-determined), and may have any rank.
::
1. Unsymmetric equations -- solve Ax = b
2. Linear least squares -- solve Ax = b
in the least-squares sense
3. Damped least squares -- solve ( A )*x = ( b )
( damp*I ) ( damp*x0 )
in the least-squares sense
Parameters
----------
A : {sparse matrix, ndarray, LinearOperator}
Representation of an m-by-n matrix.
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` and ``A^T x`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : array_like, shape (m,)
Right-hand side vector ``b``.
damp : float
Damping coefficient. Default is 0.
atol, btol : float, optional
Stopping tolerances. `lsqr` continues iterations until a
certain backward error estimate is smaller than some quantity
depending on atol and btol. Let ``r = b - Ax`` be the
residual vector for the current approximate solution ``x``.
If ``Ax = b`` seems to be consistent, `lsqr` terminates
when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
Otherwise, `lsqr` terminates when ``norm(A^H r) <=
atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (default),
the final ``norm(r)`` should be accurate to about 6
digits. (The final ``x`` will usually have fewer correct digits,
depending on ``cond(A)`` and the size of LAMBDA.) If `atol`
or `btol` is None, a default value of 1.0e-6 will be used.
Ideally, they should be estimates of the relative error in the
entries of ``A`` and ``b`` respectively. For example, if the entries
of ``A`` have 7 correct digits, set ``atol = 1e-7``. This prevents
the algorithm from doing unnecessary work beyond the
uncertainty of the input data.
conlim : float, optional
Another stopping tolerance. lsqr terminates if an estimate of
``cond(A)`` exceeds `conlim`. For compatible systems ``Ax =
b``, `conlim` could be as large as 1.0e+12 (say). For
least-squares problems, conlim should be less than 1.0e+8.
Maximum precision can be obtained by setting ``atol = btol =
conlim = zero``, but the number of iterations may then be
excessive. Default is 1e8.
iter_lim : int, optional
Explicit limitation on number of iterations (for safety).
show : bool, optional
Display an iteration log. Default is False.
calc_var : bool, optional
Whether to estimate diagonals of ``(A'A + damp^2*I)^{-1}``.
x0 : array_like, shape (n,), optional
Initial guess of x, if None zeros are used. Default is None.
.. versionadded:: 1.0.0
Returns
-------
x : ndarray of float
The final solution.
istop : int
Gives the reason for termination.
1 means x is an approximate solution to Ax = b.
2 means x approximately solves the least-squares problem.
itn : int
Iteration number upon termination.
r1norm : float
``norm(r)``, where ``r = b - Ax``.
r2norm : float
``sqrt( norm(r)^2 + damp^2 * norm(x - x0)^2 )``. Equal to `r1norm`
if ``damp == 0``.
anorm : float
Estimate of Frobenius norm of ``Abar = [[A]; [damp*I]]``.
acond : float
Estimate of ``cond(Abar)``.
arnorm : float
Estimate of ``norm(A'@r - damp^2*(x - x0))``.
xnorm : float
``norm(x)``
var : ndarray of float
If ``calc_var`` is True, estimates all diagonals of
``(A'A)^{-1}`` (if ``damp == 0``) or more generally ``(A'A +
damp^2*I)^{-1}``. This is well defined if A has full column
rank or ``damp > 0``. (Not sure what var means if ``rank(A)
< n`` and ``damp = 0.``)
Notes
-----
LSQR uses an iterative method to approximate the solution. The
number of iterations required to reach a certain accuracy depends
strongly on the scaling of the problem. Poor scaling of the rows
or columns of A should therefore be avoided where possible.
For example, in problem 1 the solution is unaltered by
row-scaling. If a row of A is very small or large compared to
the other rows of A, the corresponding row of ( A b ) should be
scaled up or down.
In problems 1 and 2, the solution x is easily recovered
following column-scaling. Unless better information is known,
the nonzero columns of A should be scaled so that they all have
the same Euclidean norm (e.g., 1.0).
In problem 3, there is no freedom to re-scale if damp is
nonzero. However, the value of damp should be assigned only
after attention has been paid to the scaling of A.
The parameter damp is intended to help regularize
ill-conditioned systems, by preventing the true solution from
being very large. Another aid to regularization is provided by
the parameter acond, which may be used to terminate iterations
before the computed solution becomes very large.
If some initial estimate ``x0`` is known and if ``damp == 0``,
one could proceed as follows:
1. Compute a residual vector ``r0 = b - A@x0``.
2. Use LSQR to solve the system ``A@dx = r0``.
3. Add the correction dx to obtain a final solution ``x = x0 + dx``.
This requires that ``x0`` be available before and after the call
to LSQR. To judge the benefits, suppose LSQR takes k1 iterations
to solve A@x = b and k2 iterations to solve A@dx = r0.
If x0 is "good", norm(r0) will be smaller than norm(b).
If the same stopping tolerances atol and btol are used for each
system, k1 and k2 will be similar, but the final solution x0 + dx
should be more accurate. The only way to reduce the total work
is to use a larger stopping tolerance for the second system.
If some value btol is suitable for A@x = b, the larger value
btol*norm(b)/norm(r0) should be suitable for A@dx = r0.
Preconditioning is another way to reduce the number of iterations.
If it is possible to solve a related system ``M@x = b``
efficiently, where M approximates A in some helpful way (e.g. M -
A has low rank or its elements are small relative to those of A),
LSQR may converge more rapidly on the system ``A@M(inverse)@z =
b``, after which x can be recovered by solving M@x = z.
If A is symmetric, LSQR should not be used!
Alternatives are the symmetric conjugate-gradient method (cg)
and/or SYMMLQ. SYMMLQ is an implementation of symmetric cg that
applies to any symmetric A and will converge more rapidly than
LSQR. If A is positive definite, there are other implementations
of symmetric cg that require slightly less work per iteration than
SYMMLQ (but will take the same number of iterations).
References
----------
.. [1] C. C. Paige and M. A. Saunders (1982a).
"LSQR: An algorithm for sparse linear equations and
sparse least squares", ACM TOMS 8(1), 43-71.
.. [2] C. C. Paige and M. A. Saunders (1982b).
"Algorithm 583. LSQR: Sparse linear equations and least
squares problems", ACM TOMS 8(2), 195-209.
.. [3] M. A. Saunders (1995). "Solution of sparse rectangular
systems using LSQR and CRAIG", BIT 35, 588-604.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import lsqr
>>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float)
The first example has the trivial solution ``[0, 0]``
>>> b = np.array([0., 0., 0.], dtype=float)
>>> x, istop, itn, normr = lsqr(A, b)[:4]
>>> istop
0
>>> x
array([ 0., 0.])
The stopping code `istop=0` returned indicates that a vector of zeros was
found as a solution. The returned solution `x` indeed contains
``[0., 0.]``. The next example has a non-trivial solution:
>>> b = np.array([1., 0., -1.], dtype=float)
>>> x, istop, itn, r1norm = lsqr(A, b)[:4]
>>> istop
1
>>> x
array([ 1., -1.])
>>> itn
1
>>> r1norm
4.440892098500627e-16
As indicated by `istop=1`, `lsqr` found a solution obeying the tolerance
limits. The given solution ``[1., -1.]`` obviously solves the equation. The
remaining return values include information about the number of iterations
(`itn=1`) and the remaining difference of left and right side of the solved
equation.
The final example demonstrates the behavior in the case where there is no
solution for the equation:
>>> b = np.array([1., 0.01, -1.], dtype=float)
>>> x, istop, itn, r1norm = lsqr(A, b)[:4]
>>> istop
2
>>> x
array([ 1.00333333, -0.99666667])
>>> A.dot(x)-b
array([ 0.00333333, -0.00333333, 0.00333333])
>>> r1norm
0.005773502691896255
`istop` indicates that the system is inconsistent and thus `x` is rather an
approximate solution to the corresponding least-squares problem. `r1norm`
contains the norm of the minimal residual that was found.
"""
A = aslinearoperator(A)
b = np.atleast_1d(b)
if b.ndim > 1:
b = b.squeeze()
m, n = A.shape
if iter_lim is None:
iter_lim = 2 * n
var = np.zeros(n)
msg = ('The exact solution is x = 0 ',
'Ax - b is small enough, given atol, btol ',
'The least-squares solution is good enough, given atol ',
'The estimate of cond(Abar) has exceeded conlim ',
'Ax - b is small enough for this machine ',
'The least-squares solution is good enough for this machine',
'Cond(Abar) seems to be too large for this machine ',
'The iteration limit has been reached ')
if show:
print(' ')
print('LSQR Least-squares solution of Ax = b')
str1 = f'The matrix A has {m} rows and {n} columns'
str2 = f'damp = {damp:20.14e} calc_var = {calc_var:8g}'
str3 = f'atol = {atol:8.2e} conlim = {conlim:8.2e}'
str4 = f'btol = {btol:8.2e} iter_lim = {iter_lim:8g}'
print(str1)
print(str2)
print(str3)
print(str4)
itn = 0
istop = 0
ctol = 0
if conlim > 0:
ctol = 1/conlim
anorm = 0
acond = 0
dampsq = damp**2
ddnorm = 0
res2 = 0
xnorm = 0
xxnorm = 0
z = 0
cs2 = -1
sn2 = 0
# Set up the first vectors u and v for the bidiagonalization.
# These satisfy beta*u = b - A@x, alfa*v = A'@u.
u = b
bnorm = np.linalg.norm(b)
if x0 is None:
x = np.zeros(n)
beta = bnorm.copy()
else:
x = np.asarray(x0)
u = u - A.matvec(x)
beta = np.linalg.norm(u)
if beta > 0:
u = (1/beta) * u
v = A.rmatvec(u)
alfa = np.linalg.norm(v)
else:
v = x.copy()
alfa = 0
if alfa > 0:
v = (1/alfa) * v
w = v.copy()
rhobar = alfa
phibar = beta
rnorm = beta
r1norm = rnorm
r2norm = rnorm
# Reverse the order here from the original matlab code because
# there was an error on return when arnorm==0
arnorm = alfa * beta
if arnorm == 0:
if show:
print(msg[0])
return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
head1 = ' Itn x[0] r1norm r2norm '
head2 = ' Compatible LS Norm A Cond A'
if show:
print(' ')
print(head1, head2)
test1 = 1
test2 = alfa / beta
str1 = f'{itn:6g} {x[0]:12.5e}'
str2 = f' {r1norm:10.3e} {r2norm:10.3e}'
str3 = f' {test1:8.1e} {test2:8.1e}'
print(str1, str2, str3)
# Main iteration loop.
while itn < iter_lim:
itn = itn + 1
# Perform the next step of the bidiagonalization to obtain the
# next beta, u, alfa, v. These satisfy the relations
# beta*u = a@v - alfa*u,
# alfa*v = A'@u - beta*v.
u = A.matvec(v) - alfa * u
beta = np.linalg.norm(u)
if beta > 0:
u = (1/beta) * u
anorm = sqrt(anorm**2 + alfa**2 + beta**2 + dampsq)
v = A.rmatvec(u) - beta * v
alfa = np.linalg.norm(v)
if alfa > 0:
v = (1 / alfa) * v
# Use a plane rotation to eliminate the damping parameter.
# This alters the diagonal (rhobar) of the lower-bidiagonal matrix.
if damp > 0:
rhobar1 = sqrt(rhobar**2 + dampsq)
cs1 = rhobar / rhobar1
sn1 = damp / rhobar1
psi = sn1 * phibar
phibar = cs1 * phibar
else:
# cs1 = 1 and sn1 = 0
rhobar1 = rhobar
psi = 0.
# Use a plane rotation to eliminate the subdiagonal element (beta)
# of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix.
cs, sn, rho = _sym_ortho(rhobar1, beta)
theta = sn * alfa
rhobar = -cs * alfa
phi = cs * phibar
phibar = sn * phibar
tau = sn * phi
# Update x and w.
t1 = phi / rho
t2 = -theta / rho
dk = (1 / rho) * w
x = x + t1 * w
w = v + t2 * w
ddnorm = ddnorm + np.linalg.norm(dk)**2
if calc_var:
var = var + dk**2
# Use a plane rotation on the right to eliminate the
# super-diagonal element (theta) of the upper-bidiagonal matrix.
# Then use the result to estimate norm(x).
delta = sn2 * rho
gambar = -cs2 * rho
rhs = phi - delta * z
zbar = rhs / gambar
xnorm = sqrt(xxnorm + zbar**2)
gamma = sqrt(gambar**2 + theta**2)
cs2 = gambar / gamma
sn2 = theta / gamma
z = rhs / gamma
xxnorm = xxnorm + z**2
# Test for convergence.
# First, estimate the condition of the matrix Abar,
# and the norms of rbar and Abar'rbar.
acond = anorm * sqrt(ddnorm)
res1 = phibar**2
res2 = res2 + psi**2
rnorm = sqrt(res1 + res2)
arnorm = alfa * abs(tau)
# Distinguish between
# r1norm = ||b - Ax|| and
# r2norm = rnorm in current code
# = sqrt(r1norm^2 + damp^2*||x - x0||^2).
# Estimate r1norm from
# r1norm = sqrt(r2norm^2 - damp^2*||x - x0||^2).
# Although there is cancellation, it might be accurate enough.
if damp > 0:
r1sq = rnorm**2 - dampsq * xxnorm
r1norm = sqrt(abs(r1sq))
if r1sq < 0:
r1norm = -r1norm
else:
r1norm = rnorm
r2norm = rnorm
# Now use these norms to estimate certain other quantities,
# some of which will be small near a solution.
test1 = rnorm / bnorm
test2 = arnorm / (anorm * rnorm + eps)
test3 = 1 / (acond + eps)
t1 = test1 / (1 + anorm * xnorm / bnorm)
rtol = btol + atol * anorm * xnorm / bnorm
# The following tests guard against extremely small values of
# atol, btol or ctol. (The user may have set any or all of
# the parameters atol, btol, conlim to 0.)
# The effect is equivalent to the normal tests using
# atol = eps, btol = eps, conlim = 1/eps.
if itn >= iter_lim:
istop = 7
if 1 + test3 <= 1:
istop = 6
if 1 + test2 <= 1:
istop = 5
if 1 + t1 <= 1:
istop = 4
# Allow for tolerances set by the user.
if test3 <= ctol:
istop = 3
if test2 <= atol:
istop = 2
if test1 <= rtol:
istop = 1
if show:
# See if it is time to print something.
prnt = False
if n <= 40:
prnt = True
if itn <= 10:
prnt = True
if itn >= iter_lim-10:
prnt = True
# if itn%10 == 0: prnt = True
if test3 <= 2*ctol:
prnt = True
if test2 <= 10*atol:
prnt = True
if test1 <= 10*rtol:
prnt = True
if istop != 0:
prnt = True
if prnt:
str1 = f'{itn:6g} {x[0]:12.5e}'
str2 = f' {r1norm:10.3e} {r2norm:10.3e}'
str3 = f' {test1:8.1e} {test2:8.1e}'
str4 = f' {anorm:8.1e} {acond:8.1e}'
print(str1, str2, str3, str4)
if istop != 0:
break
# End of iteration loop.
# Print the stopping condition.
if show:
print(' ')
print('LSQR finished')
print(msg[istop])
print(' ')
str1 = f'istop ={istop:8g} r1norm ={r1norm:8.1e}'
str2 = f'anorm ={anorm:8.1e} arnorm ={arnorm:8.1e}'
str3 = f'itn ={itn:8g} r2norm ={r2norm:8.1e}'
str4 = f'acond ={acond:8.1e} xnorm ={xnorm:8.1e}'
print(str1 + ' ' + str2)
print(str3 + ' ' + str4)
print(' ')
return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
| 21,214
| 35.079932
| 84
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_isolve/setup.py
|
def configuration(parent_package='', top_path=None):
    """Return the numpy.distutils configuration for the ``_isolve`` subpackage."""
    # Imported locally: numpy.distutils is needed only at build time.
    from numpy.distutils.misc_util import Configuration

    cfg = Configuration('_isolve', parent_package, top_path)
    cfg.add_data_dir('tests')
    return cfg
# Standalone build entry point: running this file directly builds the
# ``_isolve`` subpackage via numpy.distutils with the configuration above.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 347
| 25.769231
| 63
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_isolve/iterative.py
|
import warnings
import numpy as np
from scipy.sparse.linalg._interface import LinearOperator
from .utils import make_system
from scipy.linalg import get_lapack_funcs
from scipy._lib.deprecation import _NoValue
__all__ = ['bicg', 'bicgstab', 'cg', 'cgs', 'gmres', 'qmr']
def _get_atol(name, b, tol=_NoValue, atol=0., rtol=1e-5):
"""
A helper function to handle tolerance deprecations and normalization
"""
if tol is not _NoValue:
msg = (f"'scipy.sparse.linalg.{name}' keyword argument 'tol' is "
"deprecated in favor of 'rtol' and will be removed in SciPy "
"v.1.14.0. Until then, if set, it will override 'rtol'.")
warnings.warn(msg, category=DeprecationWarning, stacklevel=4)
rtol = float(tol) if tol is not None else rtol
if atol == 'legacy':
warnings.warn("scipy.sparse.linalg.{name} called with `atol` set to "
"string. This behavior is deprecated and atol parameter"
" only excepts floats. In SciPy 1.14, this will result"
" with an error.", category=DeprecationWarning,
stacklevel=4)
atol = 0
atol = max(float(atol), float(rtol) * float(np.linalg.norm(b)))
return atol
def bicg(A, b, x0=None, tol=_NoValue, maxiter=None, M=None, callback=None,
         atol=0., rtol=1e-5):
    """Use BIConjugate Gradient iteration to solve ``Ax = b``.

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        The real or complex N-by-N matrix of the linear system.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` and ``A^T x`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : ndarray
        Right hand side of the linear system. Has shape (N,) or (N,1).
    x0 : ndarray
        Starting guess for the solution.
    rtol, atol : float, optional
        Parameters for the convergence test. For convergence,
        ``norm(b - A @ x) <= max(rtol*norm(b), atol)`` should be satisfied.
        The default is ``atol=0.`` and ``rtol=1e-5``.
    maxiter : integer
        Maximum number of iterations. Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M : {sparse matrix, ndarray, LinearOperator}
        Preconditioner for A. The preconditioner should approximate the
        inverse of A. Effective preconditioning dramatically improves the
        rate of convergence, which implies that fewer iterations are needed
        to reach a given error tolerance.
    callback : function
        User-supplied function to call after each iteration. It is called
        as callback(xk), where xk is the current solution vector.
    tol : float, optional, deprecated

        .. deprecated 1.12.0
           `bicg` keyword argument `tol` is deprecated in favor of `rtol` and
           will be removed in SciPy 1.14.0.

    Returns
    -------
    x : ndarray
        The converged solution.
    info : integer
        Provides convergence information:
            0  : successful exit
            >0 : convergence to tolerance not achieved, number of iterations
            <0 : parameter breakdown

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import bicg
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1.]])
    >>> b = np.array([2., 4., -1.])
    >>> x, exitCode = bicg(A, b)
    >>> print(exitCode)  # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True

    """
    A, M, x, b, postprocess = make_system(A, M, x0, b)

    bnrm2 = np.linalg.norm(b)
    if bnrm2 == 0:
        # Trivial system: b == 0 implies x == 0 is the exact solution.
        return postprocess(b), 0

    atol = _get_atol('bicg', b, tol, atol, rtol)

    n = len(b)
    # Complex systems need the conjugating dot product.
    dotprod = np.vdot if np.iscomplexobj(x) else np.dot

    if maxiter is None:
        maxiter = n*10

    matvec, rmatvec = A.matvec, A.rmatvec
    psolve, rpsolve = M.matvec, M.rmatvec

    # Breakdown threshold; kept consistent with the other solvers here.
    rhotol = np.finfo(x.dtype.char).eps**2

    # Dummy values to initialize vars, silence linter warnings
    rho_prev, p, ptilde = None, None, None

    r = b - matvec(x) if x.any() else b.copy()
    rtilde = r.copy()

    for iteration in range(maxiter):
        if np.linalg.norm(r) < atol:  # Are we done?
            return postprocess(x), 0

        z = psolve(r)
        ztilde = rpsolve(rtilde)
        # order matters in this dot product
        rho_cur = dotprod(rtilde, z)

        if np.abs(rho_cur) < rhotol:  # Breakdown case
            # BUG FIX: this previously returned the `postprocess` function
            # object itself; it must return the postprocessed iterate, as
            # every other exit path does.
            return postprocess(x), -10

        if iteration > 0:
            beta = rho_cur / rho_prev
            p *= beta
            p += z
            ptilde *= beta.conj()
            ptilde += ztilde
        else:  # First spin
            p = z.copy()
            ptilde = ztilde.copy()

        q = matvec(p)
        qtilde = rmatvec(ptilde)
        rv = dotprod(ptilde, q)

        if rv == 0:
            return postprocess(x), -11

        alpha = rho_cur / rv
        x += alpha*p
        r -= alpha*q
        rtilde -= alpha.conj()*qtilde
        rho_prev = rho_cur

        if callback:
            callback(x)

    else:  # for loop exhausted
        # Return incomplete progress
        return postprocess(x), maxiter
def bicgstab(A, b, x0=None, tol=_NoValue, maxiter=None, M=None, callback=None,
             atol=0., rtol=1e-5):
    """Solve ``Ax = b`` with the BIConjugate Gradient STABilized method.

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        Real or complex N-by-N matrix of the linear system, or a linear
        operator able to produce ``Ax`` and ``A^T x`` (see
        ``scipy.sparse.linalg.LinearOperator``).
    b : ndarray
        Right-hand side of the linear system; shape (N,) or (N, 1).
    x0 : ndarray
        Initial guess for the solution.
    rtol, atol : float, optional
        Convergence parameters: the iteration stops once
        ``norm(b - A @ x) <= max(rtol*norm(b), atol)`` holds.
        Defaults are ``atol=0.`` and ``rtol=1e-5``.
    maxiter : integer
        Hard limit on the number of iterations; the solver stops after
        `maxiter` steps even without reaching the tolerance.
    M : {sparse matrix, ndarray, LinearOperator}
        Preconditioner for A, approximating its inverse. A good
        preconditioner can reduce the iteration count dramatically.
    callback : function
        Called after each iteration as ``callback(xk)``, with ``xk`` the
        current solution vector.
    tol : float, optional, deprecated

        .. deprecated 1.12.0
           `bicgstab` keyword argument `tol` is deprecated in favor of `rtol`
           and will be removed in SciPy 1.14.0.

    Returns
    -------
    x : ndarray
        The converged solution.
    info : integer
        Convergence information:
            0  : successful exit
            >0 : tolerance not achieved; number of iterations performed
            <0 : parameter breakdown

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import bicgstab
    >>> R = np.array([[4, 2, 0, 1],
    ...               [3, 0, 0, 2],
    ...               [0, 1, 1, 1],
    ...               [0, 2, 1, 0]])
    >>> A = csc_matrix(R)
    >>> b = np.array([-1, -0.5, -1, 2])
    >>> x, exit_code = bicgstab(A, b)
    >>> print(exit_code)  # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True

    """
    A, M, x, b, postprocess = make_system(A, M, x0, b)

    b_norm = np.linalg.norm(b)
    if b_norm == 0:
        # Zero right-hand side: the exact solution is the zero vector.
        return postprocess(b), 0

    atol = _get_atol('bicgstab', b, tol, atol, rtol)

    n = len(b)
    dotprod = np.vdot if np.iscomplexobj(x) else np.dot
    if maxiter is None:
        maxiter = n*10

    matvec = A.matvec
    psolve = M.matvec

    # These thresholds look odd but mirror the original Fortran templates;
    # sqrt(eps) may have been intended there.
    rho_tol = np.finfo(x.dtype.char).eps**2
    omega_tol = rho_tol

    # Placeholders so the names exist before the first iteration.
    rho_last, omega, alf, pvec, vvec = None, None, None, None, None

    res = b - matvec(x) if x.any() else b.copy()
    res0 = res.copy()

    for it in range(maxiter):
        if np.linalg.norm(res) < atol:  # converged
            return postprocess(x), 0

        rho = dotprod(res0, res)
        if np.abs(rho) < rho_tol:  # rho breakdown
            return postprocess(x), -10

        if it == 0:
            pvec = res.copy()
        else:
            if np.abs(omega) < omega_tol:  # omega breakdown
                return postprocess(x), -11
            beta = (rho / rho_last) * (alf / omega)
            pvec = res + beta * (pvec - omega*vvec)

        p_prec = psolve(pvec)
        vvec = matvec(p_prec)
        rv = dotprod(res0, vvec)
        if rv == 0:
            return postprocess(x), -11
        alf = rho / rv
        res -= alf*vvec
        svec = res.copy()

        if np.linalg.norm(svec) < atol:
            # Early exit: the half-step residual already meets the tolerance.
            x += alf*p_prec
            return postprocess(x), 0

        s_prec = psolve(svec)
        tvec = matvec(s_prec)
        omega = dotprod(tvec, svec) / dotprod(tvec, tvec)
        x += alf*p_prec
        x += omega*s_prec
        res -= omega*tvec
        rho_last = rho

        if callback:
            callback(x)

    # Iteration budget exhausted without convergence; report progress.
    return postprocess(x), maxiter
def cg(A, b, x0=None, tol=_NoValue, maxiter=None, M=None, callback=None,
       atol=0., rtol=1e-5):
    """Solve ``Ax = b`` by the Conjugate Gradient method.

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        Real or complex N-by-N matrix of the linear system. ``A`` must
        represent a hermitian, positive definite matrix; it may also be a
        linear operator producing ``Ax`` (see
        ``scipy.sparse.linalg.LinearOperator``).
    b : ndarray
        Right-hand side of the linear system; shape (N,) or (N, 1).
    x0 : ndarray
        Initial guess for the solution.
    rtol, atol : float, optional
        Convergence parameters: the iteration stops once
        ``norm(b - A @ x) <= max(rtol*norm(b), atol)`` holds.
        Defaults are ``atol=0.`` and ``rtol=1e-5``.
    maxiter : integer
        Hard limit on the number of iterations; the solver stops after
        `maxiter` steps even without reaching the tolerance.
    M : {sparse matrix, ndarray, LinearOperator}
        Preconditioner for A, approximating its inverse. A good
        preconditioner can reduce the iteration count dramatically.
    callback : function
        Called after each iteration as ``callback(xk)``, with ``xk`` the
        current solution vector.
    tol : float, optional, deprecated

        .. deprecated 1.12.0
           `cg` keyword argument `tol` is deprecated in favor of `rtol` and
           will be removed in SciPy 1.14.0.

    Returns
    -------
    x : ndarray
        The converged solution.
    info : integer
        Convergence information:
            0  : successful exit
            >0 : tolerance not achieved; number of iterations performed

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import cg
    >>> P = np.array([[4, 0, 1, 0],
    ...               [0, 5, 0, 0],
    ...               [1, 0, 3, 2],
    ...               [0, 0, 2, 4]])
    >>> A = csc_matrix(P)
    >>> b = np.array([-1, -0.5, -1, 2])
    >>> x, exit_code = cg(A, b)
    >>> print(exit_code)  # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True

    """
    A, M, x, b, postprocess = make_system(A, M, x0, b)

    if np.linalg.norm(b) == 0:
        # Zero right-hand side: the exact solution is the zero vector.
        return postprocess(b), 0

    atol = _get_atol('cg', b, tol, atol, rtol)

    n = len(b)
    if maxiter is None:
        maxiter = n*10

    dotprod = np.vdot if np.iscomplexobj(x) else np.dot
    matvec = A.matvec
    psolve = M.matvec

    resid = b - matvec(x) if x.any() else b.copy()

    # Defined from the first pass of the loop onward.
    rs_old, direction = None, None

    for it in range(maxiter):
        if np.linalg.norm(resid) < atol:  # converged
            return postprocess(x), 0

        z = psolve(resid)
        rs_new = dotprod(resid, z)

        if it == 0:
            direction = z.copy()
        else:
            # Fletcher-Reeves style update of the search direction.
            direction = z + (rs_new / rs_old) * direction

        Ap = matvec(direction)
        step = rs_new / dotprod(direction, Ap)
        x += step*direction
        resid -= step*Ap
        rs_old = rs_new

        if callback:
            callback(x)

    # Iteration budget exhausted without convergence; report progress.
    return postprocess(x), maxiter
def cgs(A, b, x0=None, tol=_NoValue, maxiter=None, M=None, callback=None,
        atol=0., rtol=1e-5):
    """Use Conjugate Gradient Squared iteration to solve ``Ax = b``.

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        The real-valued N-by-N matrix of the linear system.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : ndarray
        Right hand side of the linear system. Has shape (N,) or (N,1).
    x0 : ndarray
        Starting guess for the solution.
    rtol, atol : float, optional
        Parameters for the convergence test. For convergence,
        ``norm(b - A @ x) <= max(rtol*norm(b), atol)`` should be satisfied.
        The default is ``atol=0.`` and ``rtol=1e-5``.
    maxiter : integer
        Maximum number of iterations. Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M : {sparse matrix, ndarray, LinearOperator}
        Preconditioner for A. The preconditioner should approximate the
        inverse of A. Effective preconditioning dramatically improves the
        rate of convergence, which implies that fewer iterations are needed
        to reach a given error tolerance.
    callback : function
        User-supplied function to call after each iteration. It is called
        as callback(xk), where xk is the current solution vector.
    tol : float, optional, deprecated

        .. deprecated 1.12.0
           `cgs` keyword argument `tol` is deprecated in favor of `rtol` and
           will be removed in SciPy 1.14.0.

    Returns
    -------
    x : ndarray
        The converged solution.
    info : integer
        Provides convergence information:
            0  : successful exit
            >0 : convergence to tolerance not achieved, number of iterations
            <0 : parameter breakdown

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import cgs
    >>> R = np.array([[4, 2, 0, 1],
    ...               [3, 0, 0, 2],
    ...               [0, 1, 1, 1],
    ...               [0, 2, 1, 0]])
    >>> A = csc_matrix(R)
    >>> b = np.array([-1, -0.5, -1, 2])
    >>> x, exit_code = cgs(A, b)
    >>> print(exit_code)  # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True

    """
    A, M, x, b, postprocess = make_system(A, M, x0, b)

    bnrm2 = np.linalg.norm(b)
    if bnrm2 == 0:
        # Trivial system: b == 0 implies x == 0 is the exact solution.
        return postprocess(b), 0

    atol = _get_atol('cgs', b, tol, atol, rtol)

    n = len(b)
    dotprod = np.vdot if np.iscomplexobj(x) else np.dot

    if maxiter is None:
        maxiter = n*10

    matvec = A.matvec
    psolve = M.matvec

    rhotol = np.finfo(x.dtype.char).eps**2

    r = b - matvec(x) if x.any() else b.copy()
    rtilde = r.copy()

    # NOTE: a redundant recomputation of norm(b) (dead code; the zero case is
    # already handled by `bnrm2` above) was removed here.

    # Dummy values to initialize vars, silence linter warnings
    rho_prev, p, u, q = None, None, None, None

    for iteration in range(maxiter):
        rnorm = np.linalg.norm(r)
        if rnorm < atol:  # Are we done?
            return postprocess(x), 0

        rho_cur = dotprod(rtilde, r)
        if np.abs(rho_cur) < rhotol:  # Breakdown case
            # BUG FIX: this previously returned the `postprocess` function
            # object itself; it must return the postprocessed iterate, as
            # every other exit path does.
            return postprocess(x), -10

        if iteration > 0:
            beta = rho_cur / rho_prev

            # u = r + beta * q
            # p = u + beta * (q + beta * p);
            u[:] = r[:]
            u += beta*q

            p *= beta
            p += q
            p *= beta
            p += u

        else:  # First spin
            p = r.copy()
            u = r.copy()
            q = np.empty_like(r)

        phat = psolve(p)
        vhat = matvec(phat)
        rv = dotprod(rtilde, vhat)

        if rv == 0:  # Dot product breakdown
            return postprocess(x), -11

        alpha = rho_cur / rv
        q[:] = u[:]
        q -= alpha*vhat
        uhat = psolve(u + q)
        x += alpha*uhat

        # Due to numerical error build-up the actual residual is computed
        # instead of the following two lines that were in the original
        # FORTRAN templates, still using a single matvec.

        # qhat = matvec(uhat)
        # r -= alpha*qhat
        r = b - matvec(x)

        rho_prev = rho_cur

        if callback:
            callback(x)

    else:  # for loop exhausted
        # Return incomplete progress
        return postprocess(x), maxiter
def gmres(A, b, x0=None, tol=_NoValue, restart=None, maxiter=None, M=None,
callback=None, restrt=_NoValue, atol=0., callback_type=None,
rtol=1e-5):
"""
Use Generalized Minimal RESidual iteration to solve ``Ax = b``.
Parameters
----------
A : {sparse matrix, ndarray, LinearOperator}
The real or complex N-by-N matrix of the linear system.
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : ndarray
Right hand side of the linear system. Has shape (N,) or (N,1).
x0 : ndarray
Starting guess for the solution (a vector of zeros by default).
atol, rtol : float
Parameters for the convergence test. For convergence,
``norm(b - A @ x) <= max(rtol*norm(b), atol)`` should be satisfied.
The default is ``atol=0.`` and ``rtol=1e-5``.
restart : int, optional
Number of iterations between restarts. Larger values increase
iteration cost, but may be necessary for convergence.
If omitted, ``min(20, n)`` is used.
maxiter : int, optional
Maximum number of iterations (restart cycles). Iteration will stop
after maxiter steps even if the specified tolerance has not been
achieved. See `callback_type`.
M : {sparse matrix, ndarray, LinearOperator}
Inverse of the preconditioner of A. M should approximate the
inverse of A and be easy to solve for (see Notes). Effective
preconditioning dramatically improves the rate of convergence,
which implies that fewer iterations are needed to reach a given
error tolerance. By default, no preconditioner is used.
In this implementation, left preconditioning is used,
and the preconditioned residual is minimized. However, the final
convergence is tested with respect to the ``b - A @ x`` residual.
callback : function
User-supplied function to call after each iteration. It is called
as `callback(args)`, where `args` are selected by `callback_type`.
callback_type : {'x', 'pr_norm', 'legacy'}, optional
Callback function argument requested:
- ``x``: current iterate (ndarray), called on every restart
- ``pr_norm``: relative (preconditioned) residual norm (float),
called on every inner iteration
- ``legacy`` (default): same as ``pr_norm``, but also changes the
meaning of `maxiter` to count inner iterations instead of restart
cycles.
This keyword has no effect if `callback` is not set.
restrt : int, optional, deprecated
.. deprecated:: 0.11.0
`gmres` keyword argument `restrt` is deprecated in favor of
`restart` and will be removed in SciPy 1.14.0.
tol : float, optional, deprecated
.. deprecated 1.12.0
`gmres` keyword argument `tol` is deprecated in favor of `rtol` and
will be removed in SciPy 1.14.0
Returns
-------
x : ndarray
The converged solution.
info : int
Provides convergence information:
0 : successful exit
>0 : convergence to tolerance not achieved, number of iterations
See Also
--------
LinearOperator
Notes
-----
A preconditioner, P, is chosen such that P is close to A but easy to solve
for. The preconditioner parameter required by this routine is
``M = P^-1``. The inverse should preferably not be calculated
explicitly. Rather, use the following template to produce M::
# Construct a linear operator that computes P^-1 @ x.
import scipy.sparse.linalg as spla
M_x = lambda x: spla.spsolve(P, x)
M = spla.LinearOperator((n, n), M_x)
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import gmres
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> b = np.array([2, 4, -1], dtype=float)
>>> x, exitCode = gmres(A, b)
>>> print(exitCode) # 0 indicates successful convergence
0
>>> np.allclose(A.dot(x), b)
True
"""
# Handle the deprecation frenzy
if restrt not in (None, _NoValue) and restart:
raise ValueError("Cannot specify both 'restart' and 'restrt'"
" keywords. Also 'rstrt' is deprecated."
" and will be removed in SciPy 1.14.0. Use "
"'restart' instad.")
if restrt is not _NoValue:
msg = ("'gmres' keyword argument 'restrt' is deprecated "
"in favor of 'restart' and will be removed in SciPy"
" 1.14.0. Until then, if set, 'rstrt' will override 'restart'."
)
warnings.warn(msg, DeprecationWarning, stacklevel=3)
restart = restrt
if callback is not None and callback_type is None:
# Warn about 'callback_type' semantic changes.
# Probably should be removed only in far future, Scipy 2.0 or so.
msg = ("scipy.sparse.linalg.gmres called without specifying "
"`callback_type`. The default value will be changed in"
" a future release. For compatibility, specify a value "
"for `callback_type` explicitly, e.g., "
"``gmres(..., callback_type='pr_norm')``, or to retain the "
"old behavior ``gmres(..., callback_type='legacy')``"
)
warnings.warn(msg, category=DeprecationWarning, stacklevel=3)
if callback_type is None:
callback_type = 'legacy'
if callback_type not in ('x', 'pr_norm', 'legacy'):
raise ValueError(f"Unknown callback_type: {callback_type!r}")
if callback is None:
callback_type = None
A, M, x, b, postprocess = make_system(A, M, x0, b)
matvec = A.matvec
psolve = M.matvec
n = len(b)
bnrm2 = np.linalg.norm(b)
if bnrm2 == 0:
return postprocess(b), 0
eps = np.finfo(x.dtype.char).eps
dotprod = np.vdot if np.iscomplexobj(x) else np.dot
if maxiter is None:
maxiter = n*10
if restart is None:
restart = 20
restart = min(restart, n)
atol = _get_atol('gmres', b, tol, atol, rtol)
Mb_nrm2 = np.linalg.norm(psolve(b))
# ====================================================
# =========== Tolerance control from gh-8400 =========
# ====================================================
# Tolerance passed to GMRESREVCOM applies to the inner
# iteration and deals with the left-preconditioned
# residual.
ptol_max_factor = 1.
ptol = Mb_nrm2 * min(ptol_max_factor, atol / bnrm2)
presid = 0.
# ====================================================
lartg = get_lapack_funcs('lartg', dtype=x.dtype)
# allocate internal variables
v = np.empty([restart+1, n], dtype=x.dtype)
h = np.zeros([restart, restart+1], dtype=x.dtype)
givens = np.zeros([restart, 2], dtype=x.dtype)
# legacy iteration count
inner_iter = 0
for iteration in range(maxiter):
if iteration == 0:
r = b - matvec(x) if x.any() else b.copy()
v[0, :] = psolve(r)
tmp = np.linalg.norm(v[0, :])
v[0, :] *= (1 / tmp)
# RHS of the Hessenberg problem
S = np.zeros(restart+1, dtype=x.dtype)
S[0] = tmp
breakdown = False
for col in range(restart):
av = matvec(v[col, :])
w = psolve(av)
# Modified Gram-Schmidt
h0 = np.linalg.norm(w)
for k in range(col+1):
tmp = dotprod(v[k, :], w)
h[col, k] = tmp
w -= tmp*v[k, :]
h1 = np.linalg.norm(w)
h[col, col + 1] = h1
v[col + 1, :] = w[:]
# Exact solution indicator
if h1 <= eps*h0:
h[col, col + 1] = 0
breakdown = True
else:
v[col + 1, :] *= (1 / h1)
# apply past Givens rotations to current h column
for k in range(col):
c, s = givens[k, 0], givens[k, 1]
n0, n1 = h[col, [k, k+1]]
h[col, [k, k + 1]] = [c*n0 + s*n1, -s.conj()*n0 + c*n1]
# get and apply current rotation to h and S
c, s, mag = lartg(h[col, col], h[col, col+1])
givens[col, :] = [c, s]
h[col, [col, col+1]] = mag, 0
# S[col+1] component is always 0
tmp = -np.conjugate(s)*S[col]
S[[col, col + 1]] = [c*S[col], tmp]
presid = np.abs(tmp)
inner_iter += 1
if callback_type in ('legacy', 'pr_norm'):
callback(presid / bnrm2)
# Legacy behavior
if callback_type == 'legacy' and inner_iter == maxiter:
break
if presid <= ptol or breakdown:
break
# Solve h(col, col) upper triangular system and allow pseudo-solve
# singular cases as in (but without the f2py copies):
# y = trsv(h[:col+1, :col+1].T, S[:col+1])
if h[col, col] == 0:
S[col] = 0
y = np.zeros([col+1], dtype=x.dtype)
y[:] = S[:col+1]
for k in range(col, 0, -1):
if y[k] != 0:
y[k] /= h[k, k]
tmp = y[k]
y[:k] -= tmp*h[k, :k]
if y[0] != 0:
y[0] /= h[0, 0]
x += y @ v[:col+1, :]
r = b - matvec(x)
rnorm = np.linalg.norm(r)
# Legacy exit
if callback_type == 'legacy' and inner_iter == maxiter:
return postprocess(x), 0 if rnorm <= atol else maxiter
if callback_type == 'x':
callback(x)
if rnorm <= atol:
break
elif breakdown:
# Reached breakdown (= exact solution), but the external
# tolerance check failed. Bail out with failure.
break
elif presid <= ptol:
# Inner loop passed but outer didn't
ptol_max_factor = max(eps, 0.25 * ptol_max_factor)
else:
ptol_max_factor = min(1.0, 1.5 * ptol_max_factor)
ptol = presid * min(ptol_max_factor, atol / rnorm)
info = 0 if (rnorm <= atol) else maxiter
return postprocess(x), info
def qmr(A, b, x0=None, tol=_NoValue, maxiter=None, M1=None, M2=None,
        callback=None, atol=0., rtol=1e-5):
    """Use Quasi-Minimal Residual iteration to solve ``Ax = b``.

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        The real-valued N-by-N matrix of the linear system.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` and ``A^T x`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : ndarray
        Right hand side of the linear system. Has shape (N,) or (N,1).
    x0 : ndarray
        Starting guess for the solution.
    atol, rtol : float, optional
        Parameters for the convergence test. For convergence,
        ``norm(b - A @ x) <= max(rtol*norm(b), atol)`` should be satisfied.
        The default is ``atol=0.`` and ``rtol=1e-5``.
    maxiter : integer
        Maximum number of iterations. Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M1 : {sparse matrix, ndarray, LinearOperator}
        Left preconditioner for A.
    M2 : {sparse matrix, ndarray, LinearOperator}
        Right preconditioner for A. Used together with the left
        preconditioner M1. The matrix ``M1 @ A @ M2`` should be better
        conditioned than A alone.
    callback : function
        User-supplied function to call after each iteration. It is called
        as callback(xk), where xk is the current solution vector.
    tol : float, optional, deprecated

        .. deprecated:: 1.12.0
           `qmr` keyword argument `tol` is deprecated in favor of `rtol` and
           will be removed in SciPy 1.14.0.

    Returns
    -------
    x : ndarray
        The converged solution.
    info : integer
        Provides convergence information:

        - 0  : successful exit
        - >0 : convergence to tolerance not achieved, number of iterations
        - <0 : parameter breakdown (-10 rho, -11 beta, -12 gamma,
          -13 delta, -14 epsilon, -15 xi)

    See Also
    --------
    LinearOperator

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import qmr
    >>> A = csc_matrix([[3., 2., 0.], [1., -1., 0.], [0., 5., 1.]])
    >>> b = np.array([2., 4., -1.])
    >>> x, exitCode = qmr(A, b)
    >>> print(exitCode)            # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True
    """
    # Keep the original operand: psolve/rpsolve attributes (if any) live on
    # the user's object, not on the wrapped LinearOperator.
    A_ = A
    A, M, x, b, postprocess = make_system(A, None, x0, b)
    bnrm2 = np.linalg.norm(b)
    # Trivial RHS: x = 0 (returned as b, which is all zeros here).
    if bnrm2 == 0:
        return postprocess(b), 0
    atol = _get_atol('qmr', b, tol, atol, rtol)
    # Build left/right preconditioner operators. If the user's matrix object
    # carries a two-sided psolve/rpsolve protocol, wrap it; otherwise use
    # identity operators on both sides.
    if M1 is None and M2 is None:
        if hasattr(A_, 'psolve'):
            def left_psolve(b):
                return A_.psolve(b, 'left')
            def right_psolve(b):
                return A_.psolve(b, 'right')
            def left_rpsolve(b):
                return A_.rpsolve(b, 'left')
            def right_rpsolve(b):
                return A_.rpsolve(b, 'right')
            M1 = LinearOperator(A.shape,
                                matvec=left_psolve,
                                rmatvec=left_rpsolve)
            M2 = LinearOperator(A.shape,
                                matvec=right_psolve,
                                rmatvec=right_rpsolve)
        else:
            def id(b):
                return b
            M1 = LinearOperator(A.shape, matvec=id, rmatvec=id)
            M2 = LinearOperator(A.shape, matvec=id, rmatvec=id)
    n = len(b)
    if maxiter is None:
        maxiter = n*10
    # vdot conjugates its first argument, which is required for complex data.
    dotprod = np.vdot if np.iscomplexobj(x) else np.dot
    # All breakdown checks compare scalars against machine epsilon.
    rhotol = np.finfo(x.dtype.char).eps
    betatol = rhotol
    gammatol = rhotol
    deltatol = rhotol
    epsilontol = rhotol
    xitol = rhotol
    # Initial residual; skip the matvec for a zero initial guess.
    r = b - A.matvec(x) if x.any() else b.copy()
    # Two-sided (coupled) Lanczos starting vectors: vtilde for the right
    # Krylov space, wtilde for the left (transposed) Krylov space.
    vtilde = r.copy()
    y = M1.matvec(vtilde)
    rho = np.linalg.norm(y)
    wtilde = r.copy()
    z = M2.rmatvec(wtilde)
    xi = np.linalg.norm(z)
    gamma, eta, theta = 1, -1, 0
    v = np.empty_like(vtilde)
    w = np.empty_like(wtilde)
    # Dummy values to initialize vars, silence linter warnings
    epsilon, q, d, p, s = None, None, None, None, None
    for iteration in range(maxiter):
        if np.linalg.norm(r) < atol:  # Are we done?
            return postprocess(x), 0
        if np.abs(rho) < rhotol:  # rho breakdown
            return postprocess(x), -10
        if np.abs(xi) < xitol:  # xi breakdown
            return postprocess(x), -15
        # Normalize the current Lanczos pair (in place).
        v[:] = vtilde[:]
        v *= (1 / rho)
        y *= (1 / rho)
        w[:] = wtilde[:]
        w *= (1 / xi)
        z *= (1 / xi)
        delta = dotprod(z, y)
        if np.abs(delta) < deltatol:  # delta breakdown
            return postprocess(x), -13
        # Search-direction updates (p, q) from the preconditioned vectors.
        ytilde = M2.matvec(y)
        ztilde = M1.rmatvec(z)
        if iteration > 0:
            ytilde -= (xi * delta / epsilon) * p
            p[:] = ytilde[:]
            ztilde -= (rho * (delta / epsilon).conj()) * q
            q[:] = ztilde[:]
        else:  # First spin
            p = ytilde.copy()
            q = ztilde.copy()
        ptilde = A.matvec(p)
        epsilon = dotprod(q, ptilde)
        if np.abs(epsilon) < epsilontol:  # epsilon breakdown
            return postprocess(x), -14
        beta = epsilon / delta
        if np.abs(beta) < betatol:  # beta breakdown
            return postprocess(x), -11
        # Advance the right Lanczos recurrence.
        vtilde[:] = ptilde[:]
        vtilde -= beta*v
        y = M1.matvec(vtilde)
        rho_prev = rho
        rho = np.linalg.norm(y)
        # Advance the left (transposed) Lanczos recurrence.
        wtilde[:] = w[:]
        wtilde *= - beta.conj()
        wtilde += A.rmatvec(q)
        z = M2.rmatvec(wtilde)
        xi = np.linalg.norm(z)
        # Quasi-minimization parameters (Givens-like smoothing).
        gamma_prev = gamma
        theta_prev = theta
        theta = rho / (gamma_prev * np.abs(beta))
        gamma = 1 / np.sqrt(1 + theta**2)
        if np.abs(gamma) < gammatol:  # gamma breakdown
            return postprocess(x), -12
        eta *= -(rho_prev / beta) * (gamma / gamma_prev)**2
        # Accumulate the correction d (for x) and s (for r) in place.
        if iteration > 0:
            d *= (theta_prev * gamma) ** 2
            d += eta*p
            s *= (theta_prev * gamma) ** 2
            s += eta*ptilde
        else:
            d = p.copy()
            d *= eta
            s = ptilde.copy()
            s *= eta
        x += d
        r -= s
        if callback:
            callback(x)
    else:  # for loop exhausted
        # Return incomplete progress
        return postprocess(x), maxiter
| 34,750
| 31.660714
| 78
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_isolve/utils.py
|
__docformat__ = "restructuredtext en"
__all__ = []
from numpy import asanyarray, asarray, array, zeros
from scipy.sparse.linalg._interface import aslinearoperator, LinearOperator, \
IdentityOperator
# Dtype-coercion table for the single-character typecodes used by the
# iterative solvers: 'f'/'d' real single/double, 'F'/'D' complex
# single/double. The result is complex if either input is, and double
# precision if either input is.
_coerce_rules = {('f','f'):'f', ('f','d'):'d', ('f','F'):'F',
                 ('f','D'):'D', ('d','f'):'d', ('d','d'):'d',
                 ('d','F'):'D', ('d','D'):'D', ('F','f'):'F',
                 ('F','d'):'D', ('F','F'):'F', ('F','D'):'D',
                 ('D','f'):'D', ('D','d'):'D', ('D','F'):'D',
                 ('D','D'):'D'}


def coerce(x, y):
    """Return the common dtype character for typecodes `x` and `y`.

    Codes outside the supported float/complex set 'fdFD' (e.g. integer
    or bool codes) are treated as double precision ('d').
    """
    if x not in 'fdFD':
        x = 'd'
    if y not in 'fdFD':
        y = 'd'
    return _coerce_rules[x, y]


def id(x):
    """Identity function, used as the "no preconditioner" sentinel.

    NOTE: intentionally shadows the builtin ``id`` within this module;
    ``make_system`` compares solve callables against it with ``is`` to
    detect that no preconditioner was supplied.
    """
    return x


def make_system(A, M, x0, b):
    """Make a linear system Ax=b

    Parameters
    ----------
    A : LinearOperator
        sparse or dense matrix (or any valid input to aslinearoperator)
    M : {LinearOperator, None}
        preconditioner
        sparse or dense matrix (or any valid input to aslinearoperator)
    x0 : {array_like, str, None}
        initial guess to iterative method.
        ``x0 = 'Mb'`` means using the nonzero initial guess ``M @ b``.
        Default is `None`, which means using the zero initial guess.
    b : array_like
        right hand side

    Returns
    -------
    (A, M, x, b, postprocess)
        A : LinearOperator
            matrix of the linear system
        M : LinearOperator
            preconditioner
        x : rank 1 ndarray
            initial guess
        b : rank 1 ndarray
            right hand side
        postprocess : function
            converts the solution vector to the appropriate
            type and dimensions (e.g. (N,1) matrix)

    Raises
    ------
    ValueError
        If A is not square, if the shapes of A and b (or x0) are
        incompatible, if A and M have different shapes, or if `x0` is a
        string other than ``'Mb'``.
    """
    A_ = A
    A = aslinearoperator(A)

    if A.shape[0] != A.shape[1]:
        # Bug fix: previously formatted as {(A.shape,)}, which printed a
        # tuple-wrapped shape like ``shape=((2, 3),)``.
        raise ValueError(f'expected square matrix, but got shape={A.shape}')

    N = A.shape[0]

    b = asanyarray(b)

    if not (b.shape == (N,1) or b.shape == (N,)):
        raise ValueError(f'shapes of A {A.shape} and b {b.shape} are '
                         'incompatible')

    if b.dtype.char not in 'fdFD':
        b = b.astype('d')  # upcast non-FP types to double

    def postprocess(x):
        # The solvers work on rank-1 ndarrays; nothing to convert back.
        return x

    # Determine the working dtype: A's dtype (probing with a matvec when A
    # does not expose one) coerced with b's dtype.
    if hasattr(A, 'dtype'):
        xtype = A.dtype.char
    else:
        xtype = A.matvec(b).dtype.char
    xtype = coerce(xtype, b.dtype.char)

    b = asarray(b, dtype=xtype)  # make b the same type as x
    b = b.ravel()

    # process preconditioner
    if M is None:
        # Fall back to the psolve/rpsolve protocol on the original object,
        # or to an identity operator when neither is defined.
        if hasattr(A_, 'psolve'):
            psolve = A_.psolve
        else:
            psolve = id
        if hasattr(A_, 'rpsolve'):
            rpsolve = A_.rpsolve
        else:
            rpsolve = id
        if psolve is id and rpsolve is id:
            M = IdentityOperator(shape=A.shape, dtype=A.dtype)
        else:
            M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve,
                               dtype=A.dtype)
    else:
        M = aslinearoperator(M)
        if A.shape != M.shape:
            raise ValueError('matrix and preconditioner have different shapes')

    # set initial guess
    if x0 is None:
        x = zeros(N, dtype=xtype)
    elif isinstance(x0, str):
        if x0 == 'Mb':  # use nonzero initial guess ``M @ b``
            bCopy = b.copy()
            x = M.matvec(bCopy)
        else:
            # Bug fix: an unrecognized string previously fell through with
            # ``x`` unassigned, raising UnboundLocalError at the return.
            raise ValueError(f"x0 must be 'Mb', an array, or None; got {x0!r}")
    else:
        x = array(x0, dtype=xtype)
        if not (x.shape == (N, 1) or x.shape == (N,)):
            raise ValueError(f'shapes of A {A.shape} and '
                             f'x0 {x.shape} are incompatible')
        x = x.ravel()

    return A, M, x, b, postprocess
| 3,598
| 27.117188
| 79
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_isolve/minres.py
|
from numpy import inner, zeros, inf, finfo
from numpy.linalg import norm
from math import sqrt
from .utils import make_system
__all__ = ['minres']
def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None,
           M=None, callback=None, show=False, check=False):
    """
    Use MINimum RESidual iteration to solve Ax=b

    MINRES minimizes norm(Ax - b) for a real symmetric matrix A.  Unlike
    the Conjugate Gradient method, A can be indefinite or singular.

    If shift != 0 then the method solves (A - shift*I)x = b

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        The real symmetric N-by-N matrix of the linear system
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : ndarray
        Right hand side of the linear system. Has shape (N,) or (N,1).

    Returns
    -------
    x : ndarray
        The converged solution.
    info : integer
        Provides convergence information:

        - 0  : successful exit
        - >0 : convergence to tolerance not achieved, number of iterations
        - <0 : illegal input or breakdown

    Other Parameters
    ----------------
    x0 : ndarray
        Starting guess for the solution.
    shift : float
        Value to apply to the system ``(A - shift * I)x = b``. Default is 0.
    tol : float
        Tolerance to achieve. The algorithm terminates when the relative
        residual is below `tol`.
    maxiter : integer
        Maximum number of iterations. Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M : {sparse matrix, ndarray, LinearOperator}
        Preconditioner for A. The preconditioner should approximate the
        inverse of A. Effective preconditioning dramatically improves the
        rate of convergence, which implies that fewer iterations are needed
        to reach a given error tolerance.
    callback : function
        User-supplied function to call after each iteration. It is called
        as callback(xk), where xk is the current solution vector.
    show : bool
        If ``True``, print out a summary and metrics related to the solution
        during iterations. Default is ``False``.
    check : bool
        If ``True``, run additional input validation to check that `A` and
        `M` (if specified) are symmetric. Default is ``False``.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import minres
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> A = A + A.T
    >>> b = np.array([2, 4, -1], dtype=float)
    >>> x, exitCode = minres(A, b)
    >>> print(exitCode)            # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True

    References
    ----------
    Solution of sparse indefinite systems of linear equations,
        C. C. Paige and M. A. Saunders (1975),
        SIAM J. Numer. Anal. 12(4), pp. 617-629.
        https://web.stanford.edu/group/SOL/software/minres/

    This file is a translation of the following MATLAB implementation:
        https://web.stanford.edu/group/SOL/software/minres/minres-matlab.zip
    """
    A, M, x, b, postprocess = make_system(A, M, x0, b)
    matvec = A.matvec
    psolve = M.matvec
    # Prefixes for the optional ``show`` diagnostic printout.
    first = 'Enter minres. '
    last = 'Exit minres. '
    n = A.shape[0]
    if maxiter is None:
        maxiter = 5 * n
    # Human-readable stop reasons, indexed by ``istop + 1`` at exit.
    msg = [' beta2 = 0. If M = I, b and x are eigenvectors ', # -1
           ' beta1 = 0. The exact solution is x0 ', # 0
           ' A solution to Ax = b was found, given rtol ', # 1
           ' A least-squares solution was found, given rtol ', # 2
           ' Reasonable accuracy achieved, given eps ', # 3
           ' x has converged to an eigenvector ', # 4
           ' acond has exceeded 0.1/eps ', # 5
           ' The iteration limit was reached ', # 6
           ' A does not define a symmetric matrix ', # 7
           ' M does not define a symmetric matrix ', # 8
           ' M does not define a pos-def preconditioner '] # 9
    if show:
        print(first + 'Solution of symmetric Ax = b')
        print(first + f'n = {n:3g} shift = {shift:23.14e}')
        print(first + f'itnlim = {maxiter:3g} rtol = {tol:11.2e}')
        print()
    istop = 0
    itn = 0
    Anorm = 0
    Acond = 0
    rnorm = 0
    ynorm = 0
    xtype = x.dtype
    eps = finfo(xtype).eps
    # Set up y and v for the first Lanczos vector v1.
    # y = beta1 P' v1, where P = C**(-1).
    # v is really P' v1.
    if x0 is None:
        r1 = b.copy()
    else:
        r1 = b - A@x
    y = psolve(r1)
    # beta1 = r1' M r1; negative means M is indefinite (not a valid
    # preconditioner), zero means x0 is already the exact solution.
    beta1 = inner(r1, y)
    if beta1 < 0:
        raise ValueError('indefinite preconditioner')
    elif beta1 == 0:
        return (postprocess(x), 0)
    bnorm = norm(b)
    if bnorm == 0:
        x = b
        return (postprocess(x), 0)
    beta1 = sqrt(beta1)
    if check:
        # are these too strict?
        # see if A is symmetric
        # Symmetry test: compare w'w with y'(A A y); they agree (up to
        # roundoff) iff A is symmetric.
        w = matvec(y)
        r2 = matvec(w)
        s = inner(w,w)
        t = inner(y,r2)
        z = abs(s - t)
        epsa = (s + eps) * eps**(1.0/3.0)
        if z > epsa:
            raise ValueError('non-symmetric matrix')
        # see if M is symmetric
        r2 = psolve(y)
        s = inner(y,y)
        t = inner(r1,r2)
        z = abs(s - t)
        epsa = (s + eps) * eps**(1.0/3.0)
        if z > epsa:
            raise ValueError('non-symmetric preconditioner')
    # Initialize other quantities
    oldb = 0
    beta = beta1
    dbar = 0
    epsln = 0
    qrnorm = beta1
    phibar = beta1
    rhs1 = beta1
    rhs2 = 0
    tnorm2 = 0
    gmax = 0
    gmin = finfo(xtype).max
    cs = -1
    sn = 0
    w = zeros(n, dtype=xtype)
    w2 = zeros(n, dtype=xtype)
    r2 = r1
    if show:
        print()
        print()
        print(' Itn x(1) Compatible LS norm(A) cond(A) gbar/|A|')
    while itn < maxiter:
        itn += 1
        # --- One Lanczos step: produce v_{k+1}, alfa_k, beta_{k+1} ---
        s = 1.0/beta
        v = s*y
        y = matvec(v)
        y = y - shift * v
        if itn >= 2:
            y = y - (beta/oldb)*r1
        alfa = inner(v,y)
        y = y - (alfa/beta)*r2
        r1 = r2
        r2 = y
        y = psolve(r2)
        oldb = beta
        beta = inner(r2,y)
        # Negative beta here implies the (preconditioned) operator is not
        # symmetric positive in the Lanczos inner product.
        if beta < 0:
            raise ValueError('non-symmetric matrix')
        beta = sqrt(beta)
        tnorm2 += alfa**2 + oldb**2 + beta**2
        if itn == 1:
            if beta/beta1 <= 10*eps:
                istop = -1  # Terminate later
        # Apply previous rotation Qk-1 to get
        #   [deltak epslnk+1] = [cs  sn][dbark    0   ]
        #   [gbar k dbar k+1]   [sn -cs][alfak betak+1].
        oldeps = epsln
        delta = cs * dbar + sn * alfa   # delta1 = 0       deltak
        gbar = sn * dbar - cs * alfa   # gbar 1 = alfa1    gbar k
        epsln = sn * beta   # epsln2 = 0      epslnk+1
        dbar = - cs * beta   # dbar 2 = beta2  dbar k+1
        root = norm([gbar, dbar])
        Arnorm = phibar * root
        # Compute the next plane rotation Qk
        gamma = norm([gbar, beta])  # gammak
        gamma = max(gamma, eps)
        cs = gbar / gamma  # ck
        sn = beta / gamma  # sk
        phi = cs * phibar  # phik
        phibar = sn * phibar  # phibark+1
        # Update x.
        denom = 1.0/gamma
        w1 = w2
        w2 = w
        w = (v - oldeps*w1 - delta*w2) * denom
        x = x + phi*w
        # Go round again.
        gmax = max(gmax, gamma)
        gmin = min(gmin, gamma)
        z = rhs1 / gamma
        rhs1 = rhs2 - delta*z
        rhs2 = - epsln*z
        # Estimate various norms and test for convergence.
        Anorm = sqrt(tnorm2)
        ynorm = norm(x)
        epsa = Anorm * eps
        epsx = Anorm * ynorm * eps
        epsr = Anorm * ynorm * tol
        diag = gbar
        if diag == 0:
            diag = epsa
        qrnorm = phibar
        rnorm = qrnorm
        if ynorm == 0 or Anorm == 0:
            test1 = inf
        else:
            test1 = rnorm / (Anorm*ynorm)  # ||r|| / (||A|| ||x||)
        if Anorm == 0:
            test2 = inf
        else:
            test2 = root / Anorm  # ||Ar|| / (||A|| ||r||)
        # Estimate cond(A).
        # In this version we look at the diagonals of R in the
        # factorization of the lower Hessenberg matrix, Q @ H = R,
        # where H is the tridiagonal matrix from Lanczos with one
        # extra row, beta(k+1) e_k^T.
        Acond = gmax/gmin
        # See if any of the stopping criteria are satisfied.
        # In rare cases, istop is already -1 from above (Abar = const*I).
        if istop == 0:
            t1 = 1 + test1  # These tests work if tol < eps
            t2 = 1 + test2
            if t2 <= 1:
                istop = 2
            if t1 <= 1:
                istop = 1
            if itn >= maxiter:
                istop = 6
            if Acond >= 0.1/eps:
                istop = 4
            if epsx >= beta1:
                istop = 3
            # if rnorm <= epsx : istop = 2
            # if rnorm <= epsr : istop = 1
            if test2 <= tol:
                istop = 2
            if test1 <= tol:
                istop = 1
        # See if it is time to print something.
        prnt = False
        if n <= 40:
            prnt = True
        if itn <= 10:
            prnt = True
        if itn >= maxiter-10:
            prnt = True
        if itn % 10 == 0:
            prnt = True
        if qrnorm <= 10*epsx:
            prnt = True
        if qrnorm <= 10*epsr:
            prnt = True
        if Acond <= 1e-2/eps:
            prnt = True
        if istop != 0:
            prnt = True
        if show and prnt:
            str1 = f'{itn:6g} {x[0]:12.5e} {test1:10.3e}'
            str2 = f' {test2:10.3e}'
            str3 = f' {Anorm:8.1e} {Acond:8.1e} {gbar/Anorm:8.1e}'
            print(str1 + str2 + str3)
            if itn % 10 == 0:
                print()
        if callback is not None:
            callback(x)
        if istop != 0:
            break  # TODO check this
    if show:
        print()
        print(last + f' istop = {istop:3g} itn ={itn:5g}')
        print(last + f' Anorm = {Anorm:12.4e} Acond = {Acond:12.4e}')
        print(last + f' rnorm = {rnorm:12.4e} ynorm = {ynorm:12.4e}')
        print(last + f' Arnorm = {Arnorm:12.4e}')
        print(last + msg[istop+1])
    # Only istop == 6 (iteration limit) is reported as failure to callers.
    if istop == 6:
        info = maxiter
    else:
        info = 0
    return (postprocess(x),info)
| 10,878
| 28.16622
| 85
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_isolve/lgmres.py
|
# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as SciPy.
import warnings
import numpy as np
from numpy.linalg import LinAlgError
from scipy.linalg import get_blas_funcs
from .utils import make_system
from ._gcrotmk import _fgmres
__all__ = ['lgmres']
def lgmres(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None,
           inner_m=30, outer_k=3, outer_v=None, store_outer_Av=True,
           prepend_outer_v=False, atol=None):
    """
    Solve a matrix equation using the LGMRES algorithm.

    The LGMRES algorithm [1]_ [2]_ is designed to avoid some problems
    in the convergence in restarted GMRES, and often converges in fewer
    iterations.

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        The real or complex N-by-N matrix of the linear system.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : ndarray
        Right hand side of the linear system. Has shape (N,) or (N,1).
    x0 : ndarray
        Starting guess for the solution.
    tol, atol : float, optional
        Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
        The default for ``atol`` is `tol`.

        .. warning::

           The default value for `atol` will be changed in a future release.
           For future compatibility, specify `atol` explicitly.
    maxiter : int, optional
        Maximum number of iterations. Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M : {sparse matrix, ndarray, LinearOperator}, optional
        Preconditioner for A. The preconditioner should approximate the
        inverse of A. Effective preconditioning dramatically improves the
        rate of convergence, which implies that fewer iterations are needed
        to reach a given error tolerance.
    callback : function, optional
        User-supplied function to call after each iteration. It is called
        as callback(xk), where xk is the current solution vector.
    inner_m : int, optional
        Number of inner GMRES iterations per each outer iteration.
    outer_k : int, optional
        Number of vectors to carry between inner GMRES iterations.
        According to [1]_, good values are in the range of 1...3.
        However, note that if you want to use the additional vectors to
        accelerate solving multiple similar problems, larger values may
        be beneficial.
    outer_v : list of tuples, optional
        List containing tuples ``(v, Av)`` of vectors and corresponding
        matrix-vector products, used to augment the Krylov subspace, and
        carried between inner GMRES iterations. The element ``Av`` can
        be `None` if the matrix-vector product should be re-evaluated.
        This parameter is modified in-place by `lgmres`, and can be used
        to pass "guess" vectors in and out of the algorithm when solving
        similar problems.
    store_outer_Av : bool, optional
        Whether LGMRES should store also A@v in addition to vectors `v`
        in the `outer_v` list. Default is True.
    prepend_outer_v : bool, optional
        Whether to put outer_v augmentation vectors before Krylov iterates.
        In standard LGMRES, prepend_outer_v=False.

    Returns
    -------
    x : ndarray
        The converged solution.
    info : int
        Provides convergence information:

            - 0  : successful exit
            - >0 : convergence to tolerance not achieved, number of
              iterations
            - <0 : illegal input or breakdown

    Notes
    -----
    The LGMRES algorithm [1]_ [2]_ is designed to avoid the
    slowing of convergence in restarted GMRES, due to alternating
    residual vectors. Typically, it often outperforms GMRES(m) of
    comparable memory requirements by some measure, or at least is not
    much worse.

    Another advantage in this algorithm is that you can supply it with
    'guess' vectors in the `outer_v` argument that augment the Krylov
    subspace. If the solution lies close to the span of these vectors,
    the algorithm converges faster. This can be useful if several very
    similar matrices need to be inverted one after another, such as in
    Newton-Krylov iteration where the Jacobian matrix often changes
    little in the nonlinear steps.

    References
    ----------
    .. [1] A.H. Baker and E.R. Jessup and T. Manteuffel, "A Technique for
             Accelerating the Convergence of Restarted GMRES", SIAM J. Matrix
             Anal. Appl. 26, 962 (2005).
    .. [2] A.H. Baker, "On Improving the Performance of the Linear Solver
             restarted GMRES", PhD thesis, University of Colorado (2003).

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import lgmres
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> b = np.array([2, 4, -1], dtype=float)
    >>> x, exitCode = lgmres(A, b)
    >>> print(exitCode)            # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True
    """
    A,M,x,b,postprocess = make_system(A,M,x0,b)
    if not np.isfinite(b).all():
        raise ValueError("RHS must contain only finite numbers")
    if atol is None:
        warnings.warn("scipy.sparse.linalg.lgmres called without specifying `atol`. "
                      "The default value will change in the future. To preserve "
                      "current behavior, set ``atol=tol``.",
                      category=DeprecationWarning, stacklevel=2)
        atol = tol
    matvec = A.matvec
    psolve = M.matvec
    if outer_v is None:
        outer_v = []
    # BLAS routines are resolved lazily on the first outer iteration, once
    # the working dtype (real vs complex) of the residual is known.
    axpy, dot, scal = None, None, None
    nrm2 = get_blas_funcs('nrm2', [b])
    b_norm = nrm2(b)
    # Trivial RHS: the zero vector solves the system exactly.
    if b_norm == 0:
        x = b
        return (postprocess(x), 0)
    # Adaptive factor bounding the inner-iteration tolerance (gh-style
    # tolerance control: loosen/tighten based on inner-loop success).
    ptol_max_factor = 1.0
    for k_outer in range(maxiter):
        r_outer = matvec(x) - b
        # -- callback
        if callback is not None:
            callback(x)
        # -- determine input type routines
        if axpy is None:
            if np.iscomplexobj(r_outer) and not np.iscomplexobj(x):
                x = x.astype(r_outer.dtype)
            axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'],
                                                   (x, r_outer))
        # -- check stopping condition
        r_norm = nrm2(r_outer)
        if r_norm <= max(atol, tol * b_norm):
            break
        # -- inner LGMRES iteration
        v0 = -psolve(r_outer)
        inner_res_0 = nrm2(v0)
        if inner_res_0 == 0:
            rnorm = nrm2(r_outer)
            raise RuntimeError("Preconditioner returned a zero vector; "
                               "|v| ~ %.1g, |M v| = 0" % rnorm)
        v0 = scal(1.0/inner_res_0, v0)
        # Inner tolerance relative to the current (preconditioned) residual.
        ptol = min(ptol_max_factor, max(atol, tol*b_norm)/r_norm)
        try:
            Q, R, B, vs, zs, y, pres = _fgmres(matvec,
                                               v0,
                                               inner_m,
                                               lpsolve=psolve,
                                               atol=ptol,
                                               outer_v=outer_v,
                                               prepend_outer_v=prepend_outer_v)
            y *= inner_res_0
            if not np.isfinite(y).all():
                # Overflow etc. in computation. There's no way to
                # recover from this, so we have to bail out.
                raise LinAlgError()
        except LinAlgError:
            # Floating point over/underflow, non-finite result from
            # matmul etc. -- report failure.
            return postprocess(x), k_outer + 1
        # Inner loop tolerance control
        if pres > ptol:
            ptol_max_factor = min(1.0, 1.5 * ptol_max_factor)
        else:
            ptol_max_factor = max(1e-16, 0.25 * ptol_max_factor)
        # -- GMRES terminated: eval solution
        dx = zs[0]*y[0]
        for w, yc in zip(zs[1:], y[1:]):
            dx = axpy(w, dx, dx.shape[0], yc)  # dx += w*yc
        # -- Store LGMRES augmentation vectors
        nx = nrm2(dx)
        if nx > 0:
            if store_outer_Av:
                # A @ dx is recovered cheaply from the Arnoldi relation
                # (A Z = V (Q R)) instead of an extra matvec.
                q = Q.dot(R.dot(y))
                ax = vs[0]*q[0]
                for v, qc in zip(vs[1:], q[1:]):
                    ax = axpy(v, ax, ax.shape[0], qc)
                outer_v.append((dx/nx, ax/nx))
            else:
                outer_v.append((dx/nx, None))
        # -- Retain only a finite number of augmentation vectors
        while len(outer_v) > outer_k:
            del outer_v[0]
        # -- Apply step
        x += dx
    else:
        # didn't converge ...
        return postprocess(x), maxiter
    return postprocess(x), 0
| 8,932
| 36.533613
| 85
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_isolve/__init__.py
|
"Iterative Solvers for Sparse Linear Systems"
#from info import __doc__
from .iterative import *
from .minres import minres
from .lgmres import lgmres
from .lsqr import lsqr
from .lsmr import lsmr
from ._gcrotmk import gcrotmk
from .tfqmr import tfqmr
__all__ = [
'bicg', 'bicgstab', 'cg', 'cgs', 'gcrotmk', 'gmres',
'lgmres', 'lsmr', 'lsqr',
'minres', 'qmr', 'tfqmr'
]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 479
| 21.857143
| 56
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_isolve/lsmr.py
|
"""
Copyright (C) 2010 David Fong and Michael Saunders
LSMR uses an iterative method.
07 Jun 2010: Documentation updated
03 Jun 2010: First release version in Python
David Chin-lung Fong clfong@stanford.edu
Institute for Computational and Mathematical Engineering
Stanford University
Michael Saunders saunders@stanford.edu
Systems Optimization Laboratory
Dept of MS&E, Stanford University.
"""
__all__ = ['lsmr']
from numpy import zeros, infty, atleast_1d, result_type
from numpy.linalg import norm
from math import sqrt
from scipy.sparse.linalg._interface import aslinearoperator
from scipy.sparse.linalg._isolve.lsqr import _sym_ortho
def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
         maxiter=None, show=False, x0=None):
    """Iterative solver for least-squares problems.

    lsmr solves the system of linear equations ``Ax = b``. If the system
    is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.
    ``A`` is a rectangular matrix of dimension m-by-n, where all cases are
    allowed: m = n, m > n, or m < n. ``b`` is a vector of length m.
    The matrix A may be dense or sparse (usually sparse).

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        Matrix A in the linear system.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` and ``A^H x`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : array_like, shape (m,)
        Vector ``b`` in the linear system.
    damp : float
        Damping factor for regularized least-squares. `lsmr` solves
        the regularized least-squares problem::

            min ||(b) - (  A   )x||
                ||(0)   (damp*I) ||_2

        where damp is a scalar. If damp is None or 0, the system
        is solved without regularization. Default is 0.
    atol, btol : float, optional
        Stopping tolerances. `lsmr` continues iterations until a
        certain backward error estimate is smaller than some quantity
        depending on atol and btol. Let ``r = b - Ax`` be the
        residual vector for the current approximate solution ``x``.
        If ``Ax = b`` seems to be consistent, `lsmr` terminates
        when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
        Otherwise, `lsmr` terminates when ``norm(A^H r) <=
        atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (default),
        the final ``norm(r)`` should be accurate to about 6
        digits. (The final ``x`` will usually have fewer correct digits,
        depending on ``cond(A)`` and the size of LAMBDA.) If `atol`
        or `btol` is None, a default value of 1.0e-6 will be used.
        Ideally, they should be estimates of the relative error in the
        entries of ``A`` and ``b`` respectively. For example, if the entries
        of ``A`` have 7 correct digits, set ``atol = 1e-7``. This prevents
        the algorithm from doing unnecessary work beyond the
        uncertainty of the input data.
    conlim : float, optional
        `lsmr` terminates if an estimate of ``cond(A)`` exceeds
        `conlim`. For compatible systems ``Ax = b``, conlim could be
        as large as 1.0e+12 (say). For least-squares problems,
        `conlim` should be less than 1.0e+8. If `conlim` is None, the
        default value is 1e+8. Maximum precision can be obtained by
        setting ``atol = btol = conlim = 0``, but the number of
        iterations may then be excessive. Default is 1e8.
    maxiter : int, optional
        `lsmr` terminates if the number of iterations reaches
        `maxiter`. The default is ``maxiter = min(m, n)``. For
        ill-conditioned systems, a larger value of `maxiter` may be
        needed.
    show : bool, optional
        Print iterations logs if ``show=True``. Default is False.
    x0 : array_like, shape (n,), optional
        Initial guess of ``x``, if None zeros are used. Default is None.

        .. versionadded:: 1.0.0

    Returns
    -------
    x : ndarray of float
        Least-square solution returned.
    istop : int
        istop gives the reason for stopping::

          istop   = 0 means x=0 is a solution.  If x0 was given, then x=x0 is a
                      solution.
                  = 1 means x is an approximate solution to A@x = B,
                      according to atol and btol.
                  = 2 means x approximately solves the least-squares problem
                      according to atol.
                  = 3 means COND(A) seems to be greater than CONLIM.
                  = 4 is the same as 1 with atol = btol = eps (machine
                      precision)
                  = 5 is the same as 2 with atol = eps.
                  = 6 is the same as 3 with CONLIM = 1/eps.
                  = 7 means ITN reached maxiter before the other stopping
                      conditions were satisfied.
    itn : int
        Number of iterations used.
    normr : float
        ``norm(b-Ax)``
    normar : float
        ``norm(A^H (b - Ax))``
    norma : float
        ``norm(A)``
    conda : float
        Condition number of A.
    normx : float
        ``norm(x)``

    Notes
    -----
    .. versionadded:: 0.11.0

    References
    ----------
    .. [1] D. C.-L. Fong and M. A. Saunders,
           "LSMR: An iterative algorithm for sparse least-squares problems",
           SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.
           :arxiv:`1006.0758`
    .. [2] LSMR Software, https://web.stanford.edu/group/SOL/software/lsmr/

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import lsmr
    >>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float)

    The first example has the trivial solution ``[0, 0]``

    >>> b = np.array([0., 0., 0.], dtype=float)
    >>> x, istop, itn, normr = lsmr(A, b)[:4]
    >>> istop
    0
    >>> x
    array([0., 0.])

    The stopping code `istop=0` returned indicates that a vector of zeros was
    found as a solution. The returned solution `x` indeed contains
    ``[0., 0.]``. The next example has a non-trivial solution:

    >>> b = np.array([1., 0., -1.], dtype=float)
    >>> x, istop, itn, normr = lsmr(A, b)[:4]
    >>> istop
    1
    >>> x
    array([ 1., -1.])
    >>> itn
    1
    >>> normr
    4.440892098500627e-16

    As indicated by `istop=1`, `lsmr` found a solution obeying the tolerance
    limits. The given solution ``[1., -1.]`` obviously solves the equation. The
    remaining return values include information about the number of iterations
    (`itn=1`) and the remaining difference of left and right side of the solved
    equation.
    The final example demonstrates the behavior in the case where there is no
    solution for the equation:

    >>> b = np.array([1., 0.01, -1.], dtype=float)
    >>> x, istop, itn, normr = lsmr(A, b)[:4]
    >>> istop
    2
    >>> x
    array([ 1.00333333, -0.99666667])
    >>> A.dot(x)-b
    array([ 0.00333333, -0.00333333,  0.00333333])
    >>> normr
    0.005773502691896255

    `istop` indicates that the system is inconsistent and thus `x` is rather an
    approximate solution to the corresponding least-squares problem. `normr`
    contains the minimal distance that was found.
    """
    A = aslinearoperator(A)
    b = atleast_1d(b)
    if b.ndim > 1:
        b = b.squeeze()

    msg = ('The exact solution is x = 0, or x = x0, if x0 was given ',
           'Ax - b is small enough, given atol, btol ',
           'The least-squares solution is good enough, given atol ',
           'The estimate of cond(Abar) has exceeded conlim ',
           'Ax - b is small enough for this machine ',
           'The least-squares solution is good enough for this machine',
           'Cond(Abar) seems to be too large for this machine ',
           'The iteration limit has been reached ')

    hdg1 = ' itn x(1) norm r norm Ar'
    hdg2 = ' compatible LS norm A cond A'
    pfreq = 20  # print frequency (for repeating the heading)
    pcount = 0  # print counter

    m, n = A.shape

    # stores the num of singular values
    minDim = min([m, n])

    if maxiter is None:
        maxiter = minDim

    if x0 is None:
        dtype = result_type(A, b, float)
    else:
        dtype = result_type(A, b, x0, float)

    if show:
        print(' ')
        print('LSMR Least-squares solution of Ax = b\n')
        print(f'The matrix A has {m} rows and {n} columns')
        print('damp = %20.14e\n' % (damp))
        print(f'atol = {atol:8.2e} conlim = {conlim:8.2e}\n')
        print(f'btol = {btol:8.2e} maxiter = {maxiter:8g}\n')

    u = b
    normb = norm(b)
    if x0 is None:
        x = zeros(n, dtype)
        beta = normb.copy()
    else:
        x = atleast_1d(x0.copy())
        u = u - A.matvec(x)
        beta = norm(u)

    if beta > 0:
        u = (1 / beta) * u
        v = A.rmatvec(u)
        alpha = norm(v)
    else:
        v = zeros(n, dtype)
        alpha = 0

    if alpha > 0:
        v = (1 / alpha) * v

    # Initialize variables for 1st iteration.
    itn = 0
    zetabar = alpha * beta
    alphabar = alpha
    rho = 1
    rhobar = 1
    cbar = 1
    sbar = 0

    h = v.copy()
    hbar = zeros(n, dtype)

    # Initialize variables for estimation of ||r||.
    betadd = beta
    betad = 0
    rhodold = 1
    tautildeold = 0
    thetatilde = 0
    zeta = 0
    d = 0

    # Initialize variables for estimation of ||A|| and cond(A)
    normA2 = alpha * alpha
    maxrbar = 0
    minrbar = 1e+100
    normA = sqrt(normA2)
    condA = 1
    normx = 0

    # Items for use in stopping rules, normb set earlier
    istop = 0
    ctol = 0
    if conlim > 0:
        ctol = 1 / conlim
    normr = beta

    # Reverse the order here from the original matlab code because
    # there was an error on return when arnorm==0
    normar = alpha * beta
    if normar == 0:
        if show:
            print(msg[0])
        return x, istop, itn, normr, normar, normA, condA, normx

    if normb == 0:
        x[()] = 0
        return x, istop, itn, normr, normar, normA, condA, normx

    if show:
        print(' ')
        print(hdg1, hdg2)
        test1 = 1
        test2 = alpha / beta
        str1 = f'{itn:6g} {x[0]:12.5e}'
        str2 = f' {normr:10.3e} {normar:10.3e}'
        str3 = f' {test1:8.1e} {test2:8.1e}'
        # Also initialize str4 so the summary printed after the loop cannot
        # raise NameError when the loop body never executes (e.g. maxiter=0);
        # previously str4 was only assigned inside the loop.
        str4 = f' {normA:8.1e} {condA:8.1e}'
        print(''.join([str1, str2, str3]))

    # Main iteration loop.
    while itn < maxiter:
        itn = itn + 1

        # Perform the next step of the bidiagonalization to obtain the
        # next beta, u, alpha, v. These satisfy the relations
        # beta*u = A@v - alpha*u,
        # alpha*v = A'@u - beta*v.
        u *= -alpha
        u += A.matvec(v)
        beta = norm(u)

        if beta > 0:
            u *= (1 / beta)
            v *= -beta
            v += A.rmatvec(u)
            alpha = norm(v)
            if alpha > 0:
                v *= (1 / alpha)

        # At this point, beta = beta_{k+1}, alpha = alpha_{k+1}.

        # Construct rotation Qhat_{k,2k+1}.
        chat, shat, alphahat = _sym_ortho(alphabar, damp)

        # Use a plane rotation (Q_i) to turn B_i to R_i
        rhoold = rho
        c, s, rho = _sym_ortho(alphahat, beta)
        thetanew = s*alpha
        alphabar = c*alpha

        # Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar
        rhobarold = rhobar
        zetaold = zeta
        thetabar = sbar * rho
        rhotemp = cbar * rho
        cbar, sbar, rhobar = _sym_ortho(cbar * rho, thetanew)
        zeta = cbar * zetabar
        zetabar = - sbar * zetabar

        # Update h, h_hat, x.
        hbar *= - (thetabar * rho / (rhoold * rhobarold))
        hbar += h
        x += (zeta / (rho * rhobar)) * hbar
        h *= - (thetanew / rho)
        h += v

        # Estimate of ||r||.

        # Apply rotation Qhat_{k,2k+1}.
        betaacute = chat * betadd
        betacheck = -shat * betadd

        # Apply rotation Q_{k,k+1}.
        betahat = c * betaacute
        betadd = -s * betaacute

        # Apply rotation Qtilde_{k-1}.
        # betad = betad_{k-1} here.
        thetatildeold = thetatilde
        ctildeold, stildeold, rhotildeold = _sym_ortho(rhodold, thetabar)
        thetatilde = stildeold * rhobar
        rhodold = ctildeold * rhobar
        betad = - stildeold * betad + ctildeold * betahat

        # betad = betad_k here.
        # rhodold = rhod_k here.
        tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold
        taud = (zeta - thetatilde * tautildeold) / rhodold
        d = d + betacheck * betacheck
        normr = sqrt(d + (betad - taud)**2 + betadd * betadd)

        # Estimate ||A||.
        normA2 = normA2 + beta * beta
        normA = sqrt(normA2)
        normA2 = normA2 + alpha * alpha

        # Estimate cond(A).
        maxrbar = max(maxrbar, rhobarold)
        if itn > 1:
            minrbar = min(minrbar, rhobarold)
        condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp)

        # Test for convergence.

        # Compute norms for convergence testing.
        normar = abs(zetabar)
        normx = norm(x)

        # Now use these norms to estimate certain other quantities,
        # some of which will be small near a solution.
        test1 = normr / normb
        if (normA * normr) != 0:
            test2 = normar / (normA * normr)
        else:
            # Division-by-zero guard: an infinite ratio means the
            # normal-equations test cannot fire this iteration.
            # (Uses the builtin inf rather than numpy.infty, which was
            # removed in NumPy 2.0.)
            test2 = float('inf')
        test3 = 1 / condA
        t1 = test1 / (1 + normA * normx / normb)
        rtol = btol + atol * normA * normx / normb

        # The following tests guard against extremely small values of
        # atol, btol or ctol. (The user may have set any or all of
        # the parameters atol, btol, conlim to 0.)
        # The effect is equivalent to the normAl tests using
        # atol = eps, btol = eps, conlim = 1/eps.
        if itn >= maxiter:
            istop = 7
        if 1 + test3 <= 1:
            istop = 6
        if 1 + test2 <= 1:
            istop = 5
        if 1 + t1 <= 1:
            istop = 4

        # Allow for tolerances set by the user.
        if test3 <= ctol:
            istop = 3
        if test2 <= atol:
            istop = 2
        if test1 <= rtol:
            istop = 1

        # See if it is time to print something.
        if show:
            if (n <= 40) or (itn <= 10) or (itn >= maxiter - 10) or \
               (itn % 10 == 0) or (test3 <= 1.1 * ctol) or \
               (test2 <= 1.1 * atol) or (test1 <= 1.1 * rtol) or \
               (istop != 0):

                if pcount >= pfreq:
                    pcount = 0
                    print(' ')
                    print(hdg1, hdg2)
                pcount = pcount + 1
                str1 = f'{itn:6g} {x[0]:12.5e}'
                str2 = f' {normr:10.3e} {normar:10.3e}'
                str3 = f' {test1:8.1e} {test2:8.1e}'
                str4 = f' {normA:8.1e} {condA:8.1e}'
                print(''.join([str1, str2, str3, str4]))

        if istop > 0:
            break

    # Print the stopping condition.
    if show:
        print(' ')
        print('LSMR finished')
        print(msg[istop])
        print(f'istop ={istop:8g} normr ={normr:8.1e}')
        print(f' normA ={normA:8.1e} normAr ={normar:8.1e}')
        print(f'itn ={itn:8g} condA ={condA:8.1e}')
        print(' normx =%8.1e' % (normx))
        print(str1, str2)
        print(str3, str4)

    return x, istop, itn, normr, normar, normA, condA, normx
| 15,657
| 31.151951
| 79
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_isolve/tfqmr.py
|
import numpy as np
from .utils import make_system
__all__ = ['tfqmr']
def tfqmr(A, b, x0=None, tol=1e-5, maxiter=None, M=None,
          callback=None, atol=None, show=False):
    """
    Use Transpose-Free Quasi-Minimal Residual iteration to solve ``Ax = b``.

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        The real or complex N-by-N matrix of the linear system.
        Alternatively, `A` can be a linear operator which can
        produce ``Ax`` using, e.g.,
        `scipy.sparse.linalg.LinearOperator`.
    b : {ndarray}
        Right hand side of the linear system. Has shape (N,) or (N,1).
    x0 : {ndarray}
        Starting guess for the solution.
    tol, atol : float, optional
        Tolerances for convergence, ``norm(residual) <= max(tol*norm(b-Ax0), atol)``.
        The default for `tol` is 1.0e-5.
        The default for `atol` is ``tol * norm(b-Ax0)``.

        .. warning::
           The default value for `atol` will be changed in a future release.
           For future compatibility, specify `atol` explicitly.
    maxiter : int, optional
        Maximum number of iterations.  Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
        Default is ``min(10000, ndofs * 10)``, where ``ndofs = A.shape[0]``.
    M : {sparse matrix, ndarray, LinearOperator}
        Inverse of the preconditioner of A.  M should approximate the
        inverse of A and be easy to solve for (see Notes).  Effective
        preconditioning dramatically improves the rate of convergence,
        which implies that fewer iterations are needed to reach a given
        error tolerance.  By default, no preconditioner is used.
    callback : function, optional
        User-supplied function to call after each iteration.  It is called
        as `callback(xk)`, where `xk` is the current solution vector.
    show : bool, optional
        Specify ``show = True`` to show the convergence, ``show = False`` is
        to close the output of the convergence.
        Default is `False`.

    Returns
    -------
    x : ndarray
        The converged solution.
    info : int
        Provides convergence information:

            - 0  : successful exit
            - >0 : convergence to tolerance not achieved, number of iterations
            - <0 : illegal input or breakdown

    Notes
    -----
    The Transpose-Free QMR algorithm is derived from the CGS algorithm.
    However, unlike CGS, the convergence curves for the TFQMR method is
    smoothed by computing a quasi minimization of the residual norm. The
    implementation supports left preconditioner, and the "residual norm"
    to compute in convergence criterion is actually an upper bound on the
    actual residual norm ``||b - Axk||``.

    References
    ----------
    .. [1] R. W. Freund, A Transpose-Free Quasi-Minimal Residual Algorithm for
           Non-Hermitian Linear Systems, SIAM J. Sci. Comput., 14(2), 470-482,
           1993.
    .. [2] Y. Saad, Iterative Methods for Sparse Linear Systems, 2nd edition,
           SIAM, Philadelphia, 2003.
    .. [3] C. T. Kelley, Iterative Methods for Linear and Nonlinear Equations,
           number 16 in Frontiers in Applied Mathematics, SIAM, Philadelphia,
           1995.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import tfqmr
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> b = np.array([2, 4, -1], dtype=float)
    >>> x, exitCode = tfqmr(A, b)
    >>> print(exitCode)            # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True
    """
    # Check data type.  Promote any integer dtype to float: the iteration
    # below relies on true division and norms, which are meaningless in
    # integer arithmetic.  Use the abstract np.integer so that every
    # integer width is caught (int32 is the default int on Windows, and
    # the previous np.int64-only check silently skipped it).
    dtype = A.dtype
    if np.issubdtype(dtype, np.integer):
        dtype = float
        A = A.astype(dtype)
    if np.issubdtype(b.dtype, np.integer):
        b = b.astype(dtype)

    A, M, x, b, postprocess = make_system(A, M, x0, b)

    # Check if the R.H.S is a zero vector
    if np.linalg.norm(b) == 0.:
        x = b.copy()
        return (postprocess(x), 0)

    ndofs = A.shape[0]
    if maxiter is None:
        maxiter = min(10000, ndofs * 10)

    if x0 is None:
        r = b.copy()
    else:
        r = b - A.matvec(x)
    u = r
    w = r.copy()
    # Take rstar as b - Ax0, that is rstar := r = b - Ax0 mathematically
    rstar = r
    v = M.matvec(A.matvec(r))
    uhat = v
    d = theta = eta = 0.
    rho = np.inner(rstar.conjugate(), r)
    rhoLast = rho
    r0norm = np.sqrt(rho)
    tau = r0norm
    if r0norm == 0:
        return (postprocess(x), 0)

    if atol is None:
        atol = tol * r0norm
    else:
        atol = max(atol, tol * r0norm)

    for iter in range(maxiter):
        even = iter % 2 == 0
        if (even):
            vtrstar = np.inner(rstar.conjugate(), v)
            # Check breakdown
            if vtrstar == 0.:
                return (postprocess(x), -1)
            alpha = rho / vtrstar
            uNext = u - alpha * v  # [1]-(5.6)
        w -= alpha * uhat  # [1]-(5.8)
        d = u + (theta**2 / alpha) * eta * d  # [1]-(5.5)
        # [1]-(5.2)
        theta = np.linalg.norm(w) / tau
        c = np.sqrt(1. / (1 + theta**2))
        tau *= theta * c
        # Calculate step and direction [1]-(5.4)
        eta = (c**2) * alpha
        z = M.matvec(d)
        x += eta * z

        if callback is not None:
            callback(x)

        # Convergence criteron
        if tau * np.sqrt(iter+1) < atol:
            if (show):
                print("TFQMR: Linear solve converged due to reach TOL "
                      "iterations {}".format(iter+1))
            return (postprocess(x), 0)

        if (not even):
            # [1]-(5.7)
            rho = np.inner(rstar.conjugate(), w)
            beta = rho / rhoLast
            u = w + beta * u
            v = beta * uhat + (beta**2) * v
            uhat = M.matvec(A.matvec(u))
            v += uhat
        else:
            uhat = M.matvec(A.matvec(uNext))
            u = uNext
            rhoLast = rho

    if (show):
        print("TFQMR: Linear solve not converged due to reach MAXIT "
              "iterations {}".format(iter+1))
    return (postprocess(x), maxiter)
| 6,241
| 32.740541
| 85
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_isolve/_gcrotmk.py
|
# Copyright (C) 2015, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as SciPy.
import warnings
import numpy as np
from numpy.linalg import LinAlgError
from scipy.linalg import (get_blas_funcs, qr, solve, svd, qr_insert, lstsq)
from scipy.sparse.linalg._isolve.utils import make_system
__all__ = ['gcrotmk']
def _fgmres(matvec, v0, m, atol, lpsolve=None, rpsolve=None, cs=(), outer_v=(),
            prepend_outer_v=False):
    """
    FGMRES Arnoldi process, with optional projection or augmentation

    Parameters
    ----------
    matvec : callable
        Operation A*x
    v0 : ndarray
        Initial vector, normalized to nrm2(v0) == 1
    m : int
        Number of GMRES rounds
    atol : float
        Absolute tolerance for early exit
    lpsolve : callable
        Left preconditioner L
    rpsolve : callable
        Right preconditioner R
    cs : list of (ndarray, ndarray)
        Columns of matrices C and U in GCROT
    outer_v : list of ndarrays
        Augmentation vectors in LGMRES
    prepend_outer_v : bool, optional
        Whether augmentation vectors come before or after
        Krylov iterates

    Raises
    ------
    LinAlgError
        If nans encountered

    Returns
    -------
    Q, R : ndarray
        QR decomposition of the upper Hessenberg H=QR
    B : ndarray
        Projections corresponding to matrix C
    vs : list of ndarray
        Columns of matrix V
    zs : list of ndarray
        Columns of matrix Z
    y : ndarray
        Solution to ||H y - e_1||_2 = min!
    res : float
        The final (preconditioned) residual norm
    """
    # Default preconditioners are the identity.
    if lpsolve is None:
        def lpsolve(x):
            return x
    if rpsolve is None:
        def rpsolve(x):
            return x
    axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (v0,))
    vs = [v0]
    zs = []
    y = None
    res = np.nan
    # Each augmentation vector consumes one round in addition to `m`
    # Krylov iterates.
    m = m + len(outer_v)
    # Orthogonal projection coefficients
    B = np.zeros((len(cs), m), dtype=v0.dtype)
    # H is stored in QR factorized form
    Q = np.ones((1, 1), dtype=v0.dtype)
    R = np.zeros((1, 0), dtype=v0.dtype)
    eps = np.finfo(v0.dtype).eps
    breakdown = False
    # FGMRES Arnoldi process
    for j in range(m):
        # L A Z = C B + V H
        # Pick the next direction z: an augmentation vector (at the start
        # or the end of the rounds, per prepend_outer_v) or the
        # right-preconditioned latest Krylov vector.
        if prepend_outer_v and j < len(outer_v):
            z, w = outer_v[j]
        elif prepend_outer_v and j == len(outer_v):
            z = rpsolve(v0)
            w = None
        elif not prepend_outer_v and j >= m - len(outer_v):
            z, w = outer_v[j - (m - len(outer_v))]
        else:
            z = rpsolve(vs[-1])
            w = None
        if w is None:
            w = lpsolve(matvec(z))
        else:
            # w is clobbered below
            w = w.copy()
        w_norm = nrm2(w)
        # GCROT projection: L A -> (1 - C C^H) L A
        # i.e. orthogonalize against C
        for i, c in enumerate(cs):
            alpha = dot(c, w)
            B[i,j] = alpha
            w = axpy(c, w, c.shape[0], -alpha)  # w -= alpha*c
        # Orthogonalize against V
        hcur = np.zeros(j+2, dtype=Q.dtype)
        for i, v in enumerate(vs):
            alpha = dot(v, w)
            hcur[i] = alpha
            w = axpy(v, w, v.shape[0], -alpha)  # w -= alpha*v
        hcur[i+1] = nrm2(w)
        with np.errstate(over='ignore', divide='ignore'):
            # Careful with denormals
            alpha = 1/hcur[-1]
            if np.isfinite(alpha):
                w = scal(alpha, w)
        if not (hcur[-1] > eps * w_norm):
            # w essentially in the span of previous vectors,
            # or we have nans. Bail out after updating the QR
            # solution.
            breakdown = True
        vs.append(w)
        zs.append(z)
        # Arnoldi LSQ problem
        # Add new column to H=Q@R, padding other columns with zeros
        Q2 = np.zeros((j+2, j+2), dtype=Q.dtype, order='F')
        Q2[:j+1,:j+1] = Q
        Q2[j+1,j+1] = 1
        R2 = np.zeros((j+2, j), dtype=R.dtype, order='F')
        R2[:j+1,:] = R
        Q, R = qr_insert(Q2, R2, hcur, j, which='col',
                         overwrite_qru=True, check_finite=False)
        # Transformed least squares problem
        # || Q R y - inner_res_0 * e_1 ||_2 = min!
        # Since R = [R'; 0], solution is y = inner_res_0 (R')^{-1} (Q^H)[:j,0]
        # Residual is immediately known
        res = abs(Q[0,-1])
        # Check for termination
        if res < atol or breakdown:
            break
    if not np.isfinite(R[j,j]):
        # nans encountered, bail out
        raise LinAlgError()
    # -- Get the LSQ problem solution
    # The problem is triangular, but the condition number may be
    # bad (or in case of breakdown the last diagonal entry may be
    # zero), so use lstsq instead of trtrs.
    y, _, _, _, = lstsq(R[:j+1,:j+1], Q[0,:j+1].conj())
    B = B[:,:j+1]
    return Q, R, B, vs, zs, y, res
def gcrotmk(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None,
            m=20, k=None, CU=None, discard_C=False, truncate='oldest',
            atol=None):
    """
    Solve a matrix equation using flexible GCROT(m,k) algorithm.

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        The real or complex N-by-N matrix of the linear system.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : ndarray
        Right hand side of the linear system. Has shape (N,) or (N,1).
    x0 : ndarray
        Starting guess for the solution.
    tol, atol : float, optional
        Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
        The default for ``atol`` is `tol`.

        .. warning::
           The default value for `atol` will be changed in a future release.
           For future compatibility, specify `atol` explicitly.
    maxiter : int, optional
        Maximum number of iterations.  Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M : {sparse matrix, ndarray, LinearOperator}, optional
        Preconditioner for A.  The preconditioner should approximate the
        inverse of A. gcrotmk is a 'flexible' algorithm and the preconditioner
        can vary from iteration to iteration. Effective preconditioning
        dramatically improves the rate of convergence, which implies that
        fewer iterations are needed to reach a given error tolerance.
    callback : function, optional
        User-supplied function to call after each iteration.  It is called
        as callback(xk), where xk is the current solution vector.
    m : int, optional
        Number of inner FGMRES iterations per each outer iteration.
        Default: 20
    k : int, optional
        Number of vectors to carry between inner FGMRES iterations.
        According to [2]_, good values are around m.
        Default: m
    CU : list of tuples, optional
        List of tuples ``(c, u)`` which contain the columns of the matrices
        C and U in the GCROT(m,k) algorithm. For details, see [2]_.
        The list given and vectors contained in it are modified in-place.
        If not given, start from empty matrices. The ``c`` elements in the
        tuples can be ``None``, in which case the vectors are recomputed
        via ``c = A u`` on start and orthogonalized as described in [3]_.
    discard_C : bool, optional
        Discard the C-vectors at the end. Useful if recycling Krylov subspaces
        for different linear systems.
    truncate : {'oldest', 'smallest'}, optional
        Truncation scheme to use. Drop: oldest vectors, or vectors with
        smallest singular values using the scheme discussed in [1,2].
        See [2]_ for detailed comparison.
        Default: 'oldest'

    Returns
    -------
    x : ndarray
        The solution found.
    info : int
        Provides convergence information:

        * 0  : successful exit
        * >0 : convergence to tolerance not achieved, number of iterations

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import gcrotmk
    >>> R = np.random.randn(5, 5)
    >>> A = csc_matrix(R)
    >>> b = np.random.randn(5)
    >>> x, exit_code = gcrotmk(A, b)
    >>> print(exit_code)
    0
    >>> np.allclose(A.dot(x), b)
    True

    References
    ----------
    .. [1] E. de Sturler, ''Truncation strategies for optimal Krylov subspace
           methods'', SIAM J. Numer. Anal. 36, 864 (1999).
    .. [2] J.E. Hicken and D.W. Zingg, ''A simplified and flexible variant
           of GCROT for solving nonsymmetric linear systems'',
           SIAM J. Sci. Comput. 32, 172 (2010).
    .. [3] M.L. Parks, E. de Sturler, G. Mackey, D.D. Johnson, S. Maiti,
           ''Recycling Krylov subspaces for sequences of linear systems'',
           SIAM J. Sci. Comput. 28, 1651 (2006).
    """
    A,M,x,b,postprocess = make_system(A,M,x0,b)
    if not np.isfinite(b).all():
        raise ValueError("RHS must contain only finite numbers")
    if truncate not in ('oldest', 'smallest'):
        raise ValueError(f"Invalid value for 'truncate': {truncate!r}")
    if atol is None:
        warnings.warn("scipy.sparse.linalg.gcrotmk called without specifying `atol`. "
                      "The default value will change in the future. To preserve "
                      "current behavior, set ``atol=tol``.",
                      category=DeprecationWarning, stacklevel=2)
        atol = tol
    matvec = A.matvec
    psolve = M.matvec
    if CU is None:
        CU = []
    if k is None:
        k = m
    axpy, dot, scal = None, None, None
    # Initial residual r = b - A x0 (b itself if no initial guess).
    if x0 is None:
        r = b.copy()
    else:
        r = b - matvec(x)
    axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (x, r))
    b_norm = nrm2(b)
    # Zero RHS: x = 0 is the exact solution; return immediately.
    if b_norm == 0:
        x = b
        return (postprocess(x), 0)
    if discard_C:
        CU[:] = [(None, u) for c, u in CU]
    # Reorthogonalize old vectors
    if CU:
        # Sort already existing vectors to the front
        CU.sort(key=lambda cu: cu[0] is not None)
        # Fill-in missing ones
        C = np.empty((A.shape[0], len(CU)), dtype=r.dtype, order='F')
        us = []
        j = 0
        while CU:
            # More memory-efficient: throw away old vectors as we go
            c, u = CU.pop(0)
            if c is None:
                c = matvec(u)
            C[:,j] = c
            j += 1
            us.append(u)
        # Orthogonalize
        Q, R, P = qr(C, overwrite_a=True, mode='economic', pivoting=True)
        del C
        # C := Q
        cs = list(Q.T)
        # U := U P R^-1, back-substitution
        new_us = []
        for j in range(len(cs)):
            u = us[P[j]]
            for i in range(j):
                u = axpy(us[P[i]], u, u.shape[0], -R[i,j])
            if abs(R[j,j]) < 1e-12 * abs(R[0,0]):
                # discard rest of the vectors
                break
            u = scal(1.0/R[j,j], u)
            new_us.append(u)
        # Form the new CU lists
        CU[:] = list(zip(cs, new_us))[::-1]
    if CU:
        axpy, dot = get_blas_funcs(['axpy', 'dot'], (r,))
        # Solve first the projection operation with respect to the CU
        # vectors. This corresponds to modifying the initial guess to
        # be
        #
        # x' = x + U y
        # y = argmin_y || b - A (x + U y) ||^2
        #
        # The solution is y = C^H (b - A x)
        for c, u in CU:
            yc = dot(c, r)
            x = axpy(u, x, x.shape[0], yc)
            r = axpy(c, r, r.shape[0], -yc)
    # GCROT main iteration
    for j_outer in range(maxiter):
        # -- callback
        if callback is not None:
            callback(x)
        beta = nrm2(r)
        # -- check stopping condition
        beta_tol = max(atol, tol * b_norm)
        if beta <= beta_tol and (j_outer > 0 or CU):
            # recompute residual to avoid rounding error
            r = b - matvec(x)
            beta = nrm2(r)
            if beta <= beta_tol:
                # j_outer = -1 makes the returned info equal to 0
                # (success) via the final `j_outer + 1`.
                j_outer = -1
                break
        ml = m + max(k - len(CU), 0)
        cs = [c for c, u in CU]
        try:
            Q, R, B, vs, zs, y, pres = _fgmres(matvec,
                                               r/beta,
                                               ml,
                                               rpsolve=psolve,
                                               atol=max(atol, tol*b_norm)/beta,
                                               cs=cs)
            y *= beta
        except LinAlgError:
            # Floating point over/underflow, non-finite result from
            # matmul etc. -- report failure.
            break
        #
        # At this point,
        #
        # [A U, A Z] = [C, V] G; G = [ I B ]
        # [ 0 H ]
        #
        # where [C, V] has orthonormal columns, and r = beta v_0. Moreover,
        #
        # || b - A (x + Z y + U q) ||_2 = || r - C B y - V H y - C q ||_2 = min!
        #
        # from which y = argmin_y || beta e_1 - H y ||_2, and q = -B y
        #
        #
        # GCROT(m,k) update
        #
        # Define new outer vectors
        # ux := (Z - U B) y
        ux = zs[0]*y[0]
        for z, yc in zip(zs[1:], y[1:]):
            ux = axpy(z, ux, ux.shape[0], yc)  # ux += z*yc
        by = B.dot(y)
        for cu, byc in zip(CU, by):
            c, u = cu
            ux = axpy(u, ux, ux.shape[0], -byc)  # ux -= u*byc
        # cx := V H y
        hy = Q.dot(R.dot(y))
        cx = vs[0] * hy[0]
        for v, hyc in zip(vs[1:], hy[1:]):
            cx = axpy(v, cx, cx.shape[0], hyc)  # cx += v*hyc
        # Normalize cx, maintaining cx = A ux
        # This new cx is orthogonal to the previous C, by construction
        try:
            alpha = 1/nrm2(cx)
            if not np.isfinite(alpha):
                raise FloatingPointError()
        except (FloatingPointError, ZeroDivisionError):
            # Cannot update, so skip it
            continue
        cx = scal(alpha, cx)
        ux = scal(alpha, ux)
        # Update residual and solution
        gamma = dot(cx, r)
        r = axpy(cx, r, r.shape[0], -gamma)  # r -= gamma*cx
        x = axpy(ux, x, x.shape[0], gamma)  # x += gamma*ux
        # Truncate CU
        if truncate == 'oldest':
            while len(CU) >= k and CU:
                del CU[0]
        elif truncate == 'smallest':
            if len(CU) >= k and CU:
                # cf. [1,2]
                D = solve(R[:-1,:].T, B.T).T
                W, sigma, V = svd(D)
                # C := C W[:,:k-1], U := U W[:,:k-1]
                new_CU = []
                for j, w in enumerate(W[:,:k-1].T):
                    c, u = CU[0]
                    c = c * w[0]
                    u = u * w[0]
                    for cup, wp in zip(CU[1:], w[1:]):
                        cp, up = cup
                        c = axpy(cp, c, c.shape[0], wp)
                        u = axpy(up, u, u.shape[0], wp)
                    # Reorthogonalize at the same time; not necessary
                    # in exact arithmetic, but floating point error
                    # tends to accumulate here
                    for cp, up in new_CU:
                        alpha = dot(cp, c)
                        c = axpy(cp, c, c.shape[0], -alpha)
                        u = axpy(up, u, u.shape[0], -alpha)
                    alpha = nrm2(c)
                    c = scal(1.0/alpha, c)
                    u = scal(1.0/alpha, u)
                    new_CU.append((c, u))
                CU[:] = new_CU
        # Add new vector to CU
        CU.append((cx, ux))
    # Include the solution vector to the span
    CU.append((None, x.copy()))
    if discard_C:
        CU[:] = [(None, uz) for cz, uz in CU]
    return postprocess(x), j_outer + 1
| 15,984
| 30.343137
| 86
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py
|
#!/usr/bin/env python
"""Tests for the linalg._isolve.gcrotmk module
"""
from numpy.testing import (assert_, assert_allclose, assert_equal,
suppress_warnings)
import numpy as np
from numpy import zeros, array, allclose
from scipy.linalg import norm
from scipy.sparse import csr_matrix, eye, rand
from scipy.sparse.linalg._interface import LinearOperator
from scipy.sparse.linalg import splu
from scipy.sparse.linalg._isolve import gcrotmk, gmres
# 6x6 sparse nonsymmetric test matrix shared by every test in this module.
Am = csr_matrix(array([[-2,1,0,0,0,9],
                       [1,-2,1,0,5,0],
                       [0,1,-2,1,0,0],
                       [0,0,1,-2,1,0],
                       [0,3,0,1,-2,1],
                       [1,0,0,0,1,-2]]))
b = array([1,2,3,4,5,6])
# Mutable one-element counter; incremented on every matvec so tests can
# assert how many operator applications a solve needed.
count = [0]


def matvec(v):
    # Count this application, then delegate to the sparse matrix product.
    count[0] += 1
    return Am@v


# LinearOperator wrapping the counting matvec above.
A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
def do_solve(**kw):
    """Run gcrotmk on the module fixture A, b and return (solution, #matvecs).

    Resets the global matvec counter first; extra keyword arguments are
    forwarded to `gcrotmk`. Asserts that the returned vector actually
    solves the system to tight tolerance.
    """
    count[0] = 0
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        sol, flag = gcrotmk(A, b, x0=zeros(A.shape[0]), tol=1e-14, **kw)
    n_matvecs = count[0]
    # Verify the solution before handing it back (residual shown on failure).
    assert_(allclose(A@sol, b, rtol=1e-12, atol=1e-12), norm(A@sol-b))
    return sol, n_matvecs
class TestGCROTMK:
    def test_preconditioner(self):
        # Check that preconditioning works: an exact LU preconditioner must
        # drop the matvec count far below the unpreconditioned solve.
        pc = splu(Am.tocsc())
        M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)

        x0, count_0 = do_solve()
        x1, count_1 = do_solve(M=M)

        assert_equal(count_1, 3)
        assert_(count_1 < count_0/2)
        assert_(allclose(x1, x0, rtol=1e-14))

    def test_arnoldi(self):
        np.random.seed(1)

        A = eye(2000) + rand(2000, 2000, density=5e-4)
        b = np.random.rand(2000)

        # The inner arnoldi should be equivalent to gmres
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x0, flag0 = gcrotmk(A, b, x0=zeros(A.shape[0]), m=15, k=0, maxiter=1)
            x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=15, maxiter=1)

        # Both solvers hit maxiter (flag 1) without converging ...
        assert_equal(flag0, 1)
        assert_equal(flag1, 1)
        assert np.linalg.norm(A.dot(x0) - b) > 1e-3

        # ... and produce the same iterate.
        assert_allclose(x0, x1)

    def test_cornercase(self):
        np.random.seed(1234)

        # Rounding error may prevent convergence with tol=0 --- ensure
        # that the return values in this case are correct, and no
        # exceptions are raised

        for n in [3, 5, 10, 100]:
            A = 2*eye(n)

            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning, ".*called without specifying.*")
                b = np.ones(n)
                x, info = gcrotmk(A, b, maxiter=10)
                assert_equal(info, 0)
                assert_allclose(A.dot(x) - b, 0, atol=1e-14)

                x, info = gcrotmk(A, b, tol=0, maxiter=10)
                if info == 0:
                    assert_allclose(A.dot(x) - b, 0, atol=1e-14)

                b = np.random.rand(n)
                x, info = gcrotmk(A, b, maxiter=10)
                assert_equal(info, 0)
                assert_allclose(A.dot(x) - b, 0, atol=1e-14)

                x, info = gcrotmk(A, b, tol=0, maxiter=10)
                if info == 0:
                    assert_allclose(A.dot(x) - b, 0, atol=1e-14)

    def test_nans(self):
        # A NaN in the operator must be reported as non-convergence
        # (info=1), not raise an exception.
        A = eye(3, format='lil')
        A[1,1] = np.nan
        b = np.ones(3)

        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x, info = gcrotmk(A, b, tol=0, maxiter=10)
            assert_equal(info, 1)

    def test_truncate(self):
        # Both truncation schemes must converge on a well-conditioned
        # random system.
        np.random.seed(1234)
        A = np.random.rand(30, 30) + np.eye(30)
        b = np.random.rand(30)

        for truncate in ['oldest', 'smallest']:
            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning, ".*called without specifying.*")
                x, info = gcrotmk(A, b, m=10, k=10, truncate=truncate, tol=1e-4,
                                  maxiter=200)
            assert_equal(info, 0)
            assert_allclose(A.dot(x) - b, 0, atol=1e-3)

    def test_CU(self):
        for discard_C in (True, False):
            # Check that C,U behave as expected
            CU = []
            x0, count_0 = do_solve(CU=CU, discard_C=discard_C)
            assert_(len(CU) > 0)
            assert_(len(CU) <= 6)

            if discard_C:
                for c, u in CU:
                    assert_(c is None)

            # should converge immediately
            x1, count_1 = do_solve(CU=CU, discard_C=discard_C)
            if discard_C:
                assert_equal(count_1, 2 + len(CU))
            else:
                assert_equal(count_1, 3)
            assert_(count_1 <= count_0/2)
            assert_allclose(x1, x0, atol=1e-14)

    def test_denormals(self):
        # Check that no warnings are emitted if the matrix contains
        # numbers for which 1/x has no float representation, and that
        # the solver behaves properly.
        A = np.array([[1, 2], [3, 4]], dtype=float)
        A *= 100 * np.nextafter(0, 1)

        b = np.array([1, 1])

        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            xp, info = gcrotmk(A, b)

        if info == 0:
            assert_allclose(A.dot(xp), b)
| 5,408
| 31.584337
| 81
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_isolve/tests/test_lsmr.py
|
"""
Copyright (C) 2010 David Fong and Michael Saunders
Distributed under the same license as SciPy
Testing Code for LSMR.
03 Jun 2010: First version release with lsmr.py
David Chin-lung Fong clfong@stanford.edu
Institute for Computational and Mathematical Engineering
Stanford University
Michael Saunders saunders@stanford.edu
Systems Optimization Laboratory
Dept of MS&E, Stanford University.
"""
from numpy import array, arange, eye, zeros, ones, transpose, hstack
from numpy.linalg import norm
from numpy.testing import assert_allclose
import pytest
from scipy.sparse import coo_matrix
from scipy.sparse.linalg._interface import aslinearoperator
from scipy.sparse.linalg import lsmr
from .test_lsqr import G, b
class TestLSMR:
    """Functional tests for lsmr on small identity / bidiagonal systems."""

    def setup_method(self):
        # Problem dimensions used by the identity-matrix cases below.
        self.n = 10
        self.m = 10

    def assertCompatibleSystem(self, A, xtrue):
        """Solve A x = A @ xtrue with lsmr and check x recovers xtrue."""
        Afun = aslinearoperator(A)
        b = Afun.matvec(xtrue)
        x = lsmr(A, b)[0]
        assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)

    def testIdentityACase1(self):
        # zero right-hand side -> zero solution
        A = eye(self.n)
        xtrue = zeros((self.n, 1))
        self.assertCompatibleSystem(A, xtrue)

    def testIdentityACase2(self):
        A = eye(self.n)
        xtrue = ones((self.n,1))
        self.assertCompatibleSystem(A, xtrue)

    def testIdentityACase3(self):
        A = eye(self.n)
        xtrue = transpose(arange(self.n,0,-1))
        self.assertCompatibleSystem(A, xtrue)

    def testBidiagonalA(self):
        # Overdetermined (20 x n) lower-bidiagonal system.
        A = lowerBidiagonalMatrix(20,self.n)
        xtrue = transpose(arange(self.n,0,-1))
        self.assertCompatibleSystem(A,xtrue)

    def testScalarB(self):
        # A scalar right-hand side must be accepted.
        A = array([[1.0, 2.0]])
        b = 3.0
        x = lsmr(A, b)[0]
        assert norm(A.dot(x) - b) == pytest.approx(0)

    def testComplexX(self):
        A = eye(self.n)
        xtrue = transpose(arange(self.n, 0, -1) * (1 + 1j))
        self.assertCompatibleSystem(A, xtrue)

    def testComplexX0(self):
        # Complex initial guess for a real system.
        A = 4 * eye(self.n) + ones((self.n, self.n))
        xtrue = transpose(arange(self.n, 0, -1))
        b = aslinearoperator(A).matvec(xtrue)
        x0 = zeros(self.n, dtype=complex)
        x = lsmr(A, b, x0=x0)[0]
        assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)

    def testComplexA(self):
        A = 4 * eye(self.n) + 1j * ones((self.n, self.n))
        xtrue = transpose(arange(self.n, 0, -1).astype(complex))
        self.assertCompatibleSystem(A, xtrue)

    def testComplexB(self):
        A = 4 * eye(self.n) + ones((self.n, self.n))
        xtrue = transpose(arange(self.n, 0, -1) * (1 + 1j))
        b = aslinearoperator(A).matvec(xtrue)
        x = lsmr(A, b)[0]
        assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)

    def testColumnB(self):
        # A column-vector (n, 1) right-hand side must be accepted.
        A = eye(self.n)
        b = ones((self.n, 1))
        x = lsmr(A, b)[0]
        assert norm(A.dot(x) - b.ravel()) == pytest.approx(0)

    def testInitialization(self):
        # Test that the default setting is not modified
        x_ref, _, itn_ref, normr_ref, *_ = lsmr(G, b)
        assert_allclose(norm(b - G@x_ref), normr_ref, atol=1e-6)
        # Test passing zeros yields similiar result
        x0 = zeros(b.shape)
        x = lsmr(G, b, x0=x0)[0]
        assert_allclose(x, x_ref)
        # Test warm-start with single iteration
        x0 = lsmr(G, b, maxiter=1)[0]
        x, _, itn, normr, *_ = lsmr(G, b, x0=x0)
        assert_allclose(norm(b - G@x), normr, atol=1e-6)
        # NOTE(gh-12139): This doesn't always converge to the same value as
        # ref because error estimates will be slightly different when calculated
        # from zeros vs x0 as a result only compare norm and itn (not x).
        # x generally converges 1 iteration faster because it started at x0.
        # itn == itn_ref means that lsmr(x0) took an extra iteration see above.
        # -1 is technically possible but is rare (1 in 100000) so it's more
        # likely to be an error elsewhere.
        assert itn - itn_ref in (0, 1)
        # If an extra iteration is performed normr may be 0, while normr_ref
        # may be much larger.
        assert normr < normr_ref * (1 + 1e-6)
class TestLSMRReturns:
    """Check the diagnostic quantities reported by lsmr.

    The return tuple is (x, istop, itn, normr, normar, normA, condA, normx);
    the tests verify the norm estimates against directly computed values.
    """

    def setup_method(self):
        # Build a 20-by-10 lower-bidiagonal system with a known solution,
        # then solve it once without and once with an initial guess.
        self.n = 10
        self.A = lowerBidiagonalMatrix(20, self.n)
        self.xtrue = transpose(arange(self.n, 0, -1))
        self.Afun = aslinearoperator(self.A)
        self.b = self.Afun.matvec(self.xtrue)
        self.x0 = ones(self.n)
        self.x00 = self.x0.copy()
        self.returnValues = lsmr(self.A, self.b)
        self.returnValuesX0 = lsmr(self.A, self.b, x0=self.x0)

    def test_unchanged_x0(self):
        # Passing an initial guess must not modify the caller's array.
        assert_allclose(self.x00, self.x0)

    def testNormr(self):
        # normr must equal the norm of the actual residual b - A x.
        x, normr = self.returnValues[0], self.returnValues[3]
        residual = self.b - self.Afun.matvec(x)
        assert norm(residual) == pytest.approx(normr)

    def testNormar(self):
        # normar must equal ||A^T (b - A x)||, the normal-equations residual.
        x, normar = self.returnValues[0], self.returnValues[4]
        grad = self.Afun.rmatvec(self.b - self.Afun.matvec(x))
        assert norm(grad) == pytest.approx(normar)

    def testNormx(self):
        # normx must equal the norm of the returned solution.
        x, normx = self.returnValues[0], self.returnValues[7]
        assert norm(x) == pytest.approx(normx)
def lowerBidiagonalMatrix(m, n):
    """Return the leading m-by-n section of a lower-bidiagonal test matrix.

    The full pattern (used to exercise LSMR) is
        [ 1
          1 2
            2 3
              3 ...
                  n ]
    i.e. entry (i, i) = i + 1 on the main diagonal and entry
    (i + 1, i) = i + 1 on the first subdiagonal, suitably padded by zeros.

    04 Jun 2010: First version for distribution with lsmr.py
    """
    # How many diagonal / subdiagonal entries fit inside an (m, n) matrix.
    ndiag = min(m, n)
    nsub = m - 1 if m <= n else n
    row = hstack((arange(ndiag, dtype=int),
                  arange(1, nsub + 1, dtype=int)))
    col = hstack((arange(ndiag, dtype=int),
                  arange(nsub, dtype=int)))
    data = hstack((arange(1, ndiag + 1, dtype=float),
                   arange(1, nsub + 1, dtype=float)))
    return coo_matrix((data, (row, col)), shape=(m, n))
| 6,366
| 33.231183
| 80
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_isolve/tests/test_lgmres.py
|
"""Tests for the linalg._isolve.lgmres module
"""
from numpy.testing import (assert_, assert_allclose, assert_equal,
suppress_warnings)
import pytest
from platform import python_implementation
import numpy as np
from numpy import zeros, array, allclose
from scipy.linalg import norm
from scipy.sparse import csr_matrix, eye, rand
from scipy.sparse.linalg._interface import LinearOperator
from scipy.sparse.linalg import splu
from scipy.sparse.linalg._isolve import lgmres, gmres
# Fixed 6x6 nonsymmetric sparse test system used by all TestLGMRES cases.
Am = csr_matrix(array([[-2, 1, 0, 0, 0, 9],
                       [1, -2, 1, 0, 5, 0],
                       [0, 1, -2, 1, 0, 0],
                       [0, 0, 1, -2, 1, 0],
                       [0, 3, 0, 1, -2, 1],
                       [1, 0, 0, 0, 1, -2]]))
b = array([1, 2, 3, 4, 5, 6])
# Shared matvec counter (a list so the nested function can mutate it).
count = [0]
def matvec(v):
    # Counting matrix-vector product; the tests assert on the call count.
    count[0] += 1
    return Am@v
A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
def do_solve(**kw):
    """Solve A x = b with lgmres and return (solution, matvec count)."""
    count[0] = 0  # reset the shared counter before each solve
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        x0, flag = lgmres(A, b, x0=zeros(A.shape[0]),
                          inner_m=6, tol=1e-14, **kw)
    count_0 = count[0]
    # Sanity check: the returned vector must actually solve the system.
    assert_(allclose(A@x0, b, rtol=1e-12, atol=1e-12), norm(A@x0-b))
    return x0, count_0
class TestLGMRES:
    """Tests for the LGMRES solver against the module-level Am/b system.

    Several assertions pin exact matvec counts recorded through the shared
    ``count`` global, so the order and number of ``do_solve`` calls matters.
    """

    def test_preconditioner(self):
        # Check that preconditioning works
        pc = splu(Am.tocsc())
        M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)
        x0, count_0 = do_solve()
        x1, count_1 = do_solve(M=M)
        # Exact LU preconditioner: exactly 3 matvecs, much cheaper than
        # the unpreconditioned solve.
        assert_(count_1 == 3)
        assert_(count_1 < count_0/2)
        assert_(allclose(x1, x0, rtol=1e-14))

    def test_outer_v(self):
        # Check that the augmentation vectors behave as expected
        outer_v = []
        x0, count_0 = do_solve(outer_k=6, outer_v=outer_v)
        assert_(len(outer_v) > 0)
        assert_(len(outer_v) <= 6)
        # Re-solving with recycled augmentation vectors should converge
        # almost immediately.
        x1, count_1 = do_solve(outer_k=6, outer_v=outer_v,
                               prepend_outer_v=True)
        assert_(count_1 == 2, count_1)
        assert_(count_1 < count_0/2)
        assert_(allclose(x1, x0, rtol=1e-14))
        # ---
        # Same check, but without storing A@v alongside each vector;
        # recomputing them costs one extra matvec.
        outer_v = []
        x0, count_0 = do_solve(outer_k=6, outer_v=outer_v,
                               store_outer_Av=False)
        assert_(array([v[1] is None for v in outer_v]).all())
        assert_(len(outer_v) > 0)
        assert_(len(outer_v) <= 6)
        x1, count_1 = do_solve(outer_k=6, outer_v=outer_v,
                               prepend_outer_v=True)
        assert_(count_1 == 3, count_1)
        assert_(count_1 < count_0/2)
        assert_(allclose(x1, x0, rtol=1e-14))

    @pytest.mark.skipif(python_implementation() == 'PyPy',
                        reason="Fails on PyPy CI runs. See #9507")
    def test_arnoldi(self):
        np.random.seed(1234)
        A = eye(2000) + rand(2000, 2000, density=5e-4)
        b = np.random.rand(2000)
        # The inner arnoldi should be equivalent to gmres
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x0, flag0 = lgmres(A, b, x0=zeros(A.shape[0]),
                               inner_m=15, maxiter=1)
            x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]),
                              restart=15, maxiter=1)
        # Both stop at maxiter without converging ...
        assert_equal(flag0, 1)
        assert_equal(flag1, 1)
        norm = np.linalg.norm(A.dot(x0) - b)
        assert_(norm > 1e-4)
        # ... and produce the same iterate.
        assert_allclose(x0, x1)

    def test_cornercase(self):
        np.random.seed(1234)
        # Rounding error may prevent convergence with tol=0 --- ensure
        # that the return values in this case are correct, and no
        # exceptions are raised
        for n in [3, 5, 10, 100]:
            A = 2*eye(n)
            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning, ".*called without specifying.*")
                b = np.ones(n)
                x, info = lgmres(A, b, maxiter=10)
                assert_equal(info, 0)
                assert_allclose(A.dot(x) - b, 0, atol=1e-14)
                # tol=0 may or may not converge; if it claims success,
                # the answer must still be correct.
                x, info = lgmres(A, b, tol=0, maxiter=10)
                if info == 0:
                    assert_allclose(A.dot(x) - b, 0, atol=1e-14)
                b = np.random.rand(n)
                x, info = lgmres(A, b, maxiter=10)
                assert_equal(info, 0)
                assert_allclose(A.dot(x) - b, 0, atol=1e-14)
                x, info = lgmres(A, b, tol=0, maxiter=10)
                if info == 0:
                    assert_allclose(A.dot(x) - b, 0, atol=1e-14)

    def test_nans(self):
        # A NaN in the matrix must lead to a non-convergence flag,
        # not to an exception or a bogus "converged" result.
        A = eye(3, format='lil')
        A[1, 1] = np.nan
        b = np.ones(3)
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x, info = lgmres(A, b, tol=0, maxiter=10)
            assert_equal(info, 1)

    def test_breakdown_with_outer_v(self):
        A = np.array([[1, 2], [3, 4]], dtype=float)
        b = np.array([1, 2])
        x = np.linalg.solve(A, b)
        v0 = np.array([1, 0])
        # The inner iteration should converge to the correct solution,
        # since it's in the outer vector list
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            xp, info = lgmres(A, b, outer_v=[(v0, None), (x, None)], maxiter=1)
        assert_allclose(xp, x, atol=1e-12)

    def test_breakdown_underdetermined(self):
        # Should find LSQ solution in the Krylov span in one inner
        # iteration, despite solver breakdown from nilpotent A.
        A = np.array([[0, 1, 1, 1],
                      [0, 0, 1, 1],
                      [0, 0, 0, 1],
                      [0, 0, 0, 0]], dtype=float)
        bs = [
            np.array([1, 1, 1, 1]),
            np.array([1, 1, 1, 0]),
            np.array([1, 1, 0, 0]),
            np.array([1, 0, 0, 0]),
        ]
        for b in bs:
            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning, ".*called without specifying.*")
                xp, info = lgmres(A, b, maxiter=1)
            resp = np.linalg.norm(A.dot(xp) - b)
            # Compare with the explicit least-squares solution restricted
            # to the 4-dimensional Krylov subspace K = [b, Ab, A^2 b, A^3 b].
            K = np.c_[b, A.dot(b), A.dot(A.dot(b)), A.dot(A.dot(A.dot(b)))]
            y, _, _, _ = np.linalg.lstsq(A.dot(K), b, rcond=-1)
            x = K.dot(y)
            res = np.linalg.norm(A.dot(x) - b)
            assert_allclose(resp, res, err_msg=repr(b))

    def test_denormals(self):
        # Check that no warnings are emitted if the matrix contains
        # numbers for which 1/x has no float representation, and that
        # the solver behaves properly.
        A = np.array([[1, 2], [3, 4]], dtype=float)
        A *= 100 * np.nextafter(0, 1)
        b = np.array([1, 1])
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            xp, info = lgmres(A, b)
        if info == 0:
            assert_allclose(A.dot(xp), b)
| 7,060
| 32.306604
| 79
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_isolve/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/sparse/linalg/_isolve/tests/test_lsqr.py
|
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal, assert_equal
import pytest
import scipy.sparse
import scipy.sparse.linalg
from scipy.sparse.linalg import lsqr
# Set up a test problem: a dense, randomly perturbed 35x35 system shared by
# the lsqr tests below (module-level so every test uses the same G, b).
n = 35
G = np.eye(n)
normal = np.random.normal
norm = np.linalg.norm
for jj in range(5):
    gg = normal(size=n)
    # NOTE(review): gg is 1-D, so gg.T is gg and this is an elementwise
    # square broadcast over rows, not an outer product as the symmetrizing
    # (hh + hh.T) * 0.5 below suggests — presumably intentional by now, as
    # the test tolerances are tuned to this matrix; confirm before changing.
    hh = gg * gg.T
    G += (hh + hh.T) * 0.5
    G += normal(size=n) * normal(size=n)
b = normal(size=n)
# tolerance for atol/btol keywords of lsqr()
tol = 2e-10
# tolerances for testing the results of the lsqr() call with assert_allclose
# These tolerances are a bit fragile - see discussion in gh-15301.
atol_test = 4e-10
rtol_test = 2e-8
# shared lsqr options: no verbose output, no iteration limit
show = False
maxit = None
def test_lsqr_basic():
    """lsqr matches a direct dense solve, with and without damping."""
    b_copy = b.copy()
    sol, *_ = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
    # The right-hand side must not be modified in place.
    assert_array_equal(b_copy, b)
    direct = np.linalg.solve(G, b)
    assert_allclose(sol, direct, atol=atol_test, rtol=rtol_test)

    # Now the same but with damp > 0.
    # This is equivalent to solving the extented system:
    # ( G ) @ x = ( b )
    # ( damp*I ) ( 0 )
    damp = 1.5
    sol, *_ = lsqr(
        G, b, damp=damp, show=show, atol=tol, btol=tol, iter_lim=maxit)
    G_ext = np.vstack((G, damp * np.eye(G.shape[1])))
    b_ext = np.concatenate((b, np.zeros(G.shape[1])))
    direct, *_ = np.linalg.lstsq(G_ext, b_ext, rcond=None)
    assert_allclose(sol, direct, atol=atol_test, rtol=rtol_test)
def test_gh_2466():
    """Regression test for gh-2466: lsqr on a 1x2 sparse COO system."""
    rows = np.array([0, 0])
    cols = np.array([0, 1])
    vals = np.array([1, -1])
    matrix = scipy.sparse.coo_matrix((vals, (rows, cols)), shape=(1, 2))
    rhs = np.asarray([4])
    # Must complete without raising.
    lsqr(matrix, rhs)
def test_well_conditioned_problems():
    """lsqr recovers exact solutions on identity systems for many seeds.

    Non-regression test for a potential ZeroDivisionError raised while
    computing the `test2` & `test3` convergence conditions.
    """
    n = 10
    A_sparse = scipy.sparse.eye(n, n)
    A_dense = A_sparse.toarray()
    with np.errstate(invalid='raise'):
        for seed in range(30):
            rng = np.random.RandomState(seed + 10)
            beta = rng.rand(n)
            # ensure that all the betas are not null
            beta[beta == 0] = 0.00001
            rhs = A_sparse @ beta[:, np.newaxis]
            output = lsqr(A_sparse, rhs, show=show)

            # Termination flag 1 indicates an approximate solution of Ax = b.
            assert_equal(output[1], 1)
            solution = output[0]

            # The ground-truth solution is recovered ...
            assert_allclose(solution, beta)
            # ... and agrees with a dense direct solve.
            reference = np.linalg.solve(A_dense, rhs).ravel()
            assert_allclose(solution, reference)
def test_b_shapes():
    """lsqr accepts both a scalar and a column-vector right-hand side."""
    # Scalar b.
    mat = np.array([[1.0, 2.0]])
    rhs = 3.0
    sol = lsqr(mat, rhs)[0]
    assert norm(mat.dot(sol) - rhs) == pytest.approx(0)

    # Column-vector b.
    mat = np.eye(10)
    rhs = np.ones((10, 1))
    sol = lsqr(mat, rhs)[0]
    assert norm(mat.dot(sol) - rhs.ravel()) == pytest.approx(0)
def test_initialization():
    """x0=zeros matches the default run; a warm start still converges."""
    b_copy = b.copy()
    ref = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)

    # An explicit zero initial guess must reproduce the default behaviour,
    # and b must be left untouched.
    zero_guess = np.zeros(ref[0].shape)
    got = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit,
               x0=zero_guess)
    assert_array_equal(b_copy, b)
    assert_allclose(ref[0], got[0])

    # Warm start from the result of a single iteration.
    warm = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=1)[0]
    got = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=warm)
    assert_allclose(ref[0], got[0])
    assert_array_equal(b_copy, b)
| 3,754
| 30.033058
| 80
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_isolve/tests/test_iterative.py
|
""" Test functions for the sparse.linalg._isolve module
"""
import itertools
import platform
import sys
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from numpy import zeros, arange, array, ones, eye, iscomplexobj
from numpy.linalg import norm
from scipy.sparse import spdiags, csr_matrix, kronsum
from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg._isolve import (bicg, bicgstab, cg, cgs,
gcrotmk, gmres, lgmres,
minres, qmr, tfqmr)
# TODO check that method preserve shape and type
# TODO test both preconditioner methods
# list of all solvers under test
# All iterative solvers exercised by the parametrized tests below.
_SOLVERS = [bicg, bicgstab, cg, cgs, gcrotmk, gmres, lgmres,
            minres, qmr, tfqmr]

# Silence the deprecation warnings these legacy call signatures trigger.
pytestmark = [
    pytest.mark.filterwarnings("ignore:.* keyword argument 'tol'.*"),
    pytest.mark.filterwarnings("ignore:.*called without specifying.*")
]


# create parametrized fixture for easy reuse in tests
@pytest.fixture(params=_SOLVERS, scope="session")
def solver(request):
    """
    Fixture for all solvers in scipy.sparse.linalg._isolve
    """
    return request.param
class Case:
    """A named linear system plus solver-applicability metadata.

    Parameters
    ----------
    name : str
        Identifier used when building test ids.
    A : ndarray or sparse matrix
        System matrix.
    b : ndarray, optional
        Right-hand side; defaults to ``arange(A.shape[0])`` as floats.
    skip : list, optional
        Solvers that must not be run against this case.
    nonconvergence : list, optional
        Solvers expected to fail to converge on this case.
    """

    def __init__(self, name, A, b=None, skip=None, nonconvergence=None):
        self.name = name
        self.A = A
        self.b = arange(A.shape[0], dtype=float) if b is None else b
        self.skip = [] if skip is None else skip
        self.nonconvergence = [] if nonconvergence is None else nonconvergence
class SingleTest:
    """One (case, solver) pairing produced by IterativeParams."""

    def __init__(self, A, b, solver, casename, convergence=True):
        self.A = A
        self.b = b
        self.solver = solver
        # The test id combines the case name and the solver's name.
        self.name = f"{casename}-{solver.__name__}"
        self.convergence = convergence

    def __repr__(self):
        return "<" + self.name + ">"
class IterativeParams:
    """Builds the full list of (matrix, rhs) cases for the solver tests.

    Each case records which solvers to skip (e.g. non-symmetric matrices for
    symmetric-only solvers) and which are expected not to converge; the
    skip/nonconvergence lists are tuned to the solvers' actual behavior.
    """

    def __init__(self):
        # Solver capability groups used to build the skip lists below.
        sym_solvers = [minres, cg]
        posdef_solvers = [cg]
        real_solvers = [minres]
        # list of Cases
        self.cases = []
        # Symmetric and Positive Definite
        N = 40
        data = ones((3, N))
        data[0, :] = 2
        data[1, :] = -1
        data[2, :] = -1
        Poisson1D = spdiags(data, [0, -1, 1], N, N, format='csr')
        self.cases.append(Case("poisson1d", Poisson1D))
        # note: minres fails for single precision
        self.cases.append(Case("poisson1d-F", Poisson1D.astype('f'),
                               skip=[minres]))
        # Symmetric and Negative Definite
        self.cases.append(Case("neg-poisson1d", -Poisson1D,
                               skip=posdef_solvers))
        # note: minres fails for single precision
        self.cases.append(Case("neg-poisson1d-F", (-Poisson1D).astype('f'),
                               skip=posdef_solvers + [minres]))
        # 2-dimensional Poisson equations
        Poisson2D = kronsum(Poisson1D, Poisson1D)
        # note: minres fails for 2-d poisson problem,
        # it will be fixed in the future PR
        self.cases.append(Case("poisson2d", Poisson2D, skip=[minres]))
        # note: minres fails for single precision
        self.cases.append(Case("poisson2d-F", Poisson2D.astype('f'),
                               skip=[minres]))
        # Symmetric and Indefinite
        data = array([[6, -5, 2, 7, -1, 10, 4, -3, -8, 9]], dtype='d')
        RandDiag = spdiags(data, [0], 10, 10, format='csr')
        self.cases.append(Case("rand-diag", RandDiag, skip=posdef_solvers))
        self.cases.append(Case("rand-diag-F", RandDiag.astype('f'),
                               skip=posdef_solvers))
        # Random real-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4)
        self.cases.append(Case("rand", data,
                               skip=posdef_solvers + sym_solvers))
        self.cases.append(Case("rand-F", data.astype('f'),
                               skip=posdef_solvers + sym_solvers))
        # Random symmetric real-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4)
        data = data + data.T
        self.cases.append(Case("rand-sym", data, skip=posdef_solvers))
        self.cases.append(Case("rand-sym-F", data.astype('f'),
                               skip=posdef_solvers))
        # Random pos-def symmetric real
        np.random.seed(1234)
        data = np.random.rand(9, 9)
        data = np.dot(data.conj(), data.T)
        self.cases.append(Case("rand-sym-pd", data))
        # note: minres fails for single precision
        self.cases.append(Case("rand-sym-pd-F", data.astype('f'),
                               skip=[minres]))
        # Random complex-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4) + 1j * np.random.rand(4, 4)
        skip_cmplx = posdef_solvers + sym_solvers + real_solvers
        self.cases.append(Case("rand-cmplx", data, skip=skip_cmplx))
        self.cases.append(Case("rand-cmplx-F", data.astype('F'),
                               skip=skip_cmplx))
        # Random hermitian complex-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4) + 1j * np.random.rand(4, 4)
        data = data + data.T.conj()
        self.cases.append(Case("rand-cmplx-herm", data,
                               skip=posdef_solvers + real_solvers))
        self.cases.append(Case("rand-cmplx-herm-F", data.astype('F'),
                               skip=posdef_solvers + real_solvers))
        # Random pos-def hermitian complex-valued
        np.random.seed(1234)
        data = np.random.rand(9, 9) + 1j * np.random.rand(9, 9)
        data = np.dot(data.conj(), data.T)
        self.cases.append(Case("rand-cmplx-sym-pd", data, skip=real_solvers))
        self.cases.append(Case("rand-cmplx-sym-pd-F", data.astype('F'),
                               skip=real_solvers))
        # Non-symmetric and Positive Definite
        #
        # cgs, qmr, bicg and tfqmr fail to converge on this one
        # -- algorithmic limitation apparently
        data = ones((2, 10))
        data[0, :] = 2
        data[1, :] = -1
        A = spdiags(data, [0, -1], 10, 10, format='csr')
        self.cases.append(Case("nonsymposdef", A,
                               skip=sym_solvers + [cgs, qmr, bicg, tfqmr]))
        self.cases.append(Case("nonsymposdef-F", A.astype('F'),
                               skip=sym_solvers + [cgs, qmr, bicg, tfqmr]))
        # Symmetric, non-pd, hitting cgs/bicg/bicgstab/qmr/tfqmr breakdown
        A = np.array([[0, 0, 0, 0, 0, 1, -1, -0, -0, -0, -0],
                      [0, 0, 0, 0, 0, 2, -0, -1, -0, -0, -0],
                      [0, 0, 0, 0, 0, 2, -0, -0, -1, -0, -0],
                      [0, 0, 0, 0, 0, 2, -0, -0, -0, -1, -0],
                      [0, 0, 0, 0, 0, 1, -0, -0, -0, -0, -1],
                      [1, 2, 2, 2, 1, 0, -0, -0, -0, -0, -0],
                      [-1, 0, 0, 0, 0, 0, -1, -0, -0, -0, -0],
                      [0, -1, 0, 0, 0, 0, -0, -1, -0, -0, -0],
                      [0, 0, -1, 0, 0, 0, -0, -0, -1, -0, -0],
                      [0, 0, 0, -1, 0, 0, -0, -0, -0, -1, -0],
                      [0, 0, 0, 0, -1, 0, -0, -0, -0, -0, -1]], dtype=float)
        b = np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], dtype=float)
        assert (A == A.T).all()
        self.cases.append(Case("sym-nonpd", A, b,
                               skip=posdef_solvers,
                               nonconvergence=[cgs, bicg, bicgstab, qmr, tfqmr]
                               )
                          )

    def generate_tests(self):
        """Expand the cases into SingleTest objects, applying skip lists."""
        # generate test cases with skips applied
        tests = []
        for case in self.cases:
            for solver in _SOLVERS:
                if (solver in case.skip):
                    continue
                if solver in case.nonconvergence:
                    tests += [SingleTest(case.A, case.b, solver, case.name,
                                         convergence=False)]
                else:
                    tests += [SingleTest(case.A, case.b, solver, case.name)]
        return tests
# Materialize all (case, solver) combinations once at import time.
cases = IterativeParams().generate_tests()


@pytest.fixture(params=cases, ids=[x.name for x in cases], scope="module")
def case(request):
    """
    Fixture for all cases in IterativeParams
    """
    return request.param
def test_maxiter(case):
    """A maxiter=1 run makes exactly one callback and reports info == 1."""
    if not case.convergence:
        pytest.skip("Solver - Breakdown case, see gh-8829")
    matrix, rhs = case.A, case.b
    start = 0 * rhs
    history = []

    def record(x):
        history.append(norm(rhs - case.A * x))

    _, info = case.solver(matrix, rhs, x0=start, tol=1e-12, maxiter=1,
                          callback=record)

    # One iteration -> one callback; info == 1 flags non-convergence.
    assert len(history) == 1
    assert info == 1
def assert_normclose(a, b, tol=1e-8):
    """Assert that ``norm(a - b) < tol * norm(b)`` (relative closeness)."""
    assert norm(a - b) < tol * norm(b)
def test_convergence(case):
    """Each solver reaches its tolerance on the case, or flags failure."""
    matrix = case.A
    # Double precision gets a tight tolerance, single precision a loose one.
    tol = 1e-8 if matrix.dtype.char in "dD" else 1e-2
    rhs = case.b
    start = 0 * rhs

    sol, info = case.solver(matrix, rhs, x0=start, tol=tol)

    # The initial guess must not be overwritten in place.
    assert_array_equal(start, 0 * rhs)
    if case.convergence:
        assert info == 0
        assert norm(matrix @ sol - rhs) <= norm(rhs) * tol
    else:
        assert info != 0
        assert norm(matrix @ sol - rhs) <= norm(rhs)
def test_precond_dummy(case):
    """An identity preconditioner must not change solver convergence."""
    if not case.convergence:
        pytest.skip("Solver - Breakdown case, see gh-8829")
    tol = 1e-8
    def identity(b, which=None):
        """trivial preconditioner"""
        return b
    A = case.A
    M, N = A.shape
    # Ensure the diagonal elements of A are non-zero before calculating
    # 1.0/A.diagonal()
    diagOfA = A.diagonal()
    if np.count_nonzero(diagOfA) == len(diagOfA):
        # NOTE(review): this spdiags result is discarded — looks like a
        # leftover Jacobi-preconditioner construction; confirm intent.
        spdiags([1.0 / diagOfA], [0], M, N)
    b = case.b
    x0 = 0 * b
    precond = LinearOperator(A.shape, identity, rmatvec=identity)
    # qmr takes left/right preconditioners; all others take a single M.
    if case.solver is qmr:
        x, info = case.solver(A, b, M1=precond, M2=precond, x0=x0, tol=tol)
    else:
        x, info = case.solver(A, b, M=precond, x0=x0, tol=tol)
    assert info == 0
    assert norm(A @ x - b) <= norm(b)*tol
    # Also exercise the psolve/rpsolve hooks on a LinearOperator.
    A = aslinearoperator(A)
    A.psolve = identity
    A.rpsolve = identity
    x, info = case.solver(A, b, x0=x0, tol=tol)
    assert info == 0
    assert norm(A @ x - b) <= norm(b)*tol
# Specific test for poisson1d and poisson2d cases
@pytest.mark.parametrize('case', [x for x in IterativeParams().cases
                                  if x.name in ('poisson1d', 'poisson2d')],
                         ids=['poisson1d', 'poisson2d'])
def test_precond_inverse(case):
    """With an exact-inverse preconditioner, solvers converge in ~1 step."""
    for solver in _SOLVERS:
        if solver in case.skip or solver is qmr:
            continue
        tol = 1e-8
        def inverse(b, which=None):
            """inverse preconditioner"""
            A = case.A
            if not isinstance(A, np.ndarray):
                A = A.toarray()
            return np.linalg.solve(A, b)
        def rinverse(b, which=None):
            """inverse preconditioner"""
            A = case.A
            if not isinstance(A, np.ndarray):
                A = A.toarray()
            return np.linalg.solve(A.T, b)
        # Count matvecs so we can assert the solve is essentially direct.
        matvec_count = [0]
        def matvec(b):
            matvec_count[0] += 1
            return case.A @ b
        def rmatvec(b):
            matvec_count[0] += 1
            return case.A.T @ b
        b = case.b
        x0 = 0 * b
        A = LinearOperator(case.A.shape, matvec, rmatvec=rmatvec)
        precond = LinearOperator(case.A.shape, inverse, rmatvec=rinverse)
        # Solve with preconditioner
        matvec_count = [0]
        x, info = solver(A, b, M=precond, x0=x0, tol=tol)
        assert info == 0
        assert norm(case.A @ x - b) <= norm(b)*tol
        # Solution should be nearly instant
        assert matvec_count[0] <= 3
def test_atol(solver):
    """Solvers honor absolute/relative tolerances across preconditioners."""
    # TODO: minres / tfqmr. It didn't historically use absolute tolerances, so
    # fixing it is less urgent.
    if solver in (minres, tfqmr):
        pytest.skip("TODO: Add atol to minres/tfqmr")
    # Historically this is tested as below, all pass but for some reason
    # gcrotmk is over-sensitive to difference between random.seed/rng.random
    # Hence tol lower bound is changed from -10 to -9
    # np.random.seed(1234)
    # A = np.random.rand(10, 10)
    # A = A @ A.T + 10 * np.eye(10)
    # b = 1e3*np.random.rand(10)
    rng = np.random.default_rng(168441431005389)
    A = rng.uniform(size=[10, 10])
    A = A @ A.T + 10*np.eye(10)  # symmetric positive definite by construction
    b = 1e3 * rng.uniform(size=10)
    b_norm = np.linalg.norm(b)
    tols = np.r_[0, np.logspace(-9, 2, 7), np.inf]
    # Check effect of badly scaled preconditioners
    M0 = rng.standard_normal(size=(10, 10))
    M0 = M0 @ M0.T
    Ms = [None, 1e-6 * M0, 1e6 * M0]
    for M, tol, atol in itertools.product(Ms, tols, tols):
        if tol == 0 and atol == 0:
            # at least one tolerance must be active
            continue
        if solver is qmr:
            if M is not None:
                M = aslinearoperator(M)
                M2 = aslinearoperator(np.eye(10))
            else:
                M2 = None
            x, info = solver(A, b, M1=M, M2=M2, tol=tol, atol=atol)
        else:
            x, info = solver(A, b, M=M, tol=tol, atol=atol)
        assert info == 0
        residual = A @ x - b
        err = np.linalg.norm(residual)
        atol2 = tol * b_norm
        # Added 1.00025 fudge factor because of `err` exceeding `atol` just
        # very slightly on s390x (see gh-17839)
        assert err <= 1.00025 * max(atol, atol2)
def test_zero_rhs(solver):
    """A zero right-hand side must yield the zero solution for any tol."""
    rng = np.random.default_rng(1684414984100503)
    A = rng.random(size=[10, 10])
    A = A @ A.T + 10 * np.eye(10)  # symmetric positive definite
    b = np.zeros(10)
    tols = np.r_[np.logspace(-10, 2, 7)]
    for tol in tols:
        # default x0
        x, info = solver(A, b, tol=tol)
        assert info == 0
        assert_allclose(x, 0., atol=1e-15)
        # nonzero x0 must still collapse to zero
        x, info = solver(A, b, tol=tol, x0=ones(10))
        assert info == 0
        assert_allclose(x, 0., atol=tol)
        if solver is not minres:
            # with atol=0 convergence is not guaranteed; check only if claimed
            x, info = solver(A, b, tol=tol, atol=0, x0=ones(10))
            if info == 0:
                assert_allclose(x, 0)
            x, info = solver(A, b, tol=tol, atol=tol)
            assert info == 0
            assert_allclose(x, 0, atol=1e-300)
            x, info = solver(A, b, tol=tol, atol=0)
            assert info == 0
            assert_allclose(x, 0, atol=1e-300)
@pytest.mark.xfail(reason="see gh-18697")
def test_maxiter_worsening(solver):
if solver not in (gmres, lgmres, qmr):
# these were skipped from the very beginning, see gh-9201; gh-14160
pytest.skip("Solver breakdown case")
# Check error does not grow (boundlessly) with increasing maxiter.
# This can occur due to the solvers hitting close to breakdown,
# which they should detect and halt as necessary.
# cf. gh-9100
if (solver is gmres and platform.machine() == 'aarch64'
and sys.version_info[1] == 9):
pytest.xfail(reason="gh-13019")
if (solver is lgmres and
platform.machine() not in ['x86_64' 'x86', 'aarch64', 'arm64']):
# see gh-17839
pytest.xfail(reason="fails on at least ppc64le, ppc64 and riscv64")
# Singular matrix, rhs numerically not in range
A = np.array([[-0.1112795288033378, 0, 0, 0.16127952880333685],
[0, -0.13627952880333782 + 6.283185307179586j, 0, 0],
[0, 0, -0.13627952880333782 - 6.283185307179586j, 0],
[0.1112795288033368, 0j, 0j, -0.16127952880333785]])
v = np.ones(4)
best_error = np.inf
# Unable to match the Fortran code tolerance levels with this example
# Original tolerance values
# slack_tol = 7 if platform.machine() == 'aarch64' else 5
slack_tol = 9
for maxiter in range(1, 20):
x, info = solver(A, v, maxiter=maxiter, tol=1e-8, atol=0)
if info == 0:
assert norm(A @ x - v) <= 1e-8 * norm(v)
error = np.linalg.norm(A @ x - v)
best_error = min(best_error, error)
# Check with slack
assert error <= slack_tol * best_error
def test_x0_working(solver):
    """Supplying an initial guess x0 still converges to the solution."""
    # Easy problem: symmetric PSD matrix from a fixed RNG stream.
    rng = np.random.default_rng(1685363802304750)
    dim = 10
    mat = rng.random(size=[dim, dim])
    mat = mat @ mat.T
    rhs = rng.random(dim)
    guess = rng.random(dim)

    # minres does not take atol here; all other solvers do.
    kw = dict(tol=1e-6) if solver is minres else dict(atol=0, tol=1e-6)

    sol, info = solver(mat, rhs, **kw)
    assert info == 0
    assert norm(mat @ sol - rhs) <= 1e-6 * norm(rhs)

    sol, info = solver(mat, rhs, x0=guess, **kw)
    assert info == 0
    assert norm(mat @ sol - rhs) <= 2e-6 * norm(rhs)
def test_x0_equals_Mb(case):
    """The special string x0='Mb' is accepted and left unmodified."""
    if case.solver is tfqmr:
        pytest.skip("Solver does not support x0='Mb'")
    A = case.A
    b = case.b
    x0 = 'Mb'
    tol = 1e-8
    x, info = case.solver(A, b, x0=x0, tol=tol)
    assert_array_equal(x0, 'Mb')  # ensure that x0 is not overwritten
    assert info == 0
    assert norm(A @ x - b) <= tol*norm(b)
# Specific tfqmr test
@pytest.mark.parametrize('case', IterativeParams().cases)
def test_show(case, capsys):
    """tfqmr's show=True prints the expected convergence message."""
    def cb(x):
        # no-op callback; only needed so tfqmr exercises its callback path
        pass
    x, info = tfqmr(case.A, case.b, callback=cb, show=True)
    out, err = capsys.readouterr()
    if case.name == "sym-nonpd":
        # no logs for some reason
        exp = ""
    elif case.name in ("nonsymposdef", "nonsymposdef-F"):
        # Asymmetric and Positive Definite
        exp = "TFQMR: Linear solve not converged due to reach MAXIT iterations"
    else:  # all other cases
        exp = "TFQMR: Linear solve converged due to reach TOL iterations"
    assert out.startswith(exp)
    assert err == ""
class TestQMR:
    """QMR-specific behavior not covered by the generic solver tests."""

    @pytest.mark.filterwarnings('ignore::scipy.sparse.SparseEfficiencyWarning')
    def test_leftright_precond(self):
        """Check that QMR works with left and right preconditioners"""
        from scipy.sparse.linalg._dsolve import splu
        from scipy.sparse.linalg._interface import LinearOperator
        n = 100
        dat = ones(n)
        # Tridiagonal system and an (approximate) LU splitting of it.
        A = spdiags([-2 * dat, 4 * dat, -dat], [-1, 0, 1], n, n)
        b = arange(n, dtype='d')
        L = spdiags([-dat / 2, dat], [-1, 0], n, n)
        U = spdiags([4 * dat, -dat], [0, 1], n, n)
        L_solver = splu(L)
        U_solver = splu(U)
        def L_solve(b):
            return L_solver.solve(b)
        def U_solve(b):
            return U_solver.solve(b)
        def LT_solve(b):
            # transpose solves, used by qmr's adjoint iteration
            return L_solver.solve(b, 'T')
        def UT_solve(b):
            return U_solver.solve(b, 'T')
        M1 = LinearOperator((n, n), matvec=L_solve, rmatvec=LT_solve)
        M2 = LinearOperator((n, n), matvec=U_solve, rmatvec=UT_solve)
        x, info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2)
        assert info == 0
        assert_normclose(A @ x, b, tol=1e-8)
class TestGMRES:
    """Tests for the GMRES Krylov solver (``scipy.sparse.linalg.gmres``)."""

    def test_basic(self):
        # Vandermonde system; with restart=5 and a single outer iteration the
        # leading solution component matches a known reference value.
        A = np.vander(np.arange(10) + 1)[:, ::-1]
        b = np.zeros(10)
        b[0] = 1
        x_gm, err = gmres(A, b, restart=5, maxiter=1)
        assert_allclose(x_gm[0], 0.359, rtol=1e-2)

    def test_callback(self):
        # Store each residual reported to the callback in the next free
        # (still-zero) slot of `rvec`.
        def store_residual(r, rvec):
            rvec[rvec.nonzero()[0].max() + 1] = r

        # Define, A,b
        A = csr_matrix(array([[-2, 1, 0, 0, 0, 0],
                              [1, -2, 1, 0, 0, 0],
                              [0, 1, -2, 1, 0, 0],
                              [0, 0, 1, -2, 1, 0],
                              [0, 0, 0, 1, -2, 1],
                              [0, 0, 0, 0, 1, -2]]))
        b = ones((A.shape[0],))
        maxiter = 1
        rvec = zeros(maxiter + 1)
        rvec[0] = 1.0

        def callback(r):
            return store_residual(r, rvec)

        x, flag = gmres(A, b, x0=zeros(A.shape[0]), tol=1e-16,
                        maxiter=maxiter, callback=callback)
        # Expected output from SciPy 1.0.0
        assert_allclose(rvec, array([1.0, 0.81649658092772603]), rtol=1e-10)

        # Test preconditioned callback
        M = 1e-3 * np.eye(A.shape[0])
        rvec = zeros(maxiter + 1)
        rvec[0] = 1.0
        x, flag = gmres(A, b, M=M, tol=1e-16, maxiter=maxiter,
                        callback=callback)
        # Expected output from SciPy 1.0.0
        # (callback has preconditioned residual!)
        assert_allclose(rvec, array([1.0, 1e-3 * 0.81649658092772603]),
                        rtol=1e-10)

    def test_abi(self):
        # Check we don't segfault on gmres with complex argument
        A = eye(2)
        b = ones(2)
        r_x, r_info = gmres(A, b)
        r_x = r_x.astype(complex)
        x, info = gmres(A.astype(complex), b.astype(complex))
        assert iscomplexobj(x)
        assert_allclose(r_x, x)
        assert r_info == info

    def test_atol_legacy(self):
        # Legacy `tol` is relative to ||b||; an easy identity system should
        # converge to essentially exact solution.
        A = eye(2)
        b = ones(2)
        x, info = gmres(A, b, tol=1e-5)
        assert np.linalg.norm(A @ x - b) <= 1e-5 * np.linalg.norm(b)
        assert_allclose(x, b, atol=0, rtol=1e-8)

        # A harder random system: the achieved residual should NOT reach the
        # raw tolerance, demonstrating tol is interpreted relatively.
        rndm = np.random.RandomState(12345)
        A = rndm.rand(30, 30)
        b = 1e-6 * ones(30)
        x, info = gmres(A, b, tol=1e-7, restart=20)
        assert np.linalg.norm(A @ x - b) > 1e-7

        # With atol=0 the relative criterion applies even for a tiny ||b||.
        A = eye(2)
        b = 1e-10 * ones(2)
        x, info = gmres(A, b, tol=1e-8, atol=0)
        assert np.linalg.norm(A @ x - b) <= 1e-8 * np.linalg.norm(b)

    def test_defective_precond_breakdown(self):
        # Breakdown due to defective preconditioner
        M = np.eye(3)
        M[2, 2] = 0
        b = np.array([0, 1, 1])
        x = np.array([1, 0, 0])
        A = np.diag([2, 3, 4])
        x, info = gmres(A, b, x0=x, M=M, tol=1e-15, atol=0)
        # Should not return nans, nor terminate with false success
        assert not np.isnan(x).any()
        if info == 0:
            assert np.linalg.norm(A @ x - b) <= 1e-15 * np.linalg.norm(b)
        # The solution should be OK outside null space of M
        assert_allclose(M @ (A @ x), M @ b)

    def test_defective_matrix_breakdown(self):
        # Breakdown due to defective matrix
        A = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]])
        b = np.array([1, 0, 1])
        x, info = gmres(A, b, tol=1e-8, atol=0)
        # Should not return nans, nor terminate with false success
        assert not np.isnan(x).any()
        if info == 0:
            assert np.linalg.norm(A @ x - b) <= 1e-8 * np.linalg.norm(b)
        # The solution should be OK outside null space of A
        assert_allclose(A @ (A @ x), A @ b)

    def test_callback_type(self):
        # The legacy callback type changes meaning of 'maxiter'
        np.random.seed(1)
        A = np.random.rand(20, 20)
        b = np.random.rand(20)

        cb_count = [0]

        def pr_norm_cb(r):
            cb_count[0] += 1
            assert isinstance(r, float)

        def x_cb(x):
            cb_count[0] += 1
            assert isinstance(x, np.ndarray)

        # 2 iterations is not enough to solve the problem
        cb_count = [0]
        x, info = gmres(A, b, tol=1e-6, atol=0, callback=pr_norm_cb,
                        maxiter=2, restart=50)
        assert info == 2
        assert cb_count[0] == 2

        # With `callback_type` specified, no warning should be raised
        cb_count = [0]
        x, info = gmres(A, b, tol=1e-6, atol=0, callback=pr_norm_cb,
                        maxiter=2, restart=50, callback_type='legacy')
        assert info == 2
        assert cb_count[0] == 2

        # 2 restart cycles is enough to solve the problem
        cb_count = [0]
        x, info = gmres(A, b, tol=1e-6, atol=0, callback=pr_norm_cb,
                        maxiter=2, restart=50, callback_type='pr_norm')
        assert info == 0
        assert cb_count[0] > 2

        # 2 restart cycles is enough to solve the problem
        cb_count = [0]
        x, info = gmres(A, b, tol=1e-6, atol=0, callback=x_cb, maxiter=2,
                        restart=50, callback_type='x')
        assert info == 0
        assert cb_count[0] == 1

    def test_callback_x_monotonic(self):
        # Check that callback_type='x' gives monotonic norm decrease
        np.random.seed(1)
        A = np.random.rand(20, 20) + np.eye(20)
        b = np.random.rand(20)

        prev_r = [np.inf]
        count = [0]

        def x_cb(x):
            r = np.linalg.norm(A @ x - b)
            assert r <= prev_r[0]
            prev_r[0] = r
            count[0] += 1

        x, info = gmres(A, b, tol=1e-6, atol=0, callback=x_cb, maxiter=20,
                        restart=10, callback_type='x')
        assert info == 20
        assert count[0] == 20

    def test_restrt_dep(self):
        # The misspelled legacy alias 'restrt' must emit DeprecationWarning.
        with pytest.warns(
            DeprecationWarning,
            match="'gmres' keyword argument 'restrt'"
        ):
            gmres(np.array([1]), np.array([1]), restrt=10)
| 24,865
| 31.126615
| 79
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_isolve/tests/test_minres.py
|
import numpy as np
from numpy.testing import assert_equal, assert_allclose, assert_
from scipy.sparse.linalg._isolve import minres
from pytest import raises as assert_raises
from .test_iterative import assert_normclose
def get_sample_problem():
    """Return a deterministic random symmetric 10x10 system (matrix, rhs)."""
    np.random.seed(1234)
    raw = np.random.rand(10, 10)
    # Symmetrize so the matrix is valid input for MINRES.
    matrix = raw + raw.T
    vector = np.random.rand(10)
    return matrix, vector
def test_singular():
    """minres should converge on a singular but consistent system."""
    matrix, rhs = get_sample_problem()
    # Zero the first row and the matching rhs entry: the system becomes
    # singular yet stays consistent.
    matrix[0, :] = 0
    rhs[0] = 0
    solution, info = minres(matrix, rhs)
    assert_equal(info, 0)
    assert_normclose(matrix.dot(solution), rhs, tol=1e-5)
def test_x0_is_used_by():
    """A user-supplied x0 must change the iterates produced by minres."""
    A, b = get_sample_problem()
    # Reproducible nonzero starting vector.
    np.random.seed(12345)
    x0 = np.random.rand(10)

    iterates_with_x0 = []
    minres(A, b, x0=x0, callback=iterates_with_x0.append)

    iterates_default = []
    minres(A, b, callback=iterates_default.append)

    # The very first iterate differs when x0 is supplied.
    assert_(not np.array_equal(iterates_with_x0[0], iterates_default[0]))
def test_shift():
    """minres with `shift` solves (A - shift*I) x = b."""
    A, b = get_sample_problem()
    shift = 0.5
    # Solve once via the keyword and once with the explicitly shifted matrix;
    # the two answers must agree.
    x_via_kwarg, info1 = minres(A, b, shift=shift)
    x_via_matrix, info2 = minres(A - shift * np.eye(10), b)
    assert_equal(info1, 0)
    assert_allclose(x_via_kwarg, x_via_matrix, rtol=1e-5)
def test_asymmetric_fail():
    """Asymmetric matrix should raise `ValueError` when check=True"""
    A, b = get_sample_problem()
    # Deliberately break symmetry at one off-diagonal pair.
    A[1, 2], A[2, 1] = 1, 2
    with assert_raises(ValueError):
        xp, info = minres(A, b, check=True)
def test_minres_non_default_x0():
    """minres converges from a random (non-default) starting vector."""
    np.random.seed(1234)
    tol = 10**(-6)
    rand_mat = np.random.randn(5, 5)
    # Make the matrix symmetric positive semidefinite.
    spd_mat = np.dot(rand_mat, rand_mat.T)
    rhs = np.random.randn(5)
    start = np.random.randn(5)
    solution = minres(spd_mat, rhs, x0=start, tol=tol)[0]
    assert_normclose(spd_mat.dot(solution), rhs, tol=tol)
def test_minres_precond_non_default_x0():
    """Preconditioned minres converges from a non-default x0."""
    np.random.seed(12345)
    tol = 10**(-6)
    rand_mat = np.random.randn(5, 5)
    spd_mat = np.dot(rand_mat, rand_mat.T)
    rhs = np.random.randn(5)
    start = np.random.randn(5)
    # Random SPD preconditioner.
    precond_raw = np.random.randn(5, 5)
    precond = np.dot(precond_raw, precond_raw.T)
    solution = minres(spd_mat, rhs, M=precond, x0=start, tol=tol)[0]
    assert_normclose(spd_mat.dot(solution), rhs, tol=tol)
def test_minres_precond_exact_x0():
    """Preconditioned minres handles an x0 that is already the exact answer."""
    np.random.seed(1234)
    tol = 10**(-6)
    identity = np.eye(10)
    rhs = np.ones(10)
    # For I x = ones, x0 = ones is the exact solution.
    exact_start = np.ones(10)
    precond_raw = np.random.randn(10, 10)
    precond = np.dot(precond_raw, precond_raw.T)
    solution = minres(identity, rhs, M=precond, x0=exact_start, tol=tol)[0]
    assert_normclose(identity.dot(solution), rhs, tol=tol)
| 2,446
| 23.969388
| 69
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_isolve/tests/test_utils.py
|
import numpy as np
from pytest import raises as assert_raises
import scipy.sparse.linalg._isolve.utils as utils
def test_make_system_bad_shape():
    # make_system must reject a non-square A (here 5x3) with ValueError.
    bad_A = np.zeros((5, 3))
    rhs = np.zeros(4)
    assert_raises(ValueError, utils.make_system, bad_A, None, rhs, np.zeros(4))
| 247
| 26.555556
| 97
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_propack/setup.py
|
import pathlib
from distutils.sysconfig import get_python_inc
import numpy as np
def _is_32bit():
return np.intp(0).itemsize < 8
def check_propack_submodule():
    """Fail loudly if the PROPACK git submodule has not been checked out."""
    readme = pathlib.Path(__file__).parent / 'PROPACK/README'
    if not readme.exists():
        raise RuntimeError("Missing the `PROPACK` submodule! Run "
                           "`git submodule update --init` to fix this.")
def configuration(parent_package='', top_path=None):
    """numpy.distutils build configuration for scipy.sparse.linalg._propack.

    Builds one static library and one f2py extension per PROPACK precision
    (single, double, complex8, complex16), linked against LAPACK/BLAS.
    Raises NotFoundError when no LAPACK/BLAS is available.
    """
    from numpy.distutils.system_info import get_info, NotFoundError
    from numpy.distutils.misc_util import Configuration
    from scipy._build_utils import (gfortran_legacy_flag_hook,
                                    get_g77_abi_wrappers,
                                    needs_g77_abi_wrapper)

    lapack_opt = get_info('lapack_opt')
    pre_build_hook = gfortran_legacy_flag_hook
    f2py_options = None

    if not lapack_opt:
        raise NotFoundError('no lapack/blas resources found')

    config = Configuration('_propack', parent_package, top_path)

    # ------------------------------------------------------------
    # Set up the libraries.
    # We need a different python extension file for each, because
    # names reuse between functions in the LAPACK extensions. This
    # could probably be remedied with some work.
    # NOTES: this might not longer apply now that we build without
    #        LAPACK extensions
    type_dict = dict(s='single',
                     d='double',
                     c='complex8',
                     z='complex16')
    check_propack_submodule()
    for prefix, directory in type_dict.items():
        propack_lib = f'_{prefix}propack'

        # Use risc msg implementation for 64-bit machines, pentium for 32-bit
        src_dir = pathlib.Path(__file__).parent / 'PROPACK' / directory
        src = list(src_dir.glob('*.F'))
        if _is_32bit():
            src = [str(p) for p in src if 'risc' not in str(p)]
        else:
            src = [str(p) for p in src if 'pentium' not in str(p)]
        if not _is_32bit():
            # don't ask me why, 32-bit blows up with this wrapper
            src += get_g77_abi_wrappers(lapack_opt)

        # _OPENMP is defined for the library build but undef'd for the
        # extension below; ABI wrapper macro only when the BLAS needs it.
        cmacros = [('_OPENMP',)]
        if needs_g77_abi_wrapper(lapack_opt):
            cmacros += [('SCIPY_USE_G77_CDOTC_WRAP', 1)]
        config.add_library(propack_lib,
                           sources=src,
                           macros=cmacros,
                           include_dirs=[
                               src_dir,
                               # because npy_common.h is used in g77 abi wrappers
                               get_python_inc(),
                               np.get_include(),
                           ],
                           depends=['setup.py'])

        ext = config.add_extension(f'_{prefix}propack',
                                   sources=f'{prefix}propack.pyf',
                                   libraries=[propack_lib],
                                   extra_info=lapack_opt,
                                   undef_macros=['_OPENMP'],
                                   f2py_options=f2py_options,
                                   depends=['setup.py'] + src)
        ext._pre_build_hook = pre_build_hook

    return config
# Allow building this subpackage standalone via numpy.distutils.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 3,366
| 36.831461
| 81
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_eigen/_svds.py
|
import os
import numpy as np
from .arpack import _arpack # type: ignore[attr-defined]
from . import eigsh
from scipy._lib._util import check_random_state
from scipy.sparse.linalg._interface import LinearOperator, aslinearoperator
from scipy.sparse.linalg._eigen.lobpcg import lobpcg # type: ignore[no-redef]
# PROPACK support is opt-in: the SCIPY_USE_PROPACK environment variable must
# be set before scipy is imported for the 'propack' solver to be available.
if os.environ.get("SCIPY_USE_PROPACK"):
    from scipy.sparse.linalg._svdp import _svdp
    HAS_PROPACK = True
else:
    HAS_PROPACK = False
from scipy.linalg import svd

# Integer dtype used by the ARPACK f2py bindings.
arpack_int = _arpack.timing.nbx.dtype
__all__ = ['svds']
def _herm(x):
return x.T.conj()
def _iv(A, k, ncv, tol, which, v0, maxiter,
        return_singular, solver, random_state):
    """Validate and standardize every `svds` argument.

    Returns the arguments in the same order they were received, with
    `A` wrapped as a LinearOperator and scalars coerced to canonical types.
    Raises ValueError on any invalid input.
    """
    # input validation/standardization for `solver`
    # out of order because it's needed for other parameters
    solver = str(solver).lower()
    solvers = {"arpack", "lobpcg", "propack"}
    if solver not in solvers:
        raise ValueError(f"solver must be one of {solvers}.")

    # input validation/standardization for `A`
    A = aslinearoperator(A)  # this takes care of some input validation
    if not (np.issubdtype(A.dtype, np.complexfloating)
            or np.issubdtype(A.dtype, np.floating)):
        message = "`A` must be of floating or complex floating data type."
        raise ValueError(message)
    if np.prod(A.shape) == 0:
        message = "`A` must not be empty."
        raise ValueError(message)

    # input validation/standardization for `k`
    # PROPACK can compute all min(A.shape) singular values; the other
    # solvers (working on the normal matrix) get at most min(A.shape) - 1.
    kmax = min(A.shape) if solver == 'propack' else min(A.shape) - 1
    if int(k) != k or not (0 < k <= kmax):
        message = "`k` must be an integer satisfying `0 < k < min(A.shape)`."
        raise ValueError(message)
    k = int(k)

    # input validation/standardization for `ncv`
    # `ncv` is only meaningful for ARPACK (number of Lanczos vectors).
    if solver == "arpack" and ncv is not None:
        if int(ncv) != ncv or not (k < ncv < min(A.shape)):
            message = ("`ncv` must be an integer satisfying "
                       "`k < ncv < min(A.shape)`.")
            raise ValueError(message)
        ncv = int(ncv)

    # input validation/standardization for `tol`
    if tol < 0 or not np.isfinite(tol):
        message = "`tol` must be a non-negative floating point value."
        raise ValueError(message)
    tol = float(tol)

    # input validation/standardization for `which`
    which = str(which).upper()
    whichs = {'LM', 'SM'}
    if which not in whichs:
        raise ValueError(f"`which` must be in {whichs}.")

    # input validation/standardization for `v0`
    if v0 is not None:
        v0 = np.atleast_1d(v0)
        if not (np.issubdtype(v0.dtype, np.complexfloating)
                or np.issubdtype(v0.dtype, np.floating)):
            message = ("`v0` must be of floating or complex floating "
                       "data type.")
            raise ValueError(message)
        # PROPACK iterates on A directly (length M); the others iterate on
        # the smaller normal matrix (length min(M, N)).
        shape = (A.shape[0],) if solver == 'propack' else (min(A.shape),)
        if v0.shape != shape:
            message = f"`v0` must have shape {shape}."
            raise ValueError(message)

    # input validation/standardization for `maxiter`
    if maxiter is not None and (int(maxiter) != maxiter or maxiter <= 0):
        message = "`maxiter` must be a positive integer."
        raise ValueError(message)
    maxiter = int(maxiter) if maxiter is not None else maxiter

    # input validation/standardization for `return_singular_vectors`
    # not going to be flexible with this; too complicated for little gain
    rs_options = {True, False, "vh", "u"}
    if return_singular not in rs_options:
        raise ValueError(f"`return_singular_vectors` must be in {rs_options}.")

    random_state = check_random_state(random_state)

    return (A, k, ncv, tol, which, v0, maxiter,
            return_singular, solver, random_state)
def svds(A, k=6, ncv=None, tol=0, which='LM', v0=None,
         maxiter=None, return_singular_vectors=True,
         solver='arpack', random_state=None, options=None):
    """
    Partial singular value decomposition of a sparse matrix.
    Compute the largest or smallest `k` singular values and corresponding
    singular vectors of a sparse matrix `A`. The order in which the singular
    values are returned is not guaranteed.
    In the descriptions below, let ``M, N = A.shape``.
    Parameters
    ----------
    A : ndarray, sparse matrix, or LinearOperator
        Matrix to decompose of a floating point numeric dtype.
    k : int, default: 6
        Number of singular values and singular vectors to compute.
        Must satisfy ``1 <= k <= kmax``, where ``kmax=min(M, N)`` for
        ``solver='propack'`` and ``kmax=min(M, N) - 1`` otherwise.
    ncv : int, optional
        When ``solver='arpack'``, this is the number of Lanczos vectors
        generated. See :ref:`'arpack' <sparse.linalg.svds-arpack>` for details.
        When ``solver='lobpcg'`` or ``solver='propack'``, this parameter is
        ignored.
    tol : float, optional
        Tolerance for singular values. Zero (default) means machine precision.
    which : {'LM', 'SM'}
        Which `k` singular values to find: either the largest magnitude ('LM')
        or smallest magnitude ('SM') singular values.
    v0 : ndarray, optional
        The starting vector for iteration; see method-specific
        documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>`,
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`), or
        :ref:`'propack' <sparse.linalg.svds-propack>` for details.
    maxiter : int, optional
        Maximum number of iterations; see method-specific
        documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>`,
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`), or
        :ref:`'propack' <sparse.linalg.svds-propack>` for details.
    return_singular_vectors : {True, False, "u", "vh"}
        Singular values are always computed and returned; this parameter
        controls the computation and return of singular vectors.
        - ``True``: return singular vectors.
        - ``False``: do not return singular vectors.
        - ``"u"``: if ``M <= N``, compute only the left singular vectors and
          return ``None`` for the right singular vectors. Otherwise, compute
          all singular vectors.
        - ``"vh"``: if ``M > N``, compute only the right singular vectors and
          return ``None`` for the left singular vectors. Otherwise, compute
          all singular vectors.
        If ``solver='propack'``, the option is respected regardless of the
        matrix shape.
    solver :  {'arpack', 'propack', 'lobpcg'}, optional
        The solver used.
        :ref:`'arpack' <sparse.linalg.svds-arpack>`,
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`, and
        :ref:`'propack' <sparse.linalg.svds-propack>` are supported.
        Default: `'arpack'`.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional
        Pseudorandom number generator state used to generate resamples.
        If `random_state` is ``None`` (or `np.random`), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance then that instance is used.
    options : dict, optional
        A dictionary of solver-specific options. No solver-specific options
        are currently supported; this parameter is reserved for future use.
    Returns
    -------
    u : ndarray, shape=(M, k)
        Unitary matrix having left singular vectors as columns.
    s : ndarray, shape=(k,)
        The singular values.
    vh : ndarray, shape=(k, N)
        Unitary matrix having right singular vectors as rows.
    Notes
    -----
    This is a naive implementation using ARPACK or LOBPCG as an eigensolver
    on the matrix ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on
    which one is smaller size, followed by the Rayleigh-Ritz method
    as postprocessing; see
    Using the normal matrix, in Rayleigh-Ritz method, (2022, Nov. 19),
    Wikipedia, https://w.wiki/4zms.
    Alternatively, the PROPACK solver can be called.
    Choices of the input matrix `A` numeric dtype may be limited.
    Only ``solver="lobpcg"`` supports all floating point dtypes
    real: 'np.single', 'np.double', 'np.longdouble' and
    complex: 'np.csingle', 'np.cdouble', 'np.clongdouble'.
    The ``solver="arpack"`` supports only
    'np.single', 'np.double', and 'np.cdouble'.
    Examples
    --------
    Construct a matrix `A` from singular values and vectors.
    >>> import numpy as np
    >>> from scipy import sparse, linalg, stats
    >>> from scipy.sparse.linalg import svds, aslinearoperator, LinearOperator
    Construct a dense matrix `A` from singular values and vectors.
    >>> rng = np.random.default_rng(258265244568965474821194062361901728911)
    >>> orthogonal = stats.ortho_group.rvs(10, random_state=rng)
    >>> s = [1e-3, 1, 2, 3, 4]  # non-zero singular values
    >>> u = orthogonal[:, :5]         # left singular vectors
    >>> vT = orthogonal[:, 5:].T      # right singular vectors
    >>> A = u @ np.diag(s) @ vT
    With only four singular values/vectors, the SVD approximates the original
    matrix.
    >>> u4, s4, vT4 = svds(A, k=4)
    >>> A4 = u4 @ np.diag(s4) @ vT4
    >>> np.allclose(A4, A, atol=1e-3)
    True
    With all five non-zero singular values/vectors, we can reproduce
    the original matrix more accurately.
    >>> u5, s5, vT5 = svds(A, k=5)
    >>> A5 = u5 @ np.diag(s5) @ vT5
    >>> np.allclose(A5, A)
    True
    The singular values match the expected singular values.
    >>> np.allclose(s5, s)
    True
    Since the singular values are not close to each other in this example,
    every singular vector matches as expected up to a difference in sign.
    >>> (np.allclose(np.abs(u5), np.abs(u)) and
    ...  np.allclose(np.abs(vT5), np.abs(vT)))
    True
    The singular vectors are also orthogonal.
    >>> (np.allclose(u5.T @ u5, np.eye(5)) and
    ...  np.allclose(vT5 @ vT5.T, np.eye(5)))
    True
    If there are (nearly) multiple singular values, the corresponding
    individual singular vectors may be unstable, but the whole invariant
    subspace containing all such singular vectors is computed accurately
    as can be measured by angles between subspaces via 'subspace_angles'.
    >>> rng = np.random.default_rng(178686584221410808734965903901790843963)
    >>> s = [1, 1 + 1e-6]  # non-zero singular values
    >>> u, _ = np.linalg.qr(rng.standard_normal((99, 2)))
    >>> v, _ = np.linalg.qr(rng.standard_normal((99, 2)))
    >>> vT = v.T
    >>> A = u @ np.diag(s) @ vT
    >>> A = A.astype(np.float32)
    >>> u2, s2, vT2 = svds(A, k=2, random_state=rng)
    >>> np.allclose(s2, s)
    True
    The angles between the individual exact and computed singular vectors
    may not be so small. To check use:
    >>> (linalg.subspace_angles(u2[:, :1], u[:, :1]) +
    ...  linalg.subspace_angles(u2[:, 1:], u[:, 1:]))
    array([0.06562513])  # may vary
    >>> (linalg.subspace_angles(vT2[:1, :].T, vT[:1, :].T) +
    ...  linalg.subspace_angles(vT2[1:, :].T, vT[1:, :].T))
    array([0.06562507])  # may vary
    As opposed to the angles between the 2-dimensional invariant subspaces
    that these vectors span, which are small for right singular vectors
    >>> linalg.subspace_angles(u2, u).sum() < 1e-6
    True
    as well as for left singular vectors.
    >>> linalg.subspace_angles(vT2.T, vT.T).sum() < 1e-6
    True
    The next example follows that of 'sklearn.decomposition.TruncatedSVD'.
    >>> rng = np.random.RandomState(0)
    >>> X_dense = rng.random(size=(100, 100))
    >>> X_dense[:, 2 * np.arange(50)] = 0
    >>> X = sparse.csr_matrix(X_dense)
    >>> _, singular_values, _ = svds(X, k=5, random_state=rng)
    >>> print(singular_values)
    [ 4.3293...  4.4491...  4.5420...  4.5987... 35.2410...]
    The function can be called without the transpose of the input matrix
    ever explicitly constructed.
    >>> rng = np.random.default_rng(102524723947864966825913730119128190974)
    >>> G = sparse.rand(8, 9, density=0.5, random_state=rng)
    >>> Glo = aslinearoperator(G)
    >>> _, singular_values_svds, _ = svds(Glo, k=5, random_state=rng)
    >>> _, singular_values_svd, _ = linalg.svd(G.toarray())
    >>> np.allclose(singular_values_svds, singular_values_svd[-4::-1])
    True
    The most memory efficient scenario is where neither
    the original matrix, nor its transpose, is explicitly constructed.
    Our example computes the smallest singular values and vectors
    of 'LinearOperator' constructed from the numpy function 'np.diff' used
    column-wise to be consistent with 'LinearOperator' operating on columns.
    >>> diff0 = lambda a: np.diff(a, axis=0)
    Let us create the matrix from 'diff0' to be used for validation only.
    >>> n = 5  # The dimension of the space.
    >>> M_from_diff0 = diff0(np.eye(n))
    >>> print(M_from_diff0.astype(int))
    [[-1  1  0  0  0]
     [ 0 -1  1  0  0]
     [ 0  0 -1  1  0]
     [ 0  0  0 -1  1]]
    The matrix 'M_from_diff0' is bi-diagonal and could be alternatively
    created directly by
    >>> M = - np.eye(n - 1, n, dtype=int)
    >>> np.fill_diagonal(M[:,1:], 1)
    >>> np.allclose(M, M_from_diff0)
    True
    Its transpose
    >>> print(M.T)
    [[-1  0  0  0]
     [ 1 -1  0  0]
     [ 0  1 -1  0]
     [ 0  0  1 -1]
     [ 0  0  0  1]]
    can be viewed as the incidence matrix; see
    Incidence matrix, (2022, Nov. 19), Wikipedia, https://w.wiki/5YXU,
    of a linear graph with 5 vertices and 4 edges. The 5x5 normal matrix
    ``M.T @ M`` thus is
    >>> print(M.T @ M)
    [[ 1 -1  0  0  0]
     [-1  2 -1  0  0]
     [ 0 -1  2 -1  0]
     [ 0  0 -1  2 -1]
     [ 0  0  0 -1  1]]
    the graph Laplacian, while the actually used in 'svds' smaller size
    4x4 normal matrix ``M @ M.T``
    >>> print(M @ M.T)
    [[ 2 -1  0  0]
     [-1  2 -1  0]
     [ 0 -1  2 -1]
     [ 0  0 -1  2]]
    is the so-called edge-based Laplacian; see
    Symmetric Laplacian via the incidence matrix, in Laplacian matrix,
    (2022, Nov. 19), Wikipedia, https://w.wiki/5YXW.
    The 'LinearOperator' setup needs the options 'rmatvec' and 'rmatmat'
    of multiplication by the matrix transpose ``M.T``, but we want to be
    matrix-free to save memory, so knowing how ``M.T`` looks like, we
    manually construct the following function to be
    used in ``rmatmat=diff0t``.
    >>> def diff0t(a):
    ...     if a.ndim == 1:
    ...         a = a[:,np.newaxis]  # Turn 1D into 2D array
    ...     d = np.zeros((a.shape[0] + 1, a.shape[1]), dtype=a.dtype)
    ...     d[0, :] = - a[0, :]
    ...     d[1:-1, :] = a[0:-1, :] - a[1:, :]
    ...     d[-1, :] = a[-1, :]
    ...     return d
    We check that our function 'diff0t' for the matrix transpose is valid.
    >>> np.allclose(M.T, diff0t(np.eye(n-1)))
    True
    Now we setup our matrix-free 'LinearOperator' called 'diff0_func_aslo'
    and for validation the matrix-based 'diff0_matrix_aslo'.
    >>> def diff0_func_aslo_def(n):
    ...     return LinearOperator(matvec=diff0,
    ...                           matmat=diff0,
    ...                           rmatvec=diff0t,
    ...                           rmatmat=diff0t,
    ...                           shape=(n - 1, n))
    >>> diff0_func_aslo = diff0_func_aslo_def(n)
    >>> diff0_matrix_aslo = aslinearoperator(M_from_diff0)
    And validate both the matrix and its transpose in 'LinearOperator'.
    >>> np.allclose(diff0_func_aslo(np.eye(n)),
    ...             diff0_matrix_aslo(np.eye(n)))
    True
    >>> np.allclose(diff0_func_aslo.T(np.eye(n-1)),
    ...             diff0_matrix_aslo.T(np.eye(n-1)))
    True
    Having the 'LinearOperator' setup validated, we run the solver.
    >>> n = 100
    >>> diff0_func_aslo = diff0_func_aslo_def(n)
    >>> u, s, vT = svds(diff0_func_aslo, k=3, which='SM')
    The singular values squared and the singular vectors are known
    explicitly; see
    Pure Dirichlet boundary conditions, in
    Eigenvalues and eigenvectors of the second derivative,
    (2022, Nov. 19), Wikipedia, https://w.wiki/5YX6,
    since 'diff' corresponds to first
    derivative, and its smaller size n-1 x n-1 normal matrix
    ``M @ M.T`` represent the discrete second derivative with the Dirichlet
    boundary conditions. We use these analytic expressions for validation.
    >>> se = 2. * np.sin(np.pi * np.arange(1, 4) / (2. * n))
    >>> ue = np.sqrt(2 / n) * np.sin(np.pi * np.outer(np.arange(1, n),
    ...                              np.arange(1, 4)) / n)
    >>> np.allclose(s, se, atol=1e-3)
    True
    >>> print(np.allclose(np.abs(u), np.abs(ue), atol=1e-6))
    True
    """
    # Validate/standardize all inputs up front (raises ValueError on misuse).
    args = _iv(A, k, ncv, tol, which, v0, maxiter, return_singular_vectors,
               solver, random_state)
    (A, k, ncv, tol, which, v0, maxiter,
     return_singular_vectors, solver, random_state) = args

    largest = (which == 'LM')
    n, m = A.shape

    # Work with whichever normal matrix (A^H A or A A^H) is smaller: set up
    # "X" so that X^H X always has size min(n, m) x min(n, m).
    if n >= m:
        X_dot = A.matvec
        X_matmat = A.matmat
        XH_dot = A.rmatvec
        XH_mat = A.rmatmat
        transpose = False
    else:
        X_dot = A.rmatvec
        X_matmat = A.rmatmat
        XH_dot = A.matvec
        XH_mat = A.matmat
        transpose = True

        # NOTE(review): `dtype` is assigned here but does not appear to be
        # used below (the LinearOperator uses `A.dtype` directly).
        dtype = getattr(A, 'dtype', None)
        if dtype is None:
            dtype = A.dot(np.zeros([m, 1])).dtype

    # Implicit normal matrix X^H X as a LinearOperator.
    def matvec_XH_X(x):
        return XH_dot(X_dot(x))

    def matmat_XH_X(x):
        return XH_mat(X_matmat(x))

    XH_X = LinearOperator(matvec=matvec_XH_X, dtype=A.dtype,
                          matmat=matmat_XH_X,
                          shape=(min(A.shape), min(A.shape)))

    # Get a low rank approximation of the implicitly defined gramian matrix.
    # This is not a stable way to approach the problem.
    if solver == 'lobpcg':

        if k == 1 and v0 is not None:
            X = np.reshape(v0, (-1, 1))
        else:
            X = random_state.standard_normal(size=(min(A.shape), k))

        # Squared tolerance because eigenvalues of X^H X are squared
        # singular values of A.
        _, eigvec = lobpcg(XH_X, X, tol=tol ** 2, maxiter=maxiter,
                           largest=largest)

    elif solver == 'propack':
        if not HAS_PROPACK:
            raise ValueError("`solver='propack'` is opt-in due "
                             "to potential issues on Windows, "
                             "it can be enabled by setting the "
                             "`SCIPY_USE_PROPACK` environment "
                             "variable before importing scipy")
        jobu = return_singular_vectors in {True, 'u'}
        jobv = return_singular_vectors in {True, 'vh'}
        # IRL mode is needed to target the smallest singular values.
        irl_mode = (which == 'SM')
        res = _svdp(A, k=k, tol=tol**2, which=which, maxiter=None,
                    compute_u=jobu, compute_v=jobv, irl_mode=irl_mode,
                    kmax=maxiter, v0=v0, random_state=random_state)

        u, s, vh, _ = res  # but we'll ignore bnd, the last output

        # PROPACK order appears to be largest first. `svds` output order is not
        # guaranteed, according to documentation, but for ARPACK and LOBPCG
        # they actually are ordered smallest to largest, so reverse for
        # consistency.
        s = s[::-1]
        u = u[:, ::-1]
        vh = vh[::-1]

        u = u if jobu else None
        vh = vh if jobv else None

        if return_singular_vectors:
            return u, s, vh
        else:
            return s

    elif solver == 'arpack' or solver is None:
        if v0 is None:
            v0 = random_state.standard_normal(size=(min(A.shape),))
        _, eigvec = eigsh(XH_X, k=k, tol=tol ** 2, maxiter=maxiter,
                          ncv=ncv, which=which, v0=v0)
        # arpack do not guarantee exactly orthonormal eigenvectors
        # for clustered eigenvalues, especially in complex arithmetic
        eigvec, _ = np.linalg.qr(eigvec)

    # the eigenvectors eigvec must be orthonomal here; see gh-16712
    # Rayleigh-Ritz postprocessing: project A onto the eigenvector basis and
    # take a small dense SVD to recover singular values/vectors.
    Av = X_matmat(eigvec)
    if not return_singular_vectors:
        s = svd(Av, compute_uv=False, overwrite_a=True)
        return s[::-1]

    # compute the left singular vectors of X and update the right ones
    # accordingly
    u, s, vh = svd(Av, full_matrices=False, overwrite_a=True)
    u = u[:, ::-1]
    s = s[::-1]
    vh = vh[::-1]

    jobu = return_singular_vectors in {True, 'u'}
    jobv = return_singular_vectors in {True, 'vh'}

    if transpose:
        # If we operated on A^H, swap the roles of u and vh on the way out.
        u_tmp = eigvec @ _herm(vh) if jobu else None
        vh = _herm(u) if jobv else None
        u = u_tmp
    else:
        if not jobu:
            u = None
        vh = vh @ _herm(eigvec) if jobv else None

    return u, s, vh
| 20,684
| 36.136445
| 79
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_eigen/setup.py
|
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration for scipy.sparse.linalg._eigen."""
    from numpy.distutils.misc_util import Configuration

    config = Configuration('_eigen', parent_package, top_path)
    # Eigensolver backends shipped with this package.
    config.add_subpackage('arpack')
    config.add_subpackage('lobpcg')
    config.add_data_dir('tests')
    return config
# Allow building this subpackage standalone via numpy.distutils.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 417
| 23.588235
| 60
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_eigen/__init__.py
|
"""
Sparse Eigenvalue Solvers
-------------------------
The submodules of sparse.linalg._eigen:
1. lobpcg: Locally Optimal Block Preconditioned Conjugate Gradient Method
"""
from .arpack import *
from .lobpcg import *
from ._svds import svds
from . import arpack
# Public API of scipy.sparse.linalg._eigen.
__all__ = [
    'ArpackError', 'ArpackNoConvergence',
    'eigs', 'eigsh', 'lobpcg', 'svds'
]

# Expose `test()` for running this subpackage's test suite; drop the class
# name from the namespace afterwards.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 460
| 19.043478
| 77
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_eigen/_svds_doc.py
|
def _svds_arpack_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
                     maxiter=None, return_singular_vectors=True,
                     solver='arpack', random_state=None):
    """
    Partial singular value decomposition of a sparse matrix using ARPACK.
    Compute the largest or smallest `k` singular values and corresponding
    singular vectors of a sparse matrix `A`. The order in which the singular
    values are returned is not guaranteed.
    In the descriptions below, let ``M, N = A.shape``.
    Parameters
    ----------
    A : sparse matrix or LinearOperator
        Matrix to decompose.
    k : int, optional
        Number of singular values and singular vectors to compute.
        Must satisfy ``1 <= k <= min(M, N) - 1``.
        Default is 6.
    ncv : int, optional
        The number of Lanczos vectors generated.
        The default is ``min(n, max(2*k + 1, 20))``.
        If specified, must satisfy ``k + 1 < ncv < min(M, N)``; ``ncv > 2*k``
        is recommended.
    tol : float, optional
        Tolerance for singular values. Zero (default) means machine precision.
    which : {'LM', 'SM'}
        Which `k` singular values to find: either the largest magnitude ('LM')
        or smallest magnitude ('SM') singular values.
    v0 : ndarray, optional
        The starting vector for iteration:
        an (approximate) left singular vector if ``N > M`` and a right singular
        vector otherwise. Must be of length ``min(M, N)``.
        Default: random
    maxiter : int, optional
        Maximum number of Arnoldi update iterations allowed;
        default is ``min(M, N) * 10``.
    return_singular_vectors : {True, False, "u", "vh"}
        Singular values are always computed and returned; this parameter
        controls the computation and return of singular vectors.
        - ``True``: return singular vectors.
        - ``False``: do not return singular vectors.
        - ``"u"``: if ``M <= N``, compute only the left singular vectors and
          return ``None`` for the right singular vectors. Otherwise, compute
          all singular vectors.
        - ``"vh"``: if ``M > N``, compute only the right singular vectors and
          return ``None`` for the left singular vectors. Otherwise, compute
          all singular vectors.
    solver :  {'arpack', 'propack', 'lobpcg'}, optional
        This is the solver-specific documentation for ``solver='arpack'``.
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>` and
        :ref:`'propack' <sparse.linalg.svds-propack>`
        are also supported.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional
        Pseudorandom number generator state used to generate resamples.
        If `random_state` is ``None`` (or `np.random`), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance then that instance is used.
    options : dict, optional
        A dictionary of solver-specific options. No solver-specific options
        are currently supported; this parameter is reserved for future use.
    Returns
    -------
    u : ndarray, shape=(M, k)
        Unitary matrix having left singular vectors as columns.
    s : ndarray, shape=(k,)
        The singular values.
    vh : ndarray, shape=(k, N)
        Unitary matrix having right singular vectors as rows.
    Notes
    -----
    This is a naive implementation using ARPACK as an eigensolver
    on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
    efficient.
    Examples
    --------
    Construct a matrix ``A`` from singular values and vectors.
    >>> import numpy as np
    >>> from scipy.stats import ortho_group
    >>> from scipy.sparse import csc_matrix, diags
    >>> from scipy.sparse.linalg import svds
    >>> rng = np.random.default_rng()
    >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
    >>> u = orthogonal[:, :5]         # left singular vectors
    >>> vT = orthogonal[:, 5:].T      # right singular vectors
    >>> A = u @ diags(s) @ vT
    With only three singular values/vectors, the SVD approximates the original
    matrix.
    >>> u2, s2, vT2 = svds(A, k=3, solver='arpack')
    >>> A2 = u2 @ np.diag(s2) @ vT2
    >>> np.allclose(A2, A.toarray(), atol=1e-3)
    True
    With all five singular values/vectors, we can reproduce the original
    matrix.
    >>> u3, s3, vT3 = svds(A, k=5, solver='arpack')
    >>> A3 = u3 @ np.diag(s3) @ vT3
    >>> np.allclose(A3, A.toarray())
    True
    The singular values match the expected singular values, and the singular
    vectors are as expected up to a difference in sign.
    >>> (np.allclose(s3, s) and
    ...  np.allclose(np.abs(u3), np.abs(u.toarray())) and
    ...  np.allclose(np.abs(vT3), np.abs(vT.toarray())))
    True
    The singular vectors are also orthogonal.
    >>> (np.allclose(u3.T @ u3, np.eye(5)) and
    ...  np.allclose(vT3 @ vT3.T, np.eye(5)))
    True
    """
    # Documentation-only stub: the real implementation lives in `svds`.
    pass
def _svds_lobpcg_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
                     maxiter=None, return_singular_vectors=True,
                     solver='lobpcg', random_state=None):
    """
    Partial singular value decomposition of a sparse matrix using LOBPCG.

    Compute the largest or smallest `k` singular values and corresponding
    singular vectors of a sparse matrix `A`. The order in which the singular
    values are returned is not guaranteed.

    In the descriptions below, let ``M, N = A.shape``.

    Parameters
    ----------
    A : sparse matrix or LinearOperator
        Matrix to decompose.
    k : int, default: 6
        Number of singular values and singular vectors to compute.
        Must satisfy ``1 <= k <= min(M, N) - 1``.
    ncv : int, optional
        Ignored.
    tol : float, optional
        Tolerance for singular values. Zero (default) means machine precision.
    which : {'LM', 'SM'}
        Which `k` singular values to find: either the largest magnitude ('LM')
        or smallest magnitude ('SM') singular values.
    v0 : ndarray, optional
        If `k` is 1, the starting vector for iteration:
        an (approximate) left singular vector if ``N > M`` and a right singular
        vector otherwise. Must be of length ``min(M, N)``.
        Ignored otherwise.
        Default: random
    maxiter : int, default: 20
        Maximum number of iterations.
    return_singular_vectors : {True, False, "u", "vh"}
        Singular values are always computed and returned; this parameter
        controls the computation and return of singular vectors.

        - ``True``: return singular vectors.
        - ``False``: do not return singular vectors.
        - ``"u"``: if ``M <= N``, compute only the left singular vectors and
          return ``None`` for the right singular vectors. Otherwise, compute
          all singular vectors.
        - ``"vh"``: if ``M > N``, compute only the right singular vectors and
          return ``None`` for the left singular vectors. Otherwise, compute
          all singular vectors.

    solver : {'arpack', 'propack', 'lobpcg'}, optional
        This is the solver-specific documentation for ``solver='lobpcg'``.
        :ref:`'arpack' <sparse.linalg.svds-arpack>` and
        :ref:`'propack' <sparse.linalg.svds-propack>`
        are also supported.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional
        Pseudorandom number generator state used to generate resamples.

        If `random_state` is ``None`` (or `np.random`), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance then that instance is used.
    options : dict, optional
        A dictionary of solver-specific options. No solver-specific options
        are currently supported; this parameter is reserved for future use.

    Returns
    -------
    u : ndarray, shape=(M, k)
        Unitary matrix having left singular vectors as columns.
    s : ndarray, shape=(k,)
        The singular values.
    vh : ndarray, shape=(k, N)
        Unitary matrix having right singular vectors as rows.

    Notes
    -----
    This is a naive implementation using LOBPCG as an eigensolver
    on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
    efficient.

    Examples
    --------
    Construct a matrix ``A`` from singular values and vectors.

    >>> from scipy.stats import ortho_group
    >>> from scipy.sparse import csc_matrix, diags
    >>> from scipy.sparse.linalg import svds
    >>> rng = np.random.default_rng()
    >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
    >>> u = orthogonal[:, :5]         # left singular vectors
    >>> vT = orthogonal[:, 5:].T      # right singular vectors
    >>> A = u @ diags(s) @ vT

    With only three singular values/vectors, the SVD approximates the original
    matrix.

    >>> u2, s2, vT2 = svds(A, k=3, solver='lobpcg')
    >>> A2 = u2 @ np.diag(s2) @ vT2
    >>> np.allclose(A2, A.toarray(), atol=1e-3)
    True

    With all five singular values/vectors, we can reproduce the original
    matrix.

    >>> u3, s3, vT3 = svds(A, k=5, solver='lobpcg')
    >>> A3 = u3 @ np.diag(s3) @ vT3
    >>> np.allclose(A3, A.toarray())
    True

    The singular values match the expected singular values, and the singular
    vectors are as expected up to a difference in sign.

    >>> (np.allclose(s3, s) and
    ...  np.allclose(np.abs(u3), np.abs(u.toarray())) and
    ...  np.allclose(np.abs(vT3), np.abs(vT.toarray())))
    True

    The singular vectors are also orthogonal.

    >>> (np.allclose(u3.T @ u3, np.eye(5)) and
    ...  np.allclose(vT3 @ vT3.T, np.eye(5)))
    True
    """
    # Documentation holder only: svds() swaps in this docstring when
    # solver='lobpcg'; it is never called.
    pass
def _svds_propack_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
                      maxiter=None, return_singular_vectors=True,
                      solver='propack', random_state=None):
    """
    Partial singular value decomposition of a sparse matrix using PROPACK.

    Compute the largest or smallest `k` singular values and corresponding
    singular vectors of a sparse matrix `A`. The order in which the singular
    values are returned is not guaranteed.

    In the descriptions below, let ``M, N = A.shape``.

    Parameters
    ----------
    A : sparse matrix or LinearOperator
        Matrix to decompose. If `A` is a ``LinearOperator``
        object, it must define both ``matvec`` and ``rmatvec`` methods.
    k : int, default: 6
        Number of singular values and singular vectors to compute.
        Must satisfy ``1 <= k <= min(M, N)``.
    ncv : int, optional
        Ignored.
    tol : float, optional
        The desired relative accuracy for computed singular values.
        Zero (default) means machine precision.
    which : {'LM', 'SM'}
        Which `k` singular values to find: either the largest magnitude ('LM')
        or smallest magnitude ('SM') singular values. Note that choosing
        ``which='SM'`` will force the ``irl`` option to be set ``True``.
    v0 : ndarray, optional
        Starting vector for iterations: must be of length ``A.shape[0]``.
        If not specified, PROPACK will generate a starting vector.
    maxiter : int, optional
        Maximum number of iterations / maximal dimension of the Krylov
        subspace. Default is ``10 * k``.
    return_singular_vectors : {True, False, "u", "vh"}
        Singular values are always computed and returned; this parameter
        controls the computation and return of singular vectors.

        - ``True``: return singular vectors.
        - ``False``: do not return singular vectors.
        - ``"u"``: compute only the left singular vectors; return ``None`` for
          the right singular vectors.
        - ``"vh"``: compute only the right singular vectors; return ``None``
          for the left singular vectors.

    solver : {'arpack', 'propack', 'lobpcg'}, optional
        This is the solver-specific documentation for ``solver='propack'``.
        :ref:`'arpack' <sparse.linalg.svds-arpack>` and
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`
        are also supported.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional
        Pseudorandom number generator state used to generate resamples.

        If `random_state` is ``None`` (or `np.random`), the
        `numpy.random.RandomState` singleton is used.
        If `random_state` is an int, a new ``RandomState`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` or ``RandomState``
        instance then that instance is used.
    options : dict, optional
        A dictionary of solver-specific options. No solver-specific options
        are currently supported; this parameter is reserved for future use.

    Returns
    -------
    u : ndarray, shape=(M, k)
        Unitary matrix having left singular vectors as columns.
    s : ndarray, shape=(k,)
        The singular values.
    vh : ndarray, shape=(k, N)
        Unitary matrix having right singular vectors as rows.

    Notes
    -----
    This is an interface to the Fortran library PROPACK [1]_.
    The current default is to run with IRL mode disabled unless seeking the
    smallest singular values/vectors (``which='SM'``).

    References
    ----------
    .. [1] Larsen, Rasmus Munk. "PROPACK-Software for large and sparse SVD
       calculations." Available online. URL
       http://sun.stanford.edu/~rmunk/PROPACK (2004): 2008-2009.

    Examples
    --------
    Construct a matrix ``A`` from singular values and vectors.

    >>> from scipy.stats import ortho_group
    >>> from scipy.sparse import csc_matrix, diags
    >>> from scipy.sparse.linalg import svds
    >>> rng = np.random.default_rng()
    >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
    >>> u = orthogonal[:, :5]         # left singular vectors
    >>> vT = orthogonal[:, 5:].T      # right singular vectors
    >>> A = u @ diags(s) @ vT

    With only three singular values/vectors, the SVD approximates the original
    matrix.

    >>> u2, s2, vT2 = svds(A, k=3, solver='propack')
    >>> A2 = u2 @ np.diag(s2) @ vT2
    >>> np.allclose(A2, A.toarray(), atol=1e-3)
    True

    With all five singular values/vectors, we can reproduce the original
    matrix.

    >>> u3, s3, vT3 = svds(A, k=5, solver='propack')
    >>> A3 = u3 @ np.diag(s3) @ vT3
    >>> np.allclose(A3, A.toarray())
    True

    The singular values match the expected singular values, and the singular
    vectors are as expected up to a difference in sign.

    >>> (np.allclose(s3, s) and
    ...  np.allclose(np.abs(u3), np.abs(u.toarray())) and
    ...  np.allclose(np.abs(vT3), np.abs(vT.toarray())))
    True

    The singular vectors are also orthogonal.

    >>> (np.allclose(u3.T @ u3, np.eye(5)) and
    ...  np.allclose(vT3 @ vT3.T, np.eye(5)))
    True
    """
    # Documentation holder only: svds() swaps in this docstring when
    # solver='propack'; it is never called.
    pass
| 15,524
| 38.007538
| 79
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_eigen/tests/test_svds.py
|
import os
import re
import copy
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_array_equal
import pytest
from scipy.linalg import svd, null_space
from scipy.sparse import csc_matrix, issparse, spdiags, random
from scipy.sparse.linalg import LinearOperator, aslinearoperator
# PROPACK-backed tests only run when explicitly enabled via the environment.
has_propack = bool(os.environ.get("SCIPY_USE_PROPACK"))
from scipy.sparse.linalg import svds
from scipy.sparse.linalg._eigen.arpack import ArpackNoConvergence
# --- Helper Functions / Classes ---
def sorted_svd(m, k, which='LM'):
    """Dense reference SVD of `m`, truncated to the k largest ('LM') or
    smallest ('SM') singular triplets, sorted by ascending singular value."""
    dense = m.toarray() if issparse(m) else m
    u, s, vh = svd(dense)
    order = np.argsort(s)
    if which == 'LM':
        keep = order[-k:]
    elif which == 'SM':
        keep = order[:k]
    else:
        raise ValueError(f"unknown which={which!r}")
    return u[:, keep], s[keep], vh[keep]
def _check_svds(A, k, u, s, vh, which="LM", check_usvh_A=False,
check_svd=True, atol=1e-10, rtol=1e-7):
n, m = A.shape
# Check shapes.
assert_equal(u.shape, (n, k))
assert_equal(s.shape, (k,))
assert_equal(vh.shape, (k, m))
# Check that the original matrix can be reconstituted.
A_rebuilt = (u*s).dot(vh)
assert_equal(A_rebuilt.shape, A.shape)
if check_usvh_A:
assert_allclose(A_rebuilt, A, atol=atol, rtol=rtol)
# Check that u is a semi-orthogonal matrix.
uh_u = np.dot(u.T.conj(), u)
assert_equal(uh_u.shape, (k, k))
assert_allclose(uh_u, np.identity(k), atol=atol, rtol=rtol)
# Check that vh is a semi-orthogonal matrix.
vh_v = np.dot(vh, vh.T.conj())
assert_equal(vh_v.shape, (k, k))
assert_allclose(vh_v, np.identity(k), atol=atol, rtol=rtol)
# Check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
if check_svd:
u2, s2, vh2 = sorted_svd(A, k, which)
assert_allclose(np.abs(u), np.abs(u2), atol=atol, rtol=rtol)
assert_allclose(s, s2, atol=atol, rtol=rtol)
assert_allclose(np.abs(vh), np.abs(vh2), atol=atol, rtol=rtol)
def _check_svds_n(A, k, u, s, vh, which="LM", check_res=True,
check_svd=True, atol=1e-10, rtol=1e-7):
n, m = A.shape
# Check shapes.
assert_equal(u.shape, (n, k))
assert_equal(s.shape, (k,))
assert_equal(vh.shape, (k, m))
# Check that u is a semi-orthogonal matrix.
uh_u = np.dot(u.T.conj(), u)
assert_equal(uh_u.shape, (k, k))
error = np.sum(np.abs(uh_u - np.identity(k))) / (k * k)
assert_allclose(error, 0.0, atol=atol, rtol=rtol)
# Check that vh is a semi-orthogonal matrix.
vh_v = np.dot(vh, vh.T.conj())
assert_equal(vh_v.shape, (k, k))
error = np.sum(np.abs(vh_v - np.identity(k))) / (k * k)
assert_allclose(error, 0.0, atol=atol, rtol=rtol)
# Check residuals
if check_res:
ru = A.T.conj() @ u - vh.T.conj() * s
rus = np.sum(np.abs(ru)) / (n * k)
rvh = A @ vh.T.conj() - u * s
rvhs = np.sum(np.abs(rvh)) / (m * k)
assert_allclose(rus, 0.0, atol=atol, rtol=rtol)
assert_allclose(rvhs, 0.0, atol=atol, rtol=rtol)
# Check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
if check_svd:
u2, s2, vh2 = sorted_svd(A, k, which)
assert_allclose(s, s2, atol=atol, rtol=rtol)
A_rebuilt_svd = (u2*s2).dot(vh2)
A_rebuilt = (u*s).dot(vh)
assert_equal(A_rebuilt.shape, A.shape)
error = np.sum(np.abs(A_rebuilt_svd - A_rebuilt)) / (k * k)
assert_allclose(error, 0.0, atol=atol, rtol=rtol)
class CheckingLinearOperator(LinearOperator):
    """LinearOperator wrapping a matrix that asserts each operand passed to
    matvec/rmatvec is effectively one-dimensional."""

    def __init__(self, A):
        self.A = A
        self.dtype = A.dtype
        self.shape = A.shape

    def _matvec(self, x):
        # x may be (n,) or (n, 1); either way its size equals its longest axis.
        assert_equal(np.size(x), max(x.shape))
        return self.A @ x

    def _rmatvec(self, x):
        assert_equal(np.size(x), max(x.shape))
        return self.A.conjugate().T @ x
# --- Test Input Validation ---
# Tests input validation on parameters `k` and `which`.
# Needs better input validation checks for all other parameters.
class SVDSCommonTests:

    # Subclasses set this to 'arpack', 'lobpcg', or 'propack'; the shared
    # tests below dispatch on it.
    solver = None

    # some of these IV tests could run only once, say with solver=None

    # Expected error messages for the invalid-`A` cases below.
    _A_empty_msg = "`A` must not be empty."
    _A_dtype_msg = "`A` must be of floating or complex floating data type"
    _A_type_msg = "type not understood"
    _A_ndim_msg = "array must have ndim <= 2"

    # (invalid A, expected exception type, expected message) triples consumed
    # by test_svds_input_validation_A.
    _A_validation_inputs = [
        (np.asarray([[]]), ValueError, _A_empty_msg),
        (np.asarray([[1, 2], [3, 4]]), ValueError, _A_dtype_msg),
        ("hi", TypeError, _A_type_msg),
        (np.asarray([[[1., 2.], [3., 4.]]]), ValueError, _A_ndim_msg)]
@pytest.mark.parametrize("args", _A_validation_inputs)
def test_svds_input_validation_A(self, args):
A, error_type, message = args
with pytest.raises(error_type, match=message):
svds(A, k=1, solver=self.solver)
    @pytest.mark.parametrize("k", [-1, 0, 3, 4, 5, 1.5, "1"])
    def test_svds_input_validation_k_1(self, k):
        # `k` outside ``1 <= k <= min(M, N) - 1`` (or non-integral) must
        # raise; PROPACK alone additionally accepts the complete SVD,
        # k == min(M, N).
        rng = np.random.default_rng(0)
        A = rng.random((4, 3))

        # propack can do complete SVD
        if self.solver == 'propack' and k == 3:
            if not has_propack:
                pytest.skip("PROPACK not enabled")
            res = svds(A, k=k, solver=self.solver)
            _check_svds(A, k, *res, check_usvh_A=True, check_svd=True)
            return

        message = ("`k` must be an integer satisfying")
        with pytest.raises(ValueError, match=message):
            svds(A, k=k, solver=self.solver)
def test_svds_input_validation_k_2(self):
# I think the stack trace is reasonable when `k` can't be converted
# to an int.
message = "int() argument must be a"
with pytest.raises(TypeError, match=re.escape(message)):
svds(np.eye(10), k=[], solver=self.solver)
message = "invalid literal for int()"
with pytest.raises(ValueError, match=message):
svds(np.eye(10), k="hi", solver=self.solver)
@pytest.mark.parametrize("tol", (-1, np.inf, np.nan))
def test_svds_input_validation_tol_1(self, tol):
message = "`tol` must be a non-negative floating point value."
with pytest.raises(ValueError, match=message):
svds(np.eye(10), tol=tol, solver=self.solver)
@pytest.mark.parametrize("tol", ([], 'hi'))
def test_svds_input_validation_tol_2(self, tol):
# I think the stack trace is reasonable here
message = "'<' not supported between instances"
with pytest.raises(TypeError, match=message):
svds(np.eye(10), tol=tol, solver=self.solver)
    @pytest.mark.parametrize("which", ('LA', 'SA', 'ekki', 0))
    def test_svds_input_validation_which(self, which):
        # Only 'LM' and 'SM' are valid for svds (unlike eigsh's wider set).
        # Regression test for a github issue.
        # https://github.com/scipy/scipy/issues/4590
        # Function was not checking for eigenvalue type and unintended
        # values could be returned.
        with pytest.raises(ValueError, match="`which` must be in"):
            svds(np.eye(10), which=which, solver=self.solver)
@pytest.mark.parametrize("transpose", (True, False))
@pytest.mark.parametrize("n", range(4, 9))
def test_svds_input_validation_v0_1(self, transpose, n):
rng = np.random.default_rng(0)
A = rng.random((5, 7))
v0 = rng.random(n)
if transpose:
A = A.T
k = 2
message = "`v0` must have shape"
required_length = (A.shape[0] if self.solver == 'propack'
else min(A.shape))
if n != required_length:
with pytest.raises(ValueError, match=message):
svds(A, k=k, v0=v0, solver=self.solver)
def test_svds_input_validation_v0_2(self):
A = np.ones((10, 10))
v0 = np.ones((1, 10))
message = "`v0` must have shape"
with pytest.raises(ValueError, match=message):
svds(A, k=1, v0=v0, solver=self.solver)
@pytest.mark.parametrize("v0", ("hi", 1, np.ones(10, dtype=int)))
def test_svds_input_validation_v0_3(self, v0):
A = np.ones((10, 10))
message = "`v0` must be of floating or complex floating data type."
with pytest.raises(ValueError, match=message):
svds(A, k=1, v0=v0, solver=self.solver)
@pytest.mark.parametrize("maxiter", (-1, 0, 5.5))
def test_svds_input_validation_maxiter_1(self, maxiter):
message = ("`maxiter` must be a positive integer.")
with pytest.raises(ValueError, match=message):
svds(np.eye(10), maxiter=maxiter, solver=self.solver)
def test_svds_input_validation_maxiter_2(self):
# I think the stack trace is reasonable when `k` can't be converted
# to an int.
message = "int() argument must be a"
with pytest.raises(TypeError, match=re.escape(message)):
svds(np.eye(10), maxiter=[], solver=self.solver)
message = "invalid literal for int()"
with pytest.raises(ValueError, match=message):
svds(np.eye(10), maxiter="hi", solver=self.solver)
@pytest.mark.parametrize("rsv", ('ekki', 10))
def test_svds_input_validation_return_singular_vectors(self, rsv):
message = "`return_singular_vectors` must be in"
with pytest.raises(ValueError, match=message):
svds(np.eye(10), return_singular_vectors=rsv, solver=self.solver)
    # --- Test Parameters ---

    @pytest.mark.parametrize("k", [3, 5])
    @pytest.mark.parametrize("which", ["LM", "SM"])
    def test_svds_parameter_k_which(self, k, which):
        if self.solver == 'propack':
            if not has_propack:
                pytest.skip("PROPACK not available")
        # check that the `k` parameter sets the number of eigenvalues/
        # eigenvectors returned.
        # Also check that the `which` parameter sets whether the largest or
        # smallest eigenvalues are returned
        rng = np.random.default_rng(0)
        A = rng.random((10, 10))
        if self.solver == 'lobpcg':
            # lobpcg warns that the problem is small; the warning is expected
            # here, not a failure.
            with pytest.warns(UserWarning, match="The problem size"):
                res = svds(A, k=k, which=which, solver=self.solver,
                           random_state=0)
        else:
            res = svds(A, k=k, which=which, solver=self.solver,
                       random_state=0)
        _check_svds(A, k, *res, which=which, atol=8e-10)
    # loop instead of parametrize for simplicity
    def test_svds_parameter_tol(self):
        if self.solver == 'propack':
            if not has_propack:
                pytest.skip("PROPACK not available")
            return  # TODO: needs work, disabling for now
        # check the effect of the `tol` parameter on solver accuracy by solving
        # the same problem with varying `tol` and comparing the eigenvalues
        # against ground truth computed
        n = 100  # matrix size
        k = 3    # number of eigenvalues to check
        # generate a random, sparse-ish matrix
        # effect isn't apparent for matrices that are too small
        rng = np.random.default_rng(0)
        A = rng.random((n, n))
        A[A > .1] = 0
        A = A @ A.T

        _, s, _ = svd(A)  # calculate ground truth

        # calculate the error as a function of `tol`
        A = csc_matrix(A)

        def err(tol):
            # Fixed v0 so `tol` is the only varying input; lobpcg emits an
            # expected convergence warning at the loosest tolerance.
            if self.solver == 'lobpcg' and tol == 1e-4:
                with pytest.warns(UserWarning, match="Exited at iteration"):
                    _, s2, _ = svds(A, k=k, v0=np.ones(n),
                                    solver=self.solver, tol=tol)
            else:
                _, s2, _ = svds(A, k=k, v0=np.ones(n),
                                solver=self.solver, tol=tol)
            # relative error of the k largest singular values
            return np.linalg.norm((s2 - s[k-1::-1])/s[k-1::-1])

        tols = [1e-4, 1e-2, 1e0]  # tolerance levels to check
        # for 'arpack' and 'propack', accuracies make discrete steps
        accuracies = {'propack': [1e-12, 1e-6, 1e-4],
                      'arpack': [2e-15, 1e-10, 1e-10],
                      'lobpcg': [1e-11, 1e-3, 10]}

        # the observed error must land within an order of magnitude of the
        # expected accuracy for each tolerance level
        for tol, accuracy in zip(tols, accuracies[self.solver]):
            error = err(tol)
            assert error < accuracy
            assert error > accuracy/10
    def test_svd_v0(self):
        if self.solver == 'propack':
            if not has_propack:
                pytest.skip("PROPACK not available")
        # check that the `v0` parameter affects the solution
        n = 100
        k = 1
        # If k != 1, LOBPCG needs more initial vectors, which are generated
        # with random_state, so it does not pass w/ k >= 2.
        # For some other values of `n`, the AssertionErrors are not raised
        # with different v0s, which is reasonable.
        rng = np.random.default_rng(0)
        A = rng.random((n, n))

        # with the same v0, solutions are the same, and they are accurate
        # v0 takes precedence over random_state
        v0a = rng.random(n)
        res1a = svds(A, k, v0=v0a, solver=self.solver, random_state=0)
        res2a = svds(A, k, v0=v0a, solver=self.solver, random_state=1)
        for idx in range(3):
            assert_allclose(res1a[idx], res2a[idx], rtol=1e-15, atol=2e-16)
        _check_svds(A, k, *res1a)

        # with the same v0, solutions are the same, and they are accurate
        v0b = rng.random(n)
        res1b = svds(A, k, v0=v0b, solver=self.solver, random_state=2)
        res2b = svds(A, k, v0=v0b, solver=self.solver, random_state=3)
        for idx in range(3):
            assert_allclose(res1b[idx], res2b[idx], rtol=1e-15, atol=2e-16)
        _check_svds(A, k, *res1b)

        # with different v0, solutions can be numerically different
        message = "Arrays are not equal"
        with pytest.raises(AssertionError, match=message):
            assert_equal(res1a, res1b)
    def test_svd_random_state(self):
        if self.solver == 'propack':
            if not has_propack:
                pytest.skip("PROPACK not available")
        # check that the `random_state` parameter affects the solution
        # Admittedly, `n` and `k` are chosen so that all solver pass all
        # these checks. That's a tall order, since LOBPCG doesn't want to
        # achieve the desired accuracy and ARPACK often returns the same
        # singular values/vectors for different v0.
        n = 100
        k = 1
        rng = np.random.default_rng(0)
        A = rng.random((n, n))

        # with the same random_state, solutions are the same and accurate
        res1a = svds(A, k, solver=self.solver, random_state=0)
        res2a = svds(A, k, solver=self.solver, random_state=0)
        for idx in range(3):
            assert_allclose(res1a[idx], res2a[idx], rtol=1e-15, atol=2e-16)
        _check_svds(A, k, *res1a)

        # with the same random_state, solutions are the same and accurate
        res1b = svds(A, k, solver=self.solver, random_state=1)
        res2b = svds(A, k, solver=self.solver, random_state=1)
        for idx in range(3):
            assert_allclose(res1b[idx], res2b[idx], rtol=1e-15, atol=2e-16)
        _check_svds(A, k, *res1b)

        # with different random_state, solutions can be numerically different
        message = "Arrays are not equal"
        with pytest.raises(AssertionError, match=message):
            assert_equal(res1a, res1b)
    @pytest.mark.parametrize("random_state", (0, 1,
                                              np.random.RandomState(0),
                                              np.random.default_rng(0)))
    def test_svd_random_state_2(self, random_state):
        if self.solver == 'propack':
            if not has_propack:
                pytest.skip("PROPACK not available")
        n = 100
        k = 1
        rng = np.random.default_rng(0)
        A = rng.random((n, n))

        # Deep-copy before the first call: passing a RandomState/Generator to
        # svds advances its internal state, so the copy preserves the
        # original state for the second call.
        random_state_2 = copy.deepcopy(random_state)

        # with the same random_state, solutions are the same and accurate
        res1a = svds(A, k, solver=self.solver, random_state=random_state)
        res2a = svds(A, k, solver=self.solver, random_state=random_state_2)
        for idx in range(3):
            assert_allclose(res1a[idx], res2a[idx], rtol=1e-15, atol=2e-16)
        _check_svds(A, k, *res1a)
    @pytest.mark.parametrize("random_state", (None,
                                              np.random.RandomState(0),
                                              np.random.default_rng(0)))
    def test_svd_random_state_3(self, random_state):
        if self.solver == 'propack':
            if not has_propack:
                pytest.skip("PROPACK not available")
        n = 100
        k = 5
        rng = np.random.default_rng(0)
        A = rng.random((n, n))

        # random_state in different state produces accurate - but not
        # necessarily identical - results
        res1a = svds(A, k, solver=self.solver, random_state=random_state)
        res2a = svds(A, k, solver=self.solver, random_state=random_state)
        _check_svds(A, k, *res1a, atol=2e-10, rtol=1e-6)
        _check_svds(A, k, *res2a, atol=2e-10, rtol=1e-6)

        message = "Arrays are not equal"
        with pytest.raises(AssertionError, match=message):
            assert_equal(res1a, res2a)
    @pytest.mark.filterwarnings("ignore:Exited postprocessing")
    def test_svd_maxiter(self):
        # check that maxiter works as expected: should not return accurate
        # solution after 1 iteration, but should with default `maxiter`
        if self.solver == 'propack':
            if not has_propack:
                pytest.skip("PROPACK not available")
        A = np.diag(np.arange(9)).astype(np.float64)
        k = 1
        u, s, vh = sorted_svd(A, k)

        # maxiter=1 must fail in a solver-specific way: ARPACK/PROPACK raise,
        # lobpcg merely warns about early exit.
        if self.solver == 'arpack':
            message = "ARPACK error -1: No convergence"
            with pytest.raises(ArpackNoConvergence, match=message):
                svds(A, k, ncv=3, maxiter=1, solver=self.solver)
        elif self.solver == 'lobpcg':
            with pytest.warns(UserWarning, match="Exited at iteration"):
                svds(A, k, maxiter=1, solver=self.solver)
        elif self.solver == 'propack':
            message = "k=1 singular triplets did not converge within"
            with pytest.raises(np.linalg.LinAlgError, match=message):
                svds(A, k, maxiter=1, solver=self.solver)

        ud, sd, vhd = svds(A, k, solver=self.solver)  # default maxiter
        _check_svds(A, k, ud, sd, vhd, atol=1e-8)
        assert_allclose(np.abs(ud), np.abs(u), atol=1e-8)
        assert_allclose(np.abs(vhd), np.abs(vh), atol=1e-8)
        assert_allclose(np.abs(sd), np.abs(s), atol=1e-9)
@pytest.mark.parametrize("rsv", (True, False, 'u', 'vh'))
@pytest.mark.parametrize("shape", ((5, 7), (6, 6), (7, 5)))
def test_svd_return_singular_vectors(self, rsv, shape):
# check that the return_singular_vectors parameter works as expected
if self.solver == 'propack':
if not has_propack:
pytest.skip("PROPACK not available")
rng = np.random.default_rng(0)
A = rng.random(shape)
k = 2
M, N = shape
u, s, vh = sorted_svd(A, k)
respect_u = True if self.solver == 'propack' else M <= N
respect_vh = True if self.solver == 'propack' else M > N
if self.solver == 'lobpcg':
with pytest.warns(UserWarning, match="The problem size"):
if rsv is False:
s2 = svds(A, k, return_singular_vectors=rsv,
solver=self.solver, random_state=rng)
assert_allclose(s2, s)
elif rsv == 'u' and respect_u:
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
solver=self.solver, random_state=rng)
assert_allclose(np.abs(u2), np.abs(u))
assert_allclose(s2, s)
assert vh2 is None
elif rsv == 'vh' and respect_vh:
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
solver=self.solver, random_state=rng)
assert u2 is None
assert_allclose(s2, s)
assert_allclose(np.abs(vh2), np.abs(vh))
else:
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
solver=self.solver, random_state=rng)
if u2 is not None:
assert_allclose(np.abs(u2), np.abs(u))
assert_allclose(s2, s)
if vh2 is not None:
assert_allclose(np.abs(vh2), np.abs(vh))
else:
if rsv is False:
s2 = svds(A, k, return_singular_vectors=rsv,
solver=self.solver, random_state=rng)
assert_allclose(s2, s)
elif rsv == 'u' and respect_u:
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
solver=self.solver, random_state=rng)
assert_allclose(np.abs(u2), np.abs(u))
assert_allclose(s2, s)
assert vh2 is None
elif rsv == 'vh' and respect_vh:
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
solver=self.solver, random_state=rng)
assert u2 is None
assert_allclose(s2, s)
assert_allclose(np.abs(vh2), np.abs(vh))
else:
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
solver=self.solver, random_state=rng)
if u2 is not None:
assert_allclose(np.abs(u2), np.abs(u))
assert_allclose(s2, s)
if vh2 is not None:
assert_allclose(np.abs(vh2), np.abs(vh))
# --- Test Basic Functionality ---
# Tests the accuracy of each solver for real and complex matrices provided
# as list, dense array, sparse matrix, and LinearOperator.
A1 = [[1, 2, 3], [3, 4, 3], [1 + 1j, 0, 2], [0, 0, 1]]
A2 = [[1, 2, 3, 8 + 5j], [3 - 2j, 4, 3, 5], [1, 0, 2, 3], [0, 0, 1, 0]]
    @pytest.mark.filterwarnings("ignore:k >= N - 1",
                                reason="needed to demonstrate #16725")
    @pytest.mark.parametrize('A', (A1, A2))
    @pytest.mark.parametrize('k', range(1, 5))
    # PROPACK fails a lot if @pytest.mark.parametrize('which', ("SM", "LM"))
    @pytest.mark.parametrize('real', (True, False))
    @pytest.mark.parametrize('transpose', (False, True))
    # In gh-14299, it was suggested the `svds` should _not_ work with lists
    @pytest.mark.parametrize('lo_type', (np.asarray, csc_matrix,
                                         aslinearoperator))
    def test_svd_simple(self, A, k, real, transpose, lo_type):
        if self.solver == 'propack':
            if not has_propack:
                pytest.skip("PROPACK not available")

        A = np.asarray(A)
        A = np.real(A) if real else A
        A = A.T if transpose else A
        A2 = lo_type(A)

        # could check for the appropriate errors, but that is tested above
        if k > min(A.shape):
            pytest.skip("`k` cannot be greater than `min(A.shape)`")
        if self.solver != 'propack' and k >= min(A.shape):
            pytest.skip("Only PROPACK supports complete SVD")
        if self.solver == 'arpack' and not real and k == min(A.shape) - 1:
            pytest.skip("#16725")

        # np.intp itemsize < 8 identifies a 32-bit build
        if self.solver == 'propack' and (np.intp(0).itemsize < 8 and not real):
            pytest.skip('PROPACK complex-valued SVD methods not available '
                        'for 32-bit builds')

        if self.solver == 'lobpcg':
            # lobpcg warns on these small problems; the warning is expected
            with pytest.warns(UserWarning, match="The problem size"):
                u, s, vh = svds(A2, k, solver=self.solver)
        else:
            u, s, vh = svds(A2, k, solver=self.solver)
        _check_svds(A, k, u, s, vh, atol=3e-10)
    def test_svd_linop(self):
        # svds applied to a LinearOperator wrapper must agree with svds
        # applied to the underlying dense matrix.
        solver = self.solver
        if self.solver == 'propack':
            if not has_propack:
                pytest.skip("PROPACK not available")
        nmks = [(6, 7, 3),
                (9, 5, 4),
                (10, 8, 5)]

        def reorder(args):
            # Sort each triplet by ascending singular value so results from
            # different code paths can be compared entry-wise.
            U, s, VH = args
            j = np.argsort(s)
            return U[:, j], s[j], VH[j, :]

        for n, m, k in nmks:
            # Test svds on a LinearOperator.
            A = np.random.RandomState(52).randn(n, m)
            L = CheckingLinearOperator(A)

            if solver == 'propack':
                v0 = np.ones(n)
            else:
                v0 = np.ones(min(A.shape))
            if solver == 'lobpcg':
                # lobpcg warns on small problems; the warning is expected
                with pytest.warns(UserWarning, match="The problem size"):
                    U1, s1, VH1 = reorder(svds(A, k, v0=v0, solver=solver))
                    U2, s2, VH2 = reorder(svds(L, k, v0=v0, solver=solver))
            else:
                U1, s1, VH1 = reorder(svds(A, k, v0=v0, solver=solver))
                U2, s2, VH2 = reorder(svds(L, k, v0=v0, solver=solver))

            # vectors may differ by sign, so compare magnitudes and products
            assert_allclose(np.abs(U1), np.abs(U2))
            assert_allclose(s1, s2)
            assert_allclose(np.abs(VH1), np.abs(VH2))
            assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
                            np.dot(U2, np.dot(np.diag(s2), VH2)))

            # Try again with which="SM".
            A = np.random.RandomState(1909).randn(n, m)
            L = CheckingLinearOperator(A)

            # TODO: arpack crashes when v0=v0, which="SM"
            kwargs = {'v0': v0} if solver not in {None, 'arpack'} else {}
            if self.solver == 'lobpcg':
                with pytest.warns(UserWarning, match="The problem size"):
                    U1, s1, VH1 = reorder(svds(A, k, which="SM", solver=solver,
                                               **kwargs))
                    U2, s2, VH2 = reorder(svds(L, k, which="SM", solver=solver,
                                               **kwargs))
            else:
                U1, s1, VH1 = reorder(svds(A, k, which="SM", solver=solver,
                                           **kwargs))
                U2, s2, VH2 = reorder(svds(L, k, which="SM", solver=solver,
                                           **kwargs))

            assert_allclose(np.abs(U1), np.abs(U2))
            # +1 shifts away from zero so relative comparison is meaningful
            assert_allclose(s1 + 1, s2 + 1)
            assert_allclose(np.abs(VH1), np.abs(VH2))
            assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
                            np.dot(U2, np.dot(np.diag(s2), VH2)))

            if k < min(n, m) - 1:
                # Complex input and explicit which="LM".
                for (dt, eps) in [(complex, 1e-7), (np.complex64, 1e-3)]:
                    # np.intp itemsize < 8 identifies a 32-bit build
                    if self.solver == 'propack' and np.intp(0).itemsize < 8:
                        pytest.skip('PROPACK complex-valued SVD methods '
                                    'not available for 32-bit builds')
                    rng = np.random.RandomState(1648)
                    A = (rng.randn(n, m) + 1j * rng.randn(n, m)).astype(dt)
                    L = CheckingLinearOperator(A)

                    if self.solver == 'lobpcg':
                        with pytest.warns(UserWarning,
                                          match="The problem size"):
                            U1, s1, VH1 = reorder(svds(A, k, which="LM",
                                                       solver=solver))
                            U2, s2, VH2 = reorder(svds(L, k, which="LM",
                                                       solver=solver))
                    else:
                        U1, s1, VH1 = reorder(svds(A, k, which="LM",
                                                   solver=solver))
                        U2, s2, VH2 = reorder(svds(L, k, which="LM",
                                                   solver=solver))

                    assert_allclose(np.abs(U1), np.abs(U2), rtol=eps)
                    assert_allclose(s1, s2, rtol=eps)
                    assert_allclose(np.abs(VH1), np.abs(VH2), rtol=eps)
                    assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
                                    np.dot(U2, np.dot(np.diag(s2), VH2)),
                                    rtol=eps)
SHAPES = ((100, 100), (100, 101), (101, 100))
@pytest.mark.filterwarnings("ignore:Exited at iteration")
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
@pytest.mark.parametrize("shape", SHAPES)
# ARPACK supports only dtype float, complex, or np.float32
@pytest.mark.parametrize("dtype", (float, complex, np.float32))
def test_small_sigma_sparse(self, shape, dtype):
# https://github.com/scipy/scipy/pull/11829
solver = self.solver
# 2do: PROPACK fails orthogonality of singular vectors
# if dtype == complex and self.solver == 'propack':
# pytest.skip("PROPACK unsupported for complex dtype")
if solver == 'propack':
pytest.skip("PROPACK failures unrelated to PR")
rng = np.random.default_rng(0)
k = 5
(m, n) = shape
S = random(m, n, density=0.1, random_state=rng)
if dtype == complex:
S = + 1j * random(m, n, density=0.1, random_state=rng)
e = np.ones(m)
e[0:5] *= 1e1 ** np.arange(-5, 0, 1)
S = spdiags(e, 0, m, m) @ S
S = S.astype(dtype)
u, s, vh = svds(S, k, which='SM', solver=solver, maxiter=1000)
c_svd = False # partial SVD can be different from full SVD
_check_svds_n(S, k, u, s, vh, which="SM", check_svd=c_svd, atol=1e-1)
    # --- Test Edge Cases ---
    # Checks a few edge cases.

    @pytest.mark.parametrize("shape", ((6, 5), (5, 5), (5, 6)))
    @pytest.mark.parametrize("dtype", (float, complex))
    def test_svd_LM_ones_matrix(self, shape, dtype):
        # Check that svds can deal with matrix_rank less than k in LM mode.
        k = 3
        n, m = shape
        A = np.ones((n, m), dtype=dtype)

        if self.solver == 'lobpcg':
            # lobpcg warns on small problems; the warning is expected
            with pytest.warns(UserWarning, match="The problem size"):
                U, s, VH = svds(A, k, solver=self.solver)
        else:
            U, s, VH = svds(A, k, solver=self.solver)

        _check_svds(A, k, U, s, VH, check_usvh_A=True, check_svd=False)

        # Check that the largest singular value is near sqrt(n*m)
        # and the other singular values have been forced to zero.
        assert_allclose(np.max(s), np.sqrt(n*m))
        # shift the (near-)zero values by 1 so allclose compares against a
        # nonzero reference
        s = np.array(sorted(s)[:-1]) + 1
        z = np.ones_like(s)
        assert_allclose(s, z)
@pytest.mark.filterwarnings("ignore:k >= N - 1",
reason="needed to demonstrate #16725")
@pytest.mark.parametrize("shape", ((3, 4), (4, 4), (4, 3), (4, 2)))
@pytest.mark.parametrize("dtype", (float, complex))
def test_zero_matrix(self, shape, dtype):
# Check that svds can deal with matrices containing only zeros;
# see https://github.com/scipy/scipy/issues/3452/
# shape = (4, 2) is included because it is the particular case
# reported in the issue
k = 1
n, m = shape
A = np.zeros((n, m), dtype=dtype)
if (self.solver == 'arpack' and dtype is complex
and k == min(A.shape) - 1):
pytest.skip("#16725")
if self.solver == 'propack':
pytest.skip("PROPACK failures unrelated to PR #16712")
if self.solver == 'lobpcg':
with pytest.warns(UserWarning, match="The problem size"):
U, s, VH = svds(A, k, solver=self.solver)
else:
U, s, VH = svds(A, k, solver=self.solver)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH, check_usvh_A=True, check_svd=False)
# Check that the singular values are zero.
assert_array_equal(s, 0)
@pytest.mark.parametrize("shape", ((20, 20), (20, 21), (21, 20)))
# ARPACK supports only dtype float, complex, or np.float32
@pytest.mark.parametrize("dtype", (float, complex, np.float32))
def test_small_sigma(self, shape, dtype):
if not has_propack:
pytest.skip("PROPACK not enabled")
# https://github.com/scipy/scipy/pull/11829
if dtype == complex and self.solver == 'propack':
pytest.skip("PROPACK unsupported for complex dtype")
rng = np.random.default_rng(179847540)
A = rng.random(shape).astype(dtype)
u, _, vh = svd(A, full_matrices=False)
if dtype == np.float32:
e = 10.0
else:
e = 100.0
t = e**(-np.arange(len(vh))).astype(dtype)
A = (u*t).dot(vh)
k = 4
u, s, vh = svds(A, k, solver=self.solver, maxiter=100)
t = np.sum(s > 0)
assert_equal(t, k)
# LOBPCG needs larger atol and rtol to pass
_check_svds_n(A, k, u, s, vh, atol=1e-3, rtol=1e0, check_svd=False)
    # ARPACK supports only dtype float, complex, or np.float32
    @pytest.mark.filterwarnings("ignore:The problem size")
    @pytest.mark.parametrize("dtype", (float, complex, np.float32))
    def test_small_sigma2(self, dtype):
        # Check that svds(..., which='SM') recovers the (zero) smallest
        # singular values and a basis of the null space of a rank-deficient
        # matrix.
        if self.solver == 'propack':
            if not has_propack:
                pytest.skip("PROPACK not enabled")
            elif dtype == np.float32:
                pytest.skip("Test failures in CI, see gh-17004")
            elif dtype == complex:
                # https://github.com/scipy/scipy/issues/11406
                pytest.skip("PROPACK unsupported for complex dtype")
        rng = np.random.default_rng(179847540)
        # create a 10x10 singular matrix with a 4-dim null space
        dim = 4
        size = 10
        x = rng.random((size, size-dim))
        # The last `dim` columns are scalar multiples of the first `dim`
        # columns, so the stacked matrix is rank-deficient by construction.
        y = x[:, :dim] * rng.random(dim)
        mat = np.hstack((x, y))
        mat = mat.astype(dtype)
        nz = null_space(mat)
        assert_equal(nz.shape[1], dim)
        # Tolerances atol and rtol adjusted to pass np.float32
        # Use non-sparse svd
        u, s, vh = svd(mat)
        # Singular values are 0:
        assert_allclose(s[-dim:], 0, atol=1e-6, rtol=1e0)
        # Smallest right singular vectors in null space:
        assert_allclose(mat @ vh[-dim:, :].T, 0, atol=1e-6, rtol=1e0)
        # Smallest singular values should be 0
        sp_mat = csc_matrix(mat)
        su, ss, svh = svds(sp_mat, k=dim, which='SM', solver=self.solver)
        # Smallest dim singular values are 0:
        assert_allclose(ss, 0, atol=1e-5, rtol=1e0)
        # Smallest singular vectors via svds in null space:
        n, m = mat.shape
        if n < m:  # else the assert fails with some libraries unclear why
            assert_allclose(sp_mat.transpose() @ su, 0, atol=1e-5, rtol=1e0)
        assert_allclose(sp_mat @ svh.T, 0, atol=1e-5, rtol=1e0)
# --- Perform tests with each solver ---
class Test_SVDS_once():
    """Solver-independent input-validation checks, run once rather than
    once per solver."""
    @pytest.mark.parametrize("solver", ['ekki', object])
    def test_svds_input_validation_solver(self, solver):
        # An unrecognized solver name (or a non-string) must be rejected.
        with pytest.raises(ValueError, match="solver must be one of"):
            svds(np.ones((3, 4)), k=2, solver=solver)
class Test_SVDS_ARPACK(SVDSCommonTests):
    """Run the shared svds test suite with the ARPACK solver, plus
    ARPACK-specific validation of the `ncv` parameter."""
    def setup_method(self):
        # The shared tests in SVDSCommonTests dispatch on `self.solver`.
        self.solver = 'arpack'
    @pytest.mark.parametrize("ncv", list(range(-1, 8)) + [4.5, "5"])
    def test_svds_input_validation_ncv_1(self, ncv):
        # `ncv` must be an integer with k < ncv <= min(M, N); here k=3 and
        # min(M, N)=6, so only 4 and 5 are valid.
        rng = np.random.default_rng(0)
        A = rng.random((6, 7))
        k = 3
        if ncv in {4, 5}:
            u, s, vh = svds(A, k=k, ncv=ncv, solver=self.solver)
            # partial decomposition, so don't check that u@diag(s)@vh=A;
            # do check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
            _check_svds(A, k, u, s, vh)
        else:
            message = ("`ncv` must be an integer satisfying")
            with pytest.raises(ValueError, match=message):
                svds(A, k=k, ncv=ncv, solver=self.solver)
    def test_svds_input_validation_ncv_2(self):
        # I think the stack trace is reasonable when `ncv` can't be converted
        # to an int.
        message = "int() argument must be a"
        with pytest.raises(TypeError, match=re.escape(message)):
            svds(np.eye(10), ncv=[], solver=self.solver)
        message = "invalid literal for int()"
        # Fix: escape the pattern — `match` is a regex, and the bare "()"
        # was silently treated as an empty group rather than literal parens
        # (consistent with the TypeError check above).
        with pytest.raises(ValueError, match=re.escape(message)):
            svds(np.eye(10), ncv="hi", solver=self.solver)
    # I can't see a robust relationship between `ncv` and relevant outputs
    # (e.g. accuracy, time), so no test of the parameter.
class Test_SVDS_LOBPCG(SVDSCommonTests):
    """Run the shared svds test suite with the LOBPCG solver."""
    def setup_method(self):
        # The shared tests in SVDSCommonTests dispatch on `self.solver`.
        self.solver = 'lobpcg'
    def test_svd_random_state_3(self):
        # Override of the shared test: expected failure for LOBPCG.
        pytest.xfail("LOBPCG is having trouble with accuracy.")
class Test_SVDS_PROPACK(SVDSCommonTests):
    """Run the shared svds test suite with the PROPACK solver."""
    def setup_method(self):
        # The shared tests in SVDSCommonTests dispatch on `self.solver`.
        self.solver = 'propack'
    def test_svd_LM_ones_matrix(self):
        # Override of the shared test: expected failure for PROPACK.
        message = ("PROPACK does not return orthonormal singular vectors "
                   "associated with zero singular values.")
        # There are some other issues with this matrix of all ones, e.g.
        # `which='sm'` and `k=1` returns the largest singular value
        pytest.xfail(message)
    def test_svd_LM_zeros_matrix(self):
        # Override of the shared test: expected failure for PROPACK.
        message = ("PROPACK does not return orthonormal singular vectors "
                   "associated with zero singular values.")
        pytest.xfail(message)
| 37,553
| 40.313531
| 79
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_eigen/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/sparse/linalg/_eigen/arpack/setup.py
|
from os.path import join
def configuration(parent_package='',top_path=None):
    """Build configuration for the scipy ARPACK extension (legacy
    numpy.distutils build).

    Compiles the bundled ARPACK Fortran sources into a static library and
    wraps them with an f2py extension `_arpack`.
    """
    from numpy.distutils.system_info import get_info
    from numpy.distutils.misc_util import Configuration
    from scipy._build_utils import (get_g77_abi_wrappers,
                                    gfortran_legacy_flag_hook,
                                    blas_ilp64_pre_build_hook,
                                    uses_blas64, get_f2py_int64_options)
    if uses_blas64():
        # 64-bit (ILP64) BLAS/LAPACK: needs the int64 f2py options and an
        # extra pre-build hook on top of the legacy-gfortran one.
        lapack_opt = get_info('lapack_ilp64_opt', 2)
        pre_build_hook = (gfortran_legacy_flag_hook,
                          blas_ilp64_pre_build_hook(lapack_opt))
        f2py_options = get_f2py_int64_options()
    else:
        lapack_opt = get_info('lapack_opt')
        pre_build_hook = gfortran_legacy_flag_hook
        f2py_options = None
    config = Configuration('arpack', parent_package, top_path)
    # All bundled ARPACK Fortran sources plus the g77 ABI shims.
    arpack_sources = [join('ARPACK','SRC', '*.f')]
    arpack_sources.extend([join('ARPACK','UTIL', '*.f')])
    arpack_sources += get_g77_abi_wrappers(lapack_opt)
    config.add_library('arpack_scipy', sources=arpack_sources,
                       include_dirs=[join('ARPACK', 'SRC')],
                       _pre_build_hook=pre_build_hook)
    ext = config.add_extension('_arpack',
                               sources=['arpack.pyf.src'],
                               libraries=['arpack_scipy'],
                               f2py_options=f2py_options,
                               extra_info=lapack_opt,
                               depends=arpack_sources)
    ext._pre_build_hook = pre_build_hook
    config.add_data_dir('tests')
    # Add license files
    config.add_data_files('ARPACK/COPYING')
    return config
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 1,842
| 34.442308
| 72
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_eigen/arpack/__init__.py
|
"""
Eigenvalue solver using iterative methods.
Find k eigenvectors and eigenvalues of a matrix A using the
Arnoldi/Lanczos iterative methods from ARPACK [1]_,[2]_.
These methods are most useful for large sparse matrices.
- eigs(A,k)
- eigsh(A,k)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
from .arpack import *
| 562
| 25.809524
| 71
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_eigen/arpack/arpack.py
|
"""
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: https://github.com/opencollab/arpack-ng
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no specialized interface for complex Hermitian matrices.
# To find eigenvalues of a complex Hermitian matrix you
# may use eigsh(), but eigsh() will simply call eigs()
# and return the real part of the eigenvalues thus obtained.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK and handle shifted and shift-inverse computations
# for eigenvalues by providing a shift (sigma) and a solver.
import numpy as np
import warnings
from scipy.sparse.linalg._interface import aslinearoperator, LinearOperator
from scipy.sparse import eye, issparse
from scipy.linalg import eig, eigh, lu_factor, lu_solve
from scipy.sparse._sputils import isdense, is_pydata_spmatrix
from scipy.sparse.linalg import gmres, splu
from scipy._lib._util import _aligned_zeros
from scipy._lib._threadsafety import ReentrancyLock
from . import _arpack
arpack_int = _arpack.timing.nbx.dtype
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'ArpackError', 'ArpackNoConvergence']
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error"
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"
}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
    """Exception raised when the ARPACK library reports an error code."""
    def __init__(self, info, infodict=_NAUPD_ERRORS):
        # Translate the ARPACK `info` code into a human-readable message.
        description = infodict.get(info, "Unknown error")
        super().__init__("ARPACK error %d: %s" % (info, description))
class ArpackNoConvergence(ArpackError):
    """
    ARPACK iteration did not converge
    Attributes
    ----------
    eigenvalues : ndarray
        Partial result. Converged eigenvalues.
    eigenvectors : ndarray
        Partial result. Converged eigenvectors.
    """
    def __init__(self, msg, eigenvalues, eigenvectors):
        # Route the caller's message through ArpackError's code machinery
        # by supplying a one-entry infodict keyed on the dummy code -1.
        super().__init__(-1, infodict={-1: msg})
        self.eigenvalues = eigenvalues
        self.eigenvectors = eigenvectors
def choose_ncv(k):
    """Return the number of Lanczos vectors to use when computing ``k``
    singular/eigen values and vectors.

    Uses the heuristic ``2*k + 1`` with a floor of 20 vectors.
    """
    ncv = 2 * k + 1
    if ncv < 20:
        ncv = 20
    return ncv
class _ArpackParams:
    """Base class holding state for ARPACK's reverse-communication loop.

    Parameters mirror the ARPACK ``*aupd`` arguments: ``n`` is the problem
    size, ``k`` the number of requested eigenvalues, ``tp`` the LAPACK type
    code ('f', 'd', 'F' or 'D'), ``mode`` the ARPACK solver mode, ``sigma``
    the shift, ``ncv`` the number of Lanczos/Arnoldi vectors, ``v0`` an
    optional starting vector, ``maxiter`` the iteration limit, ``which``
    the spectrum selector and ``tol`` the relative accuracy.

    Subclasses supply ``iterate()``/``extract()``.
    """
    def __init__(self, n, k, tp, mode=1, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        if k <= 0:
            raise ValueError("k must be positive, k=%d" % k)
        if maxiter is None:
            maxiter = n * 10
        if maxiter <= 0:
            raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
        if tp not in 'fdFD':
            raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
        if v0 is not None:
            # ARPACK overwrites its initial resid, make a copy
            self.resid = np.array(v0, copy=True)
            info = 1  # nonzero info signals ARPACK that resid is user-supplied
        else:
            # ARPACK will use a random initial vector.
            self.resid = np.zeros(n, tp)
            info = 0
        if sigma is None:
            # sigma not used
            self.sigma = 0
        else:
            self.sigma = sigma
        if ncv is None:
            ncv = choose_ncv(k)
        ncv = min(ncv, n)
        self.v = np.zeros((n, ncv), tp)  # holds Ritz vectors
        self.iparam = np.zeros(11, arpack_int)
        # set solver mode and parameters
        ishfts = 1
        self.mode = mode
        self.iparam[0] = ishfts
        self.iparam[2] = maxiter
        self.iparam[3] = 1
        self.iparam[6] = mode
        self.n = n
        self.tol = tol
        self.k = k
        self.maxiter = maxiter
        self.ncv = ncv
        self.which = which
        self.tp = tp
        self.info = info
        self.converged = False
        self.ido = 0  # reverse-communication flag; 0 requests the first step
    def _raise_no_convergence(self):
        # Raise ArpackNoConvergence, attaching whatever eigenpairs did
        # converge (extract() is provided by subclasses).
        msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
        k_ok = self.iparam[4]
        num_iter = self.iparam[2]
        try:
            ev, vec = self.extract(True)
        except ArpackError as err:
            # Extraction itself failed: report empty partial results.
            msg = f"{msg} [{err}]"
            ev = np.zeros((0,))
            vec = np.zeros((self.n, 0))
            k_ok = 0
        raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
    """ARPACK reverse-communication driver for real symmetric problems
    (``*saupd``/``*seupd``).

    ``iterate()`` advances the ARPACK state machine one step, dispatching
    the operator applications ARPACK requests via its ``ido`` flag;
    ``extract()`` retrieves the converged eigenvalues/eigenvectors.
    """
    def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
                 Minv_matvec=None, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        # The following modes are supported:
        #   mode = 1:
        #     Solve the standard eigenvalue problem:
        #       A*x = lambda*x :
        #        A - symmetric
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = None [not used]
        #        Minv_matvec = None [not used]
        #
        #   mode = 2:
        #     Solve the general eigenvalue problem:
        #       A*x = lambda*M*x
        #        A - symmetric
        #        M - symmetric positive definite
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = left multiplication by M
        #        Minv_matvec = left multiplication by M^-1
        #
        #   mode = 3:
        #     Solve the general eigenvalue problem in shift-invert mode:
        #       A*x = lambda*M*x
        #        A - symmetric
        #        M - symmetric positive semi-definite
        #     Arguments should be
        #        matvec      = None [not used]
        #        M_matvec    = left multiplication by M
        #                      or None, if M is the identity
        #        Minv_matvec = left multiplication by [A-sigma*M]^-1
        #
        #   mode = 4:
        #     Solve the general eigenvalue problem in Buckling mode:
        #       A*x = lambda*AG*x
        #        A  - symmetric positive semi-definite
        #        AG - symmetric indefinite
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = None [not used]
        #        Minv_matvec = left multiplication by [A-sigma*AG]^-1
        #
        #   mode = 5:
        #     Solve the general eigenvalue problem in Cayley-transformed mode:
        #       A*x = lambda*M*x
        #        A - symmetric
        #        M - symmetric positive semi-definite
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = left multiplication by M
        #                      or None, if M is the identity
        #        Minv_matvec = left multiplication by [A-sigma*M]^-1
        if mode == 1:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=1")
            if M_matvec is not None:
                raise ValueError("M_matvec cannot be specified for mode=1")
            if Minv_matvec is not None:
                raise ValueError("Minv_matvec cannot be specified for mode=1")
            self.OP = matvec
            self.B = lambda x: x
            self.bmat = 'I'
        elif mode == 2:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=2")
            if M_matvec is None:
                raise ValueError("M_matvec must be specified for mode=2")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=2")
            self.OP = lambda x: Minv_matvec(matvec(x))
            self.OPa = Minv_matvec
            self.OPb = matvec
            self.B = M_matvec
            self.bmat = 'G'
        elif mode == 3:
            if matvec is not None:
                raise ValueError("matvec must not be specified for mode=3")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=3")
            if M_matvec is None:
                # M is the identity: standard-problem bookkeeping (bmat='I')
                self.OP = Minv_matvec
                self.OPa = Minv_matvec
                self.B = lambda x: x
                self.bmat = 'I'
            else:
                self.OP = lambda x: Minv_matvec(M_matvec(x))
                self.OPa = Minv_matvec
                self.B = M_matvec
                self.bmat = 'G'
        elif mode == 4:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=4")
            if M_matvec is not None:
                raise ValueError("M_matvec must not be specified for mode=4")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=4")
            self.OPa = Minv_matvec
            self.OP = lambda x: self.OPa(matvec(x))
            self.B = matvec
            self.bmat = 'G'
        elif mode == 5:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=5")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=5")
            self.OPa = Minv_matvec
            self.A_matvec = matvec
            if M_matvec is None:
                self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
                self.B = lambda x: x
                self.bmat = 'I'
            else:
                self.OP = lambda x: Minv_matvec(matvec(x)
                                                + sigma * M_matvec(x))
                self.B = M_matvec
                self.bmat = 'G'
        else:
            raise ValueError("mode=%i not implemented" % mode)
        if which not in _SEUPD_WHICH:
            raise ValueError("which must be one of %s"
                             % ' '.join(_SEUPD_WHICH))
        if k >= n:
            raise ValueError("k must be less than ndim(A), k=%d" % k)
        _ArpackParams.__init__(self, n, k, tp, mode, sigma,
                               ncv, v0, maxiter, which, tol)
        if self.ncv > n or self.ncv <= k:
            raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
        # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
        self.workd = _aligned_zeros(3 * n, self.tp)
        self.workl = _aligned_zeros(self.ncv * (self.ncv + 8), self.tp)
        ltr = _type_conv[self.tp]
        if ltr not in ["s", "d"]:
            # *saupd/*seupd exist only for single/double real precision.
            raise ValueError("Input matrix is not real-valued.")
        self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
        self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
        self.iterate_infodict = _SAUPD_ERRORS[ltr]
        self.extract_infodict = _SEUPD_ERRORS[ltr]
        self.ipntr = np.zeros(11, arpack_int)
    def iterate(self):
        # One step of the reverse-communication loop: call *saupd, then
        # service the request it encodes in self.ido.
        self.ido, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.info = \
            self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                self.tol, self.resid, self.v, self.iparam,
                                self.ipntr, self.workd, self.workl, self.info)
        # ipntr holds 1-based offsets into workd for the requested vectors.
        xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
        yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
        if self.ido == -1:
            # initialization
            self.workd[yslice] = self.OP(self.workd[xslice])
        elif self.ido == 1:
            # compute y = Op*x
            if self.mode == 1:
                self.workd[yslice] = self.OP(self.workd[xslice])
            elif self.mode == 2:
                self.workd[xslice] = self.OPb(self.workd[xslice])
                self.workd[yslice] = self.OPa(self.workd[xslice])
            elif self.mode == 5:
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                Ax = self.A_matvec(self.workd[xslice])
                self.workd[yslice] = self.OPa(Ax + (self.sigma *
                                                    self.workd[Bxslice]))
            else:
                # Modes 3/4: B*x was already computed; apply only OPa to it.
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                self.workd[yslice] = self.OPa(self.workd[Bxslice])
        elif self.ido == 2:
            # compute y = B*x
            self.workd[yslice] = self.B(self.workd[xslice])
        elif self.ido == 3:
            raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
        else:
            # Any other ido means the iteration has finished; info tells how.
            self.converged = True
            if self.info == 0:
                pass
            elif self.info == 1:
                self._raise_no_convergence()
            else:
                raise ArpackError(self.info, infodict=self.iterate_infodict)
    def extract(self, return_eigenvectors):
        # Post-process via *seupd to obtain eigenvalues (and optionally
        # eigenvectors) after the iteration has converged.
        rvec = return_eigenvectors
        ierr = 0
        howmny = 'A'  # return all eigenvectors
        sselect = np.zeros(self.ncv, 'int')  # unused
        d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
                                          self.bmat, self.which, self.k,
                                          self.tol, self.resid, self.v,
                                          self.iparam[0:7], self.ipntr,
                                          self.workd[0:2 * self.n],
                                          self.workl, ierr)
        if ierr != 0:
            raise ArpackError(ierr, infodict=self.extract_infodict)
        # Keep only the eigenpairs ARPACK reports as converged (iparam[4]).
        k_ok = self.iparam[4]
        d = d[:k_ok]
        z = z[:, :k_ok]
        if return_eigenvectors:
            return d, z
        else:
            return d
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than ndim(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
self.workd = _aligned_zeros(3 * n, self.tp)
self.workl = _aligned_zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, arpack_int)
if self.tp in 'FD':
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
self.rwork = _aligned_zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
    def iterate(self):
        """Perform one reverse-communication step of ARPACK's *naupd.

        ARPACK reports via ``self.ido`` what it needs next (apply OP,
        apply B, ...); the vectors to read/write live in slices of
        ``self.workd`` located by the 1-based Fortran offsets in
        ``self.ipntr``.  Sets ``self.converged`` when ARPACK is done.
        """
        if self.tp in 'fd':
            # real single/double precision *naupd takes no rwork argument
            self.ido, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.info =\
                self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                    self.tol, self.resid, self.v, self.iparam,
                                    self.ipntr, self.workd, self.workl,
                                    self.info)
        else:
            # complex single/double precision variant needs rwork
            self.ido, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.info =\
                self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                    self.tol, self.resid, self.v, self.iparam,
                                    self.ipntr, self.workd, self.workl,
                                    self.rwork, self.info)
        # ipntr entries are 1-based (Fortran); convert to 0-based slices
        xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
        yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
        if self.ido == -1:
            # initialization
            self.workd[yslice] = self.OP(self.workd[xslice])
        elif self.ido == 1:
            # compute y = Op*x
            if self.mode in (1, 2):
                self.workd[yslice] = self.OP(self.workd[xslice])
            else:
                # shift-invert modes: ARPACK already provides B*x in workd
                # at ipntr[2]; only the solve part (OPa) remains to do
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                self.workd[yslice] = self.OPa(self.workd[Bxslice])
        elif self.ido == 2:
            # compute y = B*x
            self.workd[yslice] = self.B(self.workd[xslice])
        elif self.ido == 3:
            raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
        else:
            # ido == 99: iteration finished; inspect info for the outcome
            self.converged = True
            if self.info == 0:
                pass
            elif self.info == 1:
                self._raise_no_convergence()
            else:
                raise ArpackError(self.info, infodict=self.iterate_infodict)
    def extract(self, return_eigenvectors):
        """Post-process ARPACK output into eigenvalues/eigenvectors.

        Calls *neupd.  For the real (s/d) path, complex conjugate
        eigenpairs come back split across two consecutive real columns
        and are reassembled here; up to k+1 candidates may be produced,
        and the result is trimmed to k according to ``self.which``.
        """
        k, n = self.k, self.n

        ierr = 0
        howmny = 'A'  # return all eigenvectors
        sselect = np.zeros(self.ncv, 'int')  # unused
        sigmar = np.real(self.sigma)
        sigmai = np.imag(self.sigma)
        workev = np.zeros(3 * self.ncv, self.tp)

        if self.tp in 'fd':
            # real-arithmetic path: *neupd returns real/imag parts separately
            dr = np.zeros(k + 1, self.tp)
            di = np.zeros(k + 1, self.tp)
            zr = np.zeros((n, k + 1), self.tp)
            dr, di, zr, ierr = \
                self._arpack_extract(return_eigenvectors,
                       howmny, sselect, sigmar, sigmai, workev,
                       self.bmat, self.which, k, self.tol, self.resid,
                       self.v, self.iparam, self.ipntr,
                       self.workd, self.workl, self.info)
            if ierr != 0:
                raise ArpackError(ierr, infodict=self.extract_infodict)
            nreturned = self.iparam[4]  # number of good eigenvalues returned

            # Build complex eigenvalues from real and imaginary parts
            d = dr + 1.0j * di

            # Arrange the eigenvectors: complex eigenvectors are stored as
            # real,imaginary in consecutive columns
            z = zr.astype(self.tp.upper())

            # The ARPACK nonsymmetric real and double interface (s,d)naupd
            # return eigenvalues and eigenvectors in real (float,double)
            # arrays.

            # Efficiency: this should check that return_eigenvectors == True
            #  before going through this construction.
            if sigmai == 0:
                i = 0
                while i <= k:
                    # check if complex
                    if abs(d[i].imag) != 0:
                        # this is a complex conjugate pair with eigenvalues
                        # in consecutive columns
                        if i < k:
                            z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
                            z[:, i + 1] = z[:, i].conjugate()
                            i += 1
                        else:
                            #last eigenvalue is complex: the imaginary part of
                            # the eigenvector has not been returned
                            #this can only happen if nreturned > k, so we'll
                            # throw out this case.
                            nreturned -= 1
                    i += 1
            else:
                # real matrix, mode 3 or 4, imag(sigma) is nonzero:
                # see remark 3 in <s,d>neupd.f
                # Build complex eigenvalues from real and imaginary parts
                i = 0
                while i <= k:
                    if abs(d[i].imag) == 0:
                        # real Ritz value: recover the eigenvalue of A via
                        # the Rayleigh quotient x^T A x
                        d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
                    else:
                        if i < k:
                            z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
                            z[:, i + 1] = z[:, i].conjugate()
                            # complex Rayleigh quotient assembled from the
                            # two real columns of the conjugate pair
                            d[i] = ((np.dot(zr[:, i],
                                            self.matvec(zr[:, i]))
                                     + np.dot(zr[:, i + 1],
                                              self.matvec(zr[:, i + 1])))
                                    + 1j * (np.dot(zr[:, i],
                                                   self.matvec(zr[:, i + 1]))
                                            - np.dot(zr[:, i + 1],
                                                     self.matvec(zr[:, i]))))
                            d[i + 1] = d[i].conj()
                            i += 1
                        else:
                            #last eigenvalue is complex: the imaginary part of
                            # the eigenvector has not been returned
                            #this can only happen if nreturned > k, so we'll
                            # throw out this case.
                            nreturned -= 1
                    i += 1

            # Now we have k+1 possible eigenvalues and eigenvectors
            # Return the ones specified by the keyword "which"
            if nreturned <= k:
                # we got less or equal as many eigenvalues we wanted
                d = d[:nreturned]
                z = z[:, :nreturned]
            else:
                # we got one extra eigenvalue (likely a cc pair, but which?)
                # cut at approx precision for sorting
                if self.mode in (1, 2):
                    rd = d
                elif self.mode in (3, 4):
                    # sort by the shifted spectrum that ARPACK worked on
                    rd = 1 / (d - self.sigma)

                if self.which in ['LR', 'SR']:
                    ind = np.argsort(rd.real)
                elif self.which in ['LI', 'SI']:
                    # for LI,SI ARPACK returns largest,smallest
                    # abs(imaginary) (complex pairs come together)
                    ind = np.argsort(abs(rd.imag))
                else:
                    ind = np.argsort(abs(rd))

                if self.which in ['LR', 'LM', 'LI']:
                    ind = ind[-k:][::-1]
                elif self.which in ['SR', 'SM', 'SI']:
                    ind = ind[:k]

                d = d[ind]
                z = z[:, ind]
        else:
            # complex is so much simpler...
            d, z, ierr =\
                self._arpack_extract(return_eigenvectors,
                       howmny, sselect, self.sigma, workev,
                       self.bmat, self.which, k, self.tol, self.resid,
                       self.v, self.iparam, self.ipntr,
                       self.workd, self.workl, self.rwork, ierr)
            if ierr != 0:
                raise ArpackError(ierr, infodict=self.extract_infodict)
            k_ok = self.iparam[4]
            d = d[:k_ok]
            z = z[:, :k_ok]

        if return_eigenvectors:
            return d, z
        else:
            return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
    """Apply ``M^-1`` repeatedly via a cached sparse LU factorization.

    The factorization is computed once in the constructor; each
    ``matvec`` then only performs the triangular solves.
    """

    def __init__(self, M):
        self.M_lu = splu(M)
        self.shape = M.shape
        self.dtype = M.dtype
        self.isreal = not np.issubdtype(self.dtype, np.complexfloating)

    def _matvec(self, x):
        # splu.solve on a real factorization would silently discard the
        # imaginary part of a complex right-hand side, so solve the real
        # and imaginary parts separately in that case.
        rhs = np.asarray(x)
        if self.isreal and np.issubdtype(rhs.dtype, np.complexfloating):
            real_sol = self.M_lu.solve(np.real(rhs).astype(self.dtype))
            imag_sol = self.M_lu.solve(np.imag(rhs).astype(self.dtype))
            return real_sol + 1j * imag_sol
        return self.M_lu.solve(rhs.astype(self.dtype))
class LuInv(LinearOperator):
    """Apply ``M^-1`` repeatedly via a cached dense LU factorization."""

    def __init__(self, M):
        # Factor once; every _matvec is then just two triangular solves.
        self.M_lu = lu_factor(M)
        self.shape = M.shape
        self.dtype = M.dtype

    def _matvec(self, x):
        return lu_solve(self.M_lu, x)
def gmres_loose(A, b, tol):
"""
gmres with looser termination condition.
"""
b = np.asarray(b)
min_tol = 1000 * np.sqrt(b.size) * np.finfo(b.dtype).eps
return gmres(A, b, rtol=max(tol, min_tol), atol=0)
class IterInv(LinearOperator):
    """Apply ``M^-1`` by iteratively solving ``M @ x = b`` per matvec."""

    def __init__(self, M, ifunc=gmres_loose, tol=0):
        self.M = M
        if hasattr(M, 'dtype'):
            self.dtype = M.dtype
        else:
            # probe with a zero vector to discover the result dtype
            probe = np.zeros(M.shape[1])
            self.dtype = (M * probe).dtype
        self.shape = M.shape

        if tol <= 0:
            # when tol=0, ARPACK uses machine tolerance as calculated
            # by LAPACK's _LAMCH function. We should match this
            tol = 2 * np.finfo(self.dtype).eps
        self.ifunc = ifunc
        self.tol = tol

    def _matvec(self, x):
        sol, info = self.ifunc(self.M, x, tol=self.tol)
        if info != 0:
            raise ValueError("Error in inverting M: function "
                             "%s did not converge (info = %i)."
                             % (self.ifunc.__name__, info))
        return sol
class IterOpInv(LinearOperator):
    """
    IterOpInv:
       helper class to repeatedly solve [A-sigma*M]*x = b
       using an iterative method
    """

    def __init__(self, A, M, sigma, ifunc=gmres_loose, tol=0):
        self.A = A
        self.M = M
        self.sigma = sigma

        # Closures implementing the shifted operator [A - sigma*M] @ x,
        # with a specialized variant for M = identity (M is None).
        def mult_func(x):
            return A.matvec(x) - sigma * M.matvec(x)

        def mult_func_M_None(x):
            return A.matvec(x) - sigma * x

        # Apply the operator to a zero vector once to infer the result
        # dtype, then wrap it as a LinearOperator for the solver.
        x = np.zeros(A.shape[1])
        if M is None:
            dtype = mult_func_M_None(x).dtype
            self.OP = LinearOperator(self.A.shape,
                                     mult_func_M_None,
                                     dtype=dtype)
        else:
            dtype = mult_func(x).dtype
            self.OP = LinearOperator(self.A.shape,
                                     mult_func,
                                     dtype=dtype)
        self.shape = A.shape

        if tol <= 0:
            # when tol=0, ARPACK uses machine tolerance as calculated
            # by LAPACK's _LAMCH function. We should match this
            tol = 2 * np.finfo(self.OP.dtype).eps
        self.ifunc = ifunc
        self.tol = tol

    def _matvec(self, x):
        # Solve [A - sigma*M] @ b = x iteratively for each application.
        b, info = self.ifunc(self.OP, x, tol=self.tol)
        if info != 0:
            raise ValueError("Error in inverting [A-sigma*M]: function "
                             "%s did not converge (info = %i)."
                             % (self.ifunc.__name__, info))
        return b

    @property
    def dtype(self):
        # Delegate to the wrapped shifted operator's (inferred) dtype.
        return self.OP.dtype
def _fast_spmatrix_to_csc(A, hermitian=False):
    """Convert a sparse matrix to CSC format, transposing when that is free.

    A real hermitian CSR matrix equals its transpose, and transposing CSR
    yields CSC directly, so that case needs no conversion pass.
    """
    real_hermitian_csr = (A.format == "csr" and hermitian
                          and not np.issubdtype(A.dtype, np.complexfloating))
    if real_hermitian_csr:
        return A.T
    if is_pydata_spmatrix(A):
        # No need to convert
        return A
    return A.tocsc()
def get_inv_matvec(M, hermitian=False, tol=0):
    """Return a callable that applies ``M^-1``, chosen by matrix type.

    Dense matrices get a dense LU, explicit sparse matrices a sparse LU,
    and anything else (general linear operators) an iterative solver.
    """
    if isdense(M):
        return LuInv(M).matvec
    if issparse(M) or is_pydata_spmatrix(M):
        csc = _fast_spmatrix_to_csc(M, hermitian=hermitian)
        return SpLuInv(csc).matvec
    return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, hermitian=False, tol=0):
    """Return a callable that applies ``[A - sigma*M]^-1``.

    Chooses a direct solver (dense or sparse LU) when both operands are
    explicit matrices, and an iterative solver when either is only
    available as a general linear operator.  ``sigma == 0`` reduces to a
    plain inverse of ``A``.
    """
    if sigma == 0:
        return get_inv_matvec(A, hermitian=hermitian, tol=tol)

    if M is None:
        #M is the identity matrix
        if isdense(A):
            if (np.issubdtype(A.dtype, np.complexfloating)
                    or np.imag(sigma) == 0):
                A = np.copy(A)
            else:
                # real A with a complex shift: promote to complex first
                A = A + 0j
            # subtract sigma from the diagonal in place
            A.flat[::A.shape[1] + 1] -= sigma
            return LuInv(A).matvec
        elif issparse(A) or is_pydata_spmatrix(A):
            A = A - sigma * eye(A.shape[0])
            A = _fast_spmatrix_to_csc(A, hermitian=hermitian)
            return SpLuInv(A).matvec
        else:
            return IterOpInv(_aslinearoperator_with_dtype(A),
                             M, sigma, tol=tol).matvec
    else:
        # BUG FIX: the second clause previously tested
        # ``is_pydata_spmatrix(A)`` instead of ``is_pydata_spmatrix(M)``,
        # misclassifying a pydata-sparse M as a general linear operator
        # and forcing the slower iterative path.
        if ((not isdense(A) and not issparse(A) and not is_pydata_spmatrix(A)) or
                (not isdense(M) and not issparse(M) and not is_pydata_spmatrix(M))):
            # at least one operand is only a LinearOperator: iterate
            return IterOpInv(_aslinearoperator_with_dtype(A),
                             _aslinearoperator_with_dtype(M),
                             sigma, tol=tol).matvec
        elif isdense(A) or isdense(M):
            return LuInv(A - sigma * M).matvec
        else:
            OP = A - sigma * M
            OP = _fast_spmatrix_to_csc(OP, hermitian=hermitian)
            return SpLuInv(OP).matvec
# ARPACK is not threadsafe or reentrant (SAVE variables), so we need a
# lock and a re-entering check.
# The lock is shared by eigs() and eigsh(); re-entering it from the same
# thread (e.g. calling eigs from inside a user matvec) is rejected with
# the message below.
_ARPACK_LOCK = ReentrancyLock("Nested calls to eigs/eighs not allowed: "
                              "ARPACK is not re-entrant")
def eigs(A, k=6, M=None, sigma=None, which='LM', v0=None,
         ncv=None, maxiter=None, tol=0, return_eigenvectors=True,
         Minv=None, OPinv=None, OPpart=None):
    """
    Find k eigenvalues and eigenvectors of the square matrix A.

    Solves ``A @ x[i] = w[i] * x[i]``, the standard eigenvalue problem
    for w[i] eigenvalues with corresponding eigenvectors x[i].

    If M is specified, solves ``A @ x[i] = w[i] * M @ x[i]``, the
    generalized eigenvalue problem for w[i] eigenvalues
    with corresponding eigenvectors x[i]

    Parameters
    ----------
    A : ndarray, sparse matrix or LinearOperator
        An array, sparse matrix, or LinearOperator representing
        the operation ``A @ x``, where A is a real or complex square matrix.
    k : int, optional
        The number of eigenvalues and eigenvectors desired.
        `k` must be smaller than N-1. It is not possible to compute all
        eigenvectors of a matrix.
    M : ndarray, sparse matrix or LinearOperator, optional
        An array, sparse matrix, or LinearOperator representing
        the operation M@x for the generalized eigenvalue problem

            A @ x = w * M @ x.

        M must represent a real symmetric matrix if A is real, and must
        represent a complex Hermitian matrix if A is complex. For best
        results, the data type of M should be the same as that of A.
        Additionally:

            If `sigma` is None, M is positive definite

            If sigma is specified, M is positive semi-definite

        If sigma is None, eigs requires an operator to compute the solution
        of the linear equation ``M @ x = b``. This is done internally via a
        (sparse) LU decomposition for an explicit matrix M, or via an
        iterative solver for a general linear operator. Alternatively,
        the user can supply the matrix or operator Minv, which gives
        ``x = Minv @ b = M^-1 @ b``.
    sigma : real or complex, optional
        Find eigenvalues near sigma using shift-invert mode. This requires
        an operator to compute the solution of the linear system
        ``[A - sigma * M] @ x = b``, where M is the identity matrix if
        unspecified. This is computed internally via a (sparse) LU
        decomposition for explicit matrices A & M, or via an iterative
        solver if either A or M is a general linear operator.
        Alternatively, the user can supply the matrix or operator OPinv,
        which gives ``x = OPinv @ b = [A - sigma * M]^-1 @ b``.
        For a real matrix A, shift-invert can either be done in imaginary
        mode or real mode, specified by the parameter OPpart ('r' or 'i').
        Note that when sigma is specified, the keyword 'which' (below)
        refers to the shifted eigenvalues ``w'[i]`` where:

            If A is real and OPpart == 'r' (default),
              ``w'[i] = 1/2 * [1/(w[i]-sigma) + 1/(w[i]-conj(sigma))]``.

            If A is real and OPpart == 'i',
              ``w'[i] = 1/2i * [1/(w[i]-sigma) - 1/(w[i]-conj(sigma))]``.

            If A is complex, ``w'[i] = 1/(w[i]-sigma)``.

    v0 : ndarray, optional
        Starting vector for iteration.
        Default: random
    ncv : int, optional
        The number of Lanczos vectors generated
        `ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
        Default: ``min(n, max(2*k + 1, 20))``
    which : str, ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'], optional
        Which `k` eigenvectors and eigenvalues to find:

            'LM' : largest magnitude

            'SM' : smallest magnitude

            'LR' : largest real part

            'SR' : smallest real part

            'LI' : largest imaginary part

            'SI' : smallest imaginary part

        When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
        (see discussion in 'sigma', above). ARPACK is generally better
        at finding large values than small values. If small eigenvalues are
        desired, consider using shift-invert mode for better performance.
    maxiter : int, optional
        Maximum number of Arnoldi update iterations allowed
        Default: ``n*10``
    tol : float, optional
        Relative accuracy for eigenvalues (stopping criterion)
        The default value of 0 implies machine precision.
    return_eigenvectors : bool, optional
        Return eigenvectors (True) in addition to eigenvalues
    Minv : ndarray, sparse matrix or LinearOperator, optional
        See notes in M, above.
    OPinv : ndarray, sparse matrix or LinearOperator, optional
        See notes in sigma, above.
    OPpart : {'r' or 'i'}, optional
        See notes in sigma, above

    Returns
    -------
    w : ndarray
        Array of k eigenvalues.
    v : ndarray
        An array of `k` eigenvectors.
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].

    Raises
    ------
    ArpackNoConvergence
        When the requested convergence is not obtained.
        The currently converged eigenvalues and eigenvectors can be found
        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
        object.

    See Also
    --------
    eigsh : eigenvalues and eigenvectors for symmetric matrix A
    svds : singular value decomposition for a matrix A

    Notes
    -----
    This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
    ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
    find the eigenvalues and eigenvectors [2]_.

    References
    ----------
    .. [1] ARPACK Software, https://github.com/opencollab/arpack-ng
    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang,  ARPACK USERS GUIDE:
       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.

    Examples
    --------
    Find 6 eigenvectors of the identity matrix:

    >>> import numpy as np
    >>> from scipy.sparse.linalg import eigs
    >>> id = np.eye(13)
    >>> vals, vecs = eigs(id, k=6)
    >>> vals
    array([ 1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j])
    >>> vecs.shape
    (13, 6)
    """
    # --- input validation ---
    if A.shape[0] != A.shape[1]:
        raise ValueError(f'expected square matrix (shape={A.shape})')
    if M is not None:
        if M.shape != A.shape:
            raise ValueError('wrong M dimensions %s, should be %s'
                             % (M.shape, A.shape))
        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
            warnings.warn('M does not have the same type precision as A. '
                          'This may adversely affect ARPACK convergence')

    n = A.shape[0]

    if k <= 0:
        raise ValueError("k=%d must be greater than 0." % k)

    # k too close to n: ARPACK cannot be used; fall back to dense LAPACK
    # when the operands are explicit arrays.
    if k >= n - 1:
        warnings.warn("k >= N - 1 for N * N square matrix. "
                      "Attempting to use scipy.linalg.eig instead.",
                      RuntimeWarning)

        if issparse(A):
            raise TypeError("Cannot use scipy.linalg.eig for sparse A with "
                            "k >= N - 1. Use scipy.linalg.eig(A.toarray()) or"
                            " reduce k.")
        if isinstance(A, LinearOperator):
            raise TypeError("Cannot use scipy.linalg.eig for LinearOperator "
                            "A with k >= N - 1.")
        if isinstance(M, LinearOperator):
            raise TypeError("Cannot use scipy.linalg.eig for LinearOperator "
                            "M with k >= N - 1.")

        return eig(A, b=M, right=return_eigenvectors)

    if sigma is None:
        matvec = _aslinearoperator_with_dtype(A).matvec

        if OPinv is not None:
            raise ValueError("OPinv should not be specified "
                             "with sigma = None.")
        if OPpart is not None:
            raise ValueError("OPpart should not be specified with "
                             "sigma = None or complex A")

        if M is None:
            #standard eigenvalue problem
            mode = 1
            M_matvec = None
            Minv_matvec = None
            if Minv is not None:
                raise ValueError("Minv should not be "
                                 "specified with M = None.")
        else:
            #general eigenvalue problem
            mode = 2
            if Minv is None:
                Minv_matvec = get_inv_matvec(M, hermitian=True, tol=tol)
            else:
                Minv = _aslinearoperator_with_dtype(Minv)
                Minv_matvec = Minv.matvec
            M_matvec = _aslinearoperator_with_dtype(M).matvec
    else:
        #sigma is not None: shift-invert mode
        if np.issubdtype(A.dtype, np.complexfloating):
            if OPpart is not None:
                raise ValueError("OPpart should not be specified "
                                 "with sigma=None or complex A")
            mode = 3
        elif OPpart is None or OPpart.lower() == 'r':
            mode = 3
        elif OPpart.lower() == 'i':
            if np.imag(sigma) == 0:
                raise ValueError("OPpart cannot be 'i' if sigma is real")
            mode = 4
        else:
            raise ValueError("OPpart must be one of ('r','i')")

        matvec = _aslinearoperator_with_dtype(A).matvec
        if Minv is not None:
            raise ValueError("Minv should not be specified when sigma is")
        if OPinv is None:
            Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                           hermitian=False, tol=tol)
        else:
            OPinv = _aslinearoperator_with_dtype(OPinv)
            Minv_matvec = OPinv.matvec
        if M is None:
            M_matvec = None
        else:
            M_matvec = _aslinearoperator_with_dtype(M).matvec

    # hand the assembled callables to the ARPACK wrapper
    params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
                                      M_matvec, Minv_matvec, sigma,
                                      ncv, v0, maxiter, which, tol)

    # ARPACK is not re-entrant: serialize the reverse-communication loop
    with _ARPACK_LOCK:
        while not params.converged:
            params.iterate()

        return params.extract(return_eigenvectors)
def eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None,
          ncv=None, maxiter=None, tol=0, return_eigenvectors=True,
          Minv=None, OPinv=None, mode='normal'):
    """
    Find k eigenvalues and eigenvectors of the real symmetric square matrix
    or complex Hermitian matrix A.

    Solves ``A @ x[i] = w[i] * x[i]``, the standard eigenvalue problem for
    w[i] eigenvalues with corresponding eigenvectors x[i].

    If M is specified, solves ``A @ x[i] = w[i] * M @ x[i]``, the
    generalized eigenvalue problem for w[i] eigenvalues
    with corresponding eigenvectors x[i].

    Note that there is no specialized routine for the case when A is a complex
    Hermitian matrix. In this case, ``eigsh()`` will call ``eigs()`` and return the
    real parts of the eigenvalues thus obtained.

    Parameters
    ----------
    A : ndarray, sparse matrix or LinearOperator
        A square operator representing the operation ``A @ x``, where ``A`` is
        real symmetric or complex Hermitian. For buckling mode (see below)
        ``A`` must additionally be positive-definite.
    k : int, optional
        The number of eigenvalues and eigenvectors desired.
        `k` must be smaller than N. It is not possible to compute all
        eigenvectors of a matrix.

    Returns
    -------
    w : array
        Array of k eigenvalues.
    v : array
        An array representing the `k` eigenvectors. The column ``v[:, i]`` is
        the eigenvector corresponding to the eigenvalue ``w[i]``.

    Other Parameters
    ----------------
    M : An N x N matrix, array, sparse matrix, or linear operator representing
        the operation ``M @ x`` for the generalized eigenvalue problem

            A @ x = w * M @ x.

        M must represent a real symmetric matrix if A is real, and must
        represent a complex Hermitian matrix if A is complex. For best
        results, the data type of M should be the same as that of A.
        Additionally:

            If sigma is None, M is symmetric positive definite.

            If sigma is specified, M is symmetric positive semi-definite.

            In buckling mode, M is symmetric indefinite.

        If sigma is None, eigsh requires an operator to compute the solution
        of the linear equation ``M @ x = b``. This is done internally via a
        (sparse) LU decomposition for an explicit matrix M, or via an
        iterative solver for a general linear operator. Alternatively,
        the user can supply the matrix or operator Minv, which gives
        ``x = Minv @ b = M^-1 @ b``.
    sigma : real
        Find eigenvalues near sigma using shift-invert mode. This requires
        an operator to compute the solution of the linear system
        ``[A - sigma * M] x = b``, where M is the identity matrix if
        unspecified. This is computed internally via a (sparse) LU
        decomposition for explicit matrices A & M, or via an iterative
        solver if either A or M is a general linear operator.
        Alternatively, the user can supply the matrix or operator OPinv,
        which gives ``x = OPinv @ b = [A - sigma * M]^-1 @ b``.
        Note that when sigma is specified, the keyword 'which' refers to
        the shifted eigenvalues ``w'[i]`` where:

            if mode == 'normal', ``w'[i] = 1 / (w[i] - sigma)``.

            if mode == 'cayley', ``w'[i] = (w[i] + sigma) / (w[i] - sigma)``.

            if mode == 'buckling', ``w'[i] = w[i] / (w[i] - sigma)``.

        (see further discussion in 'mode' below)
    v0 : ndarray, optional
        Starting vector for iteration.
        Default: random
    ncv : int, optional
        The number of Lanczos vectors generated ncv must be greater than k and
        smaller than n; it is recommended that ``ncv > 2*k``.
        Default: ``min(n, max(2*k + 1, 20))``
    which : str ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
        If A is a complex Hermitian matrix, 'BE' is invalid.
        Which `k` eigenvectors and eigenvalues to find:

            'LM' : Largest (in magnitude) eigenvalues.

            'SM' : Smallest (in magnitude) eigenvalues.

            'LA' : Largest (algebraic) eigenvalues.

            'SA' : Smallest (algebraic) eigenvalues.

            'BE' : Half (k/2) from each end of the spectrum.

        When k is odd, return one more (k/2+1) from the high end.
        When sigma != None, 'which' refers to the shifted eigenvalues ``w'[i]``
        (see discussion in 'sigma', above).  ARPACK is generally better
        at finding large values than small values.  If small eigenvalues are
        desired, consider using shift-invert mode for better performance.
    maxiter : int, optional
        Maximum number of Arnoldi update iterations allowed.
        Default: ``n*10``
    tol : float
        Relative accuracy for eigenvalues (stopping criterion).
        The default value of 0 implies machine precision.
    Minv : N x N matrix, array, sparse matrix, or LinearOperator
        See notes in M, above.
    OPinv : N x N matrix, array, sparse matrix, or LinearOperator
        See notes in sigma, above.
    return_eigenvectors : bool
        Return eigenvectors (True) in addition to eigenvalues.
        This value determines the order in which eigenvalues are sorted.
        The sort order is also dependent on the `which` variable.

            For which = 'LM' or 'SA':
                If `return_eigenvectors` is True, eigenvalues are sorted by
                algebraic value.

                If `return_eigenvectors` is False, eigenvalues are sorted by
                absolute value.

            For which = 'BE' or 'LA':
                eigenvalues are always sorted by algebraic value.

            For which = 'SM':
                If `return_eigenvectors` is True, eigenvalues are sorted by
                algebraic value.

                If `return_eigenvectors` is False, eigenvalues are sorted by
                decreasing absolute value.

    mode : string ['normal' | 'buckling' | 'cayley']
        Specify strategy to use for shift-invert mode.  This argument applies
        only for real-valued A and sigma != None.  For shift-invert mode,
        ARPACK internally solves the eigenvalue problem
        ``OP @ x'[i] = w'[i] * B @ x'[i]``
        and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
        into the desired eigenvectors and eigenvalues of the problem
        ``A @ x[i] = w[i] * M @ x[i]``.
        The modes are as follows:

            'normal' :
                OP = [A - sigma * M]^-1 @ M,
                B = M,
                w'[i] = 1 / (w[i] - sigma)

            'buckling' :
                OP = [A - sigma * M]^-1 @ A,
                B = A,
                w'[i] = w[i] / (w[i] - sigma)

            'cayley' :
                OP = [A - sigma * M]^-1 @ [A + sigma * M],
                B = M,
                w'[i] = (w[i] + sigma) / (w[i] - sigma)

        The choice of mode will affect which eigenvalues are selected by
        the keyword 'which', and can also impact the stability of
        convergence (see [2] for a discussion).

    Raises
    ------
    ArpackNoConvergence
        When the requested convergence is not obtained.

        The currently converged eigenvalues and eigenvectors can be found
        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
        object.

    See Also
    --------
    eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
    svds : singular value decomposition for a matrix A

    Notes
    -----
    This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
    functions which use the Implicitly Restarted Lanczos Method to
    find the eigenvalues and eigenvectors [2]_.

    References
    ----------
    .. [1] ARPACK Software, https://github.com/opencollab/arpack-ng
    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang,  ARPACK USERS GUIDE:
       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg import eigsh
    >>> identity = np.eye(13)
    >>> eigenvalues, eigenvectors = eigsh(identity, k=6)
    >>> eigenvalues
    array([1., 1., 1., 1., 1., 1.])
    >>> eigenvectors.shape
    (13, 6)
    """
    # complex Hermitian matrices should be solved with eigs
    if np.issubdtype(A.dtype, np.complexfloating):
        if mode != 'normal':
            raise ValueError("mode=%s cannot be used with "
                             "complex matrix A" % mode)
        if which == 'BE':
            raise ValueError("which='BE' cannot be used with complex matrix A")
        elif which == 'LA':
            which = 'LR'
        elif which == 'SA':
            which = 'SR'
        ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
                   ncv=ncv, maxiter=maxiter, tol=tol,
                   return_eigenvectors=return_eigenvectors, Minv=Minv,
                   OPinv=OPinv)

        if return_eigenvectors:
            return ret[0].real, ret[1]
        else:
            return ret.real

    # --- input validation ---
    if A.shape[0] != A.shape[1]:
        raise ValueError(f'expected square matrix (shape={A.shape})')
    if M is not None:
        if M.shape != A.shape:
            raise ValueError('wrong M dimensions %s, should be %s'
                             % (M.shape, A.shape))
        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
            warnings.warn('M does not have the same type precision as A. '
                          'This may adversely affect ARPACK convergence')

    n = A.shape[0]

    if k <= 0:
        raise ValueError("k must be greater than 0.")

    # k too large for ARPACK: fall back to dense LAPACK when possible
    if k >= n:
        warnings.warn("k >= N for N * N square matrix. "
                      "Attempting to use scipy.linalg.eigh instead.",
                      RuntimeWarning)

        if issparse(A):
            raise TypeError("Cannot use scipy.linalg.eigh for sparse A with "
                            "k >= N. Use scipy.linalg.eigh(A.toarray()) or"
                            " reduce k.")
        if isinstance(A, LinearOperator):
            raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator "
                            "A with k >= N.")
        if isinstance(M, LinearOperator):
            raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator "
                            "M with k >= N.")

        return eigh(A, b=M, eigvals_only=not return_eigenvectors)

    if sigma is None:
        A = _aslinearoperator_with_dtype(A)
        matvec = A.matvec

        if OPinv is not None:
            raise ValueError("OPinv should not be specified "
                             "with sigma = None.")
        if M is None:
            #standard eigenvalue problem
            mode = 1
            M_matvec = None
            Minv_matvec = None
            if Minv is not None:
                raise ValueError("Minv should not be "
                                 "specified with M = None.")
        else:
            #general eigenvalue problem
            mode = 2
            if Minv is None:
                Minv_matvec = get_inv_matvec(M, hermitian=True, tol=tol)
            else:
                Minv = _aslinearoperator_with_dtype(Minv)
                Minv_matvec = Minv.matvec
            M_matvec = _aslinearoperator_with_dtype(M).matvec
    else:
        # sigma is not None: shift-invert mode
        if Minv is not None:
            raise ValueError("Minv should not be specified when sigma is")

        # normal mode
        if mode == 'normal':
            mode = 3
            matvec = None
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               hermitian=True, tol=tol)
            else:
                OPinv = _aslinearoperator_with_dtype(OPinv)
                Minv_matvec = OPinv.matvec
            if M is None:
                M_matvec = None
            else:
                M = _aslinearoperator_with_dtype(M)
                M_matvec = M.matvec

        # buckling mode
        elif mode == 'buckling':
            mode = 4
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               hermitian=True, tol=tol)
            else:
                Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
            matvec = _aslinearoperator_with_dtype(A).matvec
            M_matvec = None

        # cayley-transform mode
        elif mode == 'cayley':
            mode = 5
            matvec = _aslinearoperator_with_dtype(A).matvec
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               hermitian=True, tol=tol)
            else:
                Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
            if M is None:
                M_matvec = None
            else:
                M_matvec = _aslinearoperator_with_dtype(M).matvec

        # unrecognized mode
        else:
            raise ValueError("unrecognized mode '%s'" % mode)

    # hand the assembled callables to the ARPACK wrapper
    params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
                                    M_matvec, Minv_matvec, sigma,
                                    ncv, v0, maxiter, which, tol)

    # ARPACK is not re-entrant: serialize the reverse-communication loop
    with _ARPACK_LOCK:
        while not params.converged:
            params.iterate()

        return params.extract(return_eigenvectors)
| 67,273
| 38.572941
| 89
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py
|
__usage__ = """
To run tests locally:
python tests/test_arpack.py [-l<int>] [-v<int>]
"""
import threading
import itertools
import numpy as np
from numpy.testing import assert_allclose, assert_equal, suppress_warnings
from pytest import raises as assert_raises
import pytest
from numpy import dot, conj, random
from scipy.linalg import eig, eigh
from scipy.sparse import csc_matrix, csr_matrix, diags, rand
from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg._eigen.arpack import (eigs, eigsh, arpack,
ArpackNoConvergence)
from scipy._lib._gcutils import assert_deallocated, IS_PYPY
# precision for tests
_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11}
def _get_test_tolerance(type_char, mattype=None, D_type=None, which=None):
"""
Return tolerance values suitable for a given test:
Parameters
----------
type_char : {'f', 'd', 'F', 'D'}
Data type in ARPACK eigenvalue problem
mattype : {csr_matrix, aslinearoperator, asarray}, optional
Linear operator type
Returns
-------
tol
Tolerance to pass to the ARPACK routine
rtol
Relative tolerance for outputs
atol
Absolute tolerance for outputs
"""
rtol = {'f': 3000 * np.finfo(np.float32).eps,
'F': 3000 * np.finfo(np.float32).eps,
'd': 2000 * np.finfo(np.float64).eps,
'D': 2000 * np.finfo(np.float64).eps}[type_char]
atol = rtol
tol = 0
if mattype is aslinearoperator and type_char in ('f', 'F'):
# iterative methods in single precision: worse errors
# also: bump ARPACK tolerance so that the iterative method converges
tol = 30 * np.finfo(np.float32).eps
rtol *= 5
if mattype is csr_matrix and type_char in ('f', 'F'):
# sparse in single precision: worse errors
rtol *= 5
if (
which in ('LM', 'SM', 'LA')
and D_type.name == "gen-hermitian-Mc"
):
if type_char == 'F':
# missing case 1, 2, and more, from PR 14798
rtol *= 5
if type_char == 'D':
# missing more cases, from PR 14798
rtol *= 10
atol *= 10
return tol, rtol, atol
def generate_matrix(N, complex_=False, hermitian=False,
                    pos_definite=False, sparse=False):
    """Generate a random N x N test matrix with the requested structure.

    NOTE(review): tests seed the global numpy RNG and depend on the exact
    order of the np.random calls below -- do not reorder them.
    """
    M = np.random.random((N, N))
    if complex_:
        M = M + 1j * np.random.random((N, N))

    if hermitian:
        if pos_definite:
            if sparse:
                # zero out some columns before forming M M^H so the
                # product remains positive (semi-)definite but sparse-ish
                i = np.arange(N)
                j = np.random.randint(N, size=N-2)
                i, j = np.meshgrid(i, j)
                M[i, j] = 0
            M = np.dot(M.conj(), M.T)
        else:
            M = np.dot(M.conj(), M.T)
            if sparse:
                # zero out random symmetric pairs of off-diagonal entries
                i = np.random.randint(N, size=N * N // 4)
                j = np.random.randint(N, size=N * N // 4)
                ind = np.nonzero(i == j)
                j[ind] = (j[ind] + 1) % N  # avoid zeroing the diagonal
                M[i, j] = 0
                M[j, i] = 0
    else:
        if sparse:
            i = np.random.randint(N, size=N * N // 2)
            j = np.random.randint(N, size=N * N // 2)
            M[i, j] = 0
    return M
def generate_matrix_symmetric(N, pos_definite=False, sparse=False):
    """Generate a random N x N symmetric matrix, optionally SPD / sparse."""
    M = np.random.random((N, N))
    M = 0.5 * (M + M.T)  # symmetrize

    if pos_definite:
        # entries of the symmetrized M lie in [0, 1], so shifting the
        # diagonal by N guarantees positive definiteness (Gershgorin)
        Id = N * np.eye(N)
        if sparse:
            M = csr_matrix(M)
        M += Id
    elif sparse:
        M = csr_matrix(M)
    return M
def assert_allclose_cc(actual, desired, **kw):
    """Assert *actual* is close to *desired* or to its complex conjugate."""
    try:
        assert_allclose(actual, desired, **kw)
    except AssertionError:
        # eigenpairs may legitimately come back conjugated; accept that too
        assert_allclose(actual, conj(desired), **kw)
def argsort_which(eigenvalues, typ, k, which,
                  sigma=None, OPpart=None, mode=None):
    """Return sorted indices of eigenvalues using the "which" keyword
    from eigs and eigsh.

    When ``sigma`` is given, the eigenvalues are first mapped through the
    same spectral transformation ARPACK applied (shift-invert / cayley /
    buckling), because 'which' refers to the transformed values.
    """
    if sigma is None:
        reval = np.round(eigenvalues, decimals=_ndigits[typ])
    else:
        if mode is None or mode == 'normal':
            if OPpart is None:
                reval = 1. / (eigenvalues - sigma)
            elif OPpart == 'r':
                # real part of the shift-invert transform for real A
                reval = 0.5 * (1. / (eigenvalues - sigma)
                               + 1. / (eigenvalues - np.conj(sigma)))
            elif OPpart == 'i':
                # imaginary part of the shift-invert transform for real A
                reval = -0.5j * (1. / (eigenvalues - sigma)
                                 - 1. / (eigenvalues - np.conj(sigma)))
        elif mode == 'cayley':
            reval = (eigenvalues + sigma) / (eigenvalues - sigma)
        elif mode == 'buckling':
            reval = eigenvalues / (eigenvalues - sigma)
        else:
            raise ValueError("mode='%s' not recognized" % mode)

        # round so that numerically-tied values sort stably
        reval = np.round(reval, decimals=_ndigits[typ])

    if which in ['LM', 'SM']:
        ind = np.argsort(abs(reval))
    elif which in ['LR', 'SR', 'LA', 'SA', 'BE']:
        ind = np.argsort(np.real(reval))
    elif which in ['LI', 'SI']:
        # for LI,SI ARPACK returns largest,smallest abs(imaginary) why?
        if typ.islower():
            ind = np.argsort(abs(np.imag(reval)))
        else:
            ind = np.argsort(np.imag(reval))
    else:
        raise ValueError("which='%s' is unrecognized" % which)

    if which in ['LM', 'LA', 'LR', 'LI']:
        return ind[-k:]
    elif which in ['SM', 'SA', 'SR', 'SI']:
        return ind[:k]
    elif which == 'BE':
        # half from each end of the spectrum (extra one from the high end)
        return np.concatenate((ind[:k//2], ind[k//2-k:]))
def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None,
              mattype=np.asarray, OPpart=None, mode='normal'):
    """Run eigs/eigsh on the test-case dict ``d`` and compare the result
    against the exact eigenvalues precomputed in ``d['eval']``.

    A generalized problem is assumed when ``d`` carries a 'bmat' entry.
    The solve is retried up to 5 times because ARPACK can legitimately
    return a correct eigenpair set that is not the subset selected by
    ``which`` (see the inline note below).
    """
    general = ('bmat' in d)
    if symmetric:
        eigs_func = eigsh
    else:
        eigs_func = eigs
    # Build a diagnostic message identifying the full parameter combination.
    if general:
        err = ("error for {}:general, typ={}, which={}, sigma={}, "
               "mattype={}, OPpart={}, mode={}".format(eigs_func.__name__,
                                                       typ, which, sigma,
                                                       mattype.__name__,
                                                       OPpart, mode))
    else:
        err = ("error for {}:standard, typ={}, which={}, sigma={}, "
               "mattype={}, OPpart={}, mode={}".format(eigs_func.__name__,
                                                       typ, which, sigma,
                                                       mattype.__name__,
                                                       OPpart, mode))
    a = d['mat'].astype(typ)
    ac = mattype(a)
    if general:
        b = d['bmat'].astype(typ)
        bc = mattype(b)

    # get exact eigenvalues (selected and ordered the same way ARPACK would)
    exact_eval = d['eval'].astype(typ.upper())
    ind = argsort_which(exact_eval, typ, k, which,
                        sigma, OPpart, mode)
    exact_eval = exact_eval[ind]

    # compute arpack eigenvalues
    kwargs = dict(which=which, v0=v0, sigma=sigma)
    if eigs_func is eigsh:
        kwargs['mode'] = mode
    else:
        kwargs['OPpart'] = OPpart

    # compute suitable tolerances
    kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype, d, which)
    # on rare occasions, ARPACK routines return results that are proper
    # eigenvalues and -vectors, but not necessarily the ones requested in
    # the parameter which. This is inherent to the Krylov methods, and
    # should not be treated as a failure. If such a rare situation
    # occurs, the calculation is tried again (but at most a few times).
    ntries = 0
    while ntries < 5:
        # solve; on non-convergence retry once with a much larger maxiter
        if general:
            try:
                eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
            except ArpackNoConvergence:
                kwargs['maxiter'] = 20*a.shape[0]
                eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
        else:
            try:
                eigenvalues, evec = eigs_func(ac, k, **kwargs)
            except ArpackNoConvergence:
                kwargs['maxiter'] = 20*a.shape[0]
                eigenvalues, evec = eigs_func(ac, k, **kwargs)

        ind = argsort_which(eigenvalues, typ, k, which,
                            sigma, OPpart, mode)
        eigenvalues = eigenvalues[ind]
        evec = evec[:, ind]

        try:
            # check eigenvalues
            assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol,
                               err_msg=err)
            check_evecs = True
        except AssertionError:
            # wrong subset returned -- count the attempt and loop around
            check_evecs = False
            ntries += 1

        if check_evecs:
            # check eigenvectors satisfy A v = lambda (B) v
            LHS = np.dot(a, evec)
            if general:
                RHS = eigenvalues * np.dot(b, evec)
            else:
                RHS = eigenvalues * evec

            assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err)
            break

    # check eigenvalues: if every retry produced the wrong subset, this
    # final comparison on the last attempt raises the failure.
    assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol, err_msg=err)
class DictWithRepr(dict):
    """dict subclass carrying a short display name, so parametrized test
    failures print a readable tag instead of dumping the whole dict."""

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return "<%s>" % (self.name,)
class SymmetricParams:
    """Parameter grid shared by the symmetric/Hermitian (eigsh) tests.

    Instantiation precomputes small test problems -- matrices, optional
    mass matrices, a starting vector, and reference eigenvalues from
    scipy.linalg.eigh -- split into real and complex case lists.
    """
    def __init__(self):
        self.eigs = eigsh
        self.which = ['LM', 'SM', 'LA', 'SA', 'BE']
        self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
        # map: sigma value -> shift-invert modes exercised for that sigma
        self.sigmas_modes = {None: ['normal'],
                             0.5: ['normal', 'buckling', 'cayley']}

        # generate matrices
        # these should all be float32 so that the eigenvalues
        # are the same in float32 and float64
        N = 6
        np.random.seed(2300)
        # NOTE: the call order below fixes the np.random stream; do not
        # reorder the generate_matrix / random calls.
        Ar = generate_matrix(N, hermitian=True,
                             pos_definite=True).astype('f').astype('d')
        M = generate_matrix(N, hermitian=True,
                            pos_definite=True).astype('f').astype('d')
        Ac = generate_matrix(N, hermitian=True, pos_definite=True,
                             complex_=True).astype('F').astype('D')
        Mc = generate_matrix(N, hermitian=True, pos_definite=True,
                             complex_=True).astype('F').astype('D')
        v0 = np.random.random(N)

        # standard symmetric problem
        SS = DictWithRepr("std-symmetric")
        SS['mat'] = Ar
        SS['v0'] = v0
        SS['eval'] = eigh(SS['mat'], eigvals_only=True)

        # general symmetric problem
        GS = DictWithRepr("gen-symmetric")
        GS['mat'] = Ar
        GS['bmat'] = M
        GS['v0'] = v0
        GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True)

        # standard hermitian problem
        SH = DictWithRepr("std-hermitian")
        SH['mat'] = Ac
        SH['v0'] = v0
        SH['eval'] = eigh(SH['mat'], eigvals_only=True)

        # general hermitian problem
        GH = DictWithRepr("gen-hermitian")
        GH['mat'] = Ac
        GH['bmat'] = M
        GH['v0'] = v0
        GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True)

        # general hermitian problem with hermitian M
        GHc = DictWithRepr("gen-hermitian-Mc")
        GHc['mat'] = Ac
        GHc['bmat'] = Mc
        GHc['v0'] = v0
        GHc['eval'] = eigh(GHc['mat'], GHc['bmat'], eigvals_only=True)

        self.real_test_cases = [SS, GS]
        self.complex_test_cases = [SH, GH, GHc]
class NonSymmetricParams:
    """Parameter grid shared by the nonsymmetric (eigs) tests.

    Mirrors SymmetricParams but builds nonsymmetric matrices and uses
    scipy.linalg.eig for the reference eigenvalues.
    """
    def __init__(self):
        self.eigs = eigs
        self.which = ['LM', 'LR', 'LI']  # , 'SM', 'LR', 'SR', 'LI', 'SI']
        self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
        # map: sigma value -> OPpart values exercised for that sigma
        self.sigmas_OPparts = {None: [None],
                               0.1: ['r'],
                               0.1 + 0.1j: ['r', 'i']}

        # generate matrices
        # these should all be float32 so that the eigenvalues
        # are the same in float32 and float64
        N = 6
        np.random.seed(2300)
        # NOTE: the call order below fixes the np.random stream; do not
        # reorder the generate_matrix / random calls.
        Ar = generate_matrix(N).astype('f').astype('d')
        M = generate_matrix(N, hermitian=True,
                            pos_definite=True).astype('f').astype('d')
        Ac = generate_matrix(N, complex_=True).astype('F').astype('D')
        v0 = np.random.random(N)

        # standard real nonsymmetric problem
        SNR = DictWithRepr("std-real-nonsym")
        SNR['mat'] = Ar
        SNR['v0'] = v0
        SNR['eval'] = eig(SNR['mat'], left=False, right=False)

        # general real nonsymmetric problem
        GNR = DictWithRepr("gen-real-nonsym")
        GNR['mat'] = Ar
        GNR['bmat'] = M
        GNR['v0'] = v0
        GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False)

        # standard complex nonsymmetric problem
        SNC = DictWithRepr("std-cmplx-nonsym")
        SNC['mat'] = Ac
        SNC['v0'] = v0
        SNC['eval'] = eig(SNC['mat'], left=False, right=False)

        # general complex nonsymmetric problem
        GNC = DictWithRepr("gen-cmplx-nonsym")
        GNC['mat'] = Ac
        GNC['bmat'] = M
        GNC['v0'] = v0
        GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False)

        self.real_test_cases = [SNR, GNR]
        self.complex_test_cases = [SNC, GNC]
def test_symmetric_modes():
    """Sweep eigsh over every (case, dtype, which, mattype, sigma, mode)
    combination of the real symmetric test grid."""
    params = SymmetricParams()
    k = 2
    for D in params.real_test_cases:
        for typ, which, mattype in itertools.product(
                'fd', params.which, params.mattypes):
            for sigma, modes in params.sigmas_modes.items():
                for mode in modes:
                    eval_evec(True, D, typ, k, which,
                              None, sigma, mattype, None, mode)
def test_hermitian_modes():
    """Sweep eigsh over the complex Hermitian cases ('BE' is skipped
    because it is invalid for complex problems)."""
    params = SymmetricParams()
    k = 2
    valid_which = [w for w in params.which if w != 'BE']  # BE invalid for complex
    for D in params.complex_test_cases:
        for typ, which in itertools.product('FD', valid_which):
            for mattype in params.mattypes:
                for sigma in params.sigmas_modes:
                    eval_evec(True, D, typ, k, which,
                              None, sigma, mattype)
def test_symmetric_starting_vector():
    """Check eigsh accepts an explicit random starting vector for several k."""
    params = SymmetricParams()
    for k, D in itertools.product([1, 2, 3, 4, 5], params.real_test_cases):
        for typ in 'fd':
            v0 = random.rand(len(D['v0'])).astype(typ)
            eval_evec(True, D, typ, k, 'LM', v0)
def test_symmetric_no_convergence():
    """eigsh with too few iterations must raise ArpackNoConvergence, and the
    partially converged eigenpairs attached to the exception must still
    satisfy ``m v = w v`` to the test tolerance."""
    np.random.seed(1234)
    m = generate_matrix(30, hermitian=True, pos_definite=True)
    tol, rtol, atol = _get_test_tolerance('d')
    try:
        w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol, ncv=9)
        raise AssertionError("Spurious no-error exit")
    except ArpackNoConvergence as err:
        nconv = len(err.eigenvalues)
        if nconv <= 0:
            raise AssertionError("Spurious no-eigenvalues-found case") from err
        w, v = err.eigenvalues, err.eigenvectors
        assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol)
def test_real_nonsymmetric_modes():
    """Sweep eigs over the real nonsymmetric cases for every combination of
    dtype, which, mattype, sigma, and OPpart."""
    params = NonSymmetricParams()
    k = 2
    for D, typ in itertools.product(params.real_test_cases, 'fd'):
        for which, mattype in itertools.product(params.which, params.mattypes):
            for sigma, OPparts in params.sigmas_OPparts.items():
                for OPpart in OPparts:
                    eval_evec(False, D, typ, k, which,
                              None, sigma, mattype, OPpart)
def test_complex_nonsymmetric_modes():
    """Sweep eigs over the complex nonsymmetric cases (no OPpart needed)."""
    params = NonSymmetricParams()
    k = 2
    for D, typ in itertools.product(params.complex_test_cases, 'DF'):
        for which, mattype in itertools.product(params.which, params.mattypes):
            for sigma in params.sigmas_OPparts:
                eval_evec(False, D, typ, k, which,
                          None, sigma, mattype)
def test_standard_nonsymmetric_starting_vector():
    """eigs must accept an explicit starting vector v0 for the complex cases."""
    params = NonSymmetricParams()
    for k, d in itertools.product([1, 2, 3, 4], params.complex_test_cases):
        for typ in 'FD':
            n = d['mat'].shape[0]
            v0 = random.rand(n).astype(typ)
            eval_evec(False, d, typ, k, "LM", v0, None)
def test_general_nonsymmetric_starting_vector():
    """Starting-vector check over the complex cases; the generalized cases
    are exercised because eval_evec treats any dict carrying 'bmat' as a
    generalized problem.  (The loop body mirrors the standard-problem
    starting-vector test.)"""
    params = NonSymmetricParams()
    for k, d in itertools.product([1, 2, 3, 4], params.complex_test_cases):
        for typ in 'FD':
            n = d['mat'].shape[0]
            v0 = random.rand(n).astype(typ)
            eval_evec(False, d, typ, k, "LM", v0, None)
def test_standard_nonsymmetric_no_convergence():
    """eigs with maxiter=5 must raise ArpackNoConvergence; every partially
    converged eigenpair attached to the exception must satisfy A v = w v."""
    np.random.seed(1234)
    m = generate_matrix(30, complex_=True)
    tol, rtol, atol = _get_test_tolerance('d')
    try:
        w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
        raise AssertionError("Spurious no-error exit")
    except ArpackNoConvergence as err:
        if len(err.eigenvalues) <= 0:
            raise AssertionError("Spurious no-eigenvalues-found case") from err
        w, v = err.eigenvalues, err.eigenvectors
        for val, vec in zip(w, v.T):
            assert_allclose(dot(m, vec), val * vec, rtol=rtol, atol=atol)
def test_eigen_bad_shapes():
    """eigs must reject a non-square matrix with ValueError."""
    rect = csc_matrix(np.zeros((2, 3)))  # 2x3: not square
    assert_raises(ValueError, eigs, rect)
def test_eigen_bad_kwargs():
    """An unknown 'which' value must raise ValueError."""
    mat = csc_matrix(np.zeros((8, 8)))
    assert_raises(ValueError, eigs, mat, which='XX')
def test_ticket_1459_arpack_crash():
    """Regression test for ticket 1459: eigs crashed for this specific v0.

    The crash was only observed for float64, but the same fix was applied
    for float32, so both dtypes are exercised just to be sure.
    """
    v0_values = [-0.71063568258907849895, -0.83185111795729227424,
                 -0.34365925382227402451, 0.46122533684552280420,
                 -0.58001341115969040629, -0.78844877570084292984e-01]
    for dtype in [np.float32, np.float64]:
        np.random.seed(2301)
        A = np.random.random((6, 6)).astype(dtype)
        v0 = np.array(v0_values, dtype=dtype)
        # Should not crash:
        evals, evecs = eigs(A, 2, v0=v0)
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_linearoperator_deallocation():
    """Each ARPACK helper operator must be freed by reference counting
    alone: they are big objects, and if eigs/eigsh are called in a tight
    loop, waiting for the cyclic GC could exhaust memory."""
    M_d = np.eye(10)
    M_s = csc_matrix(M_d)
    M_o = aslinearoperator(M_d)
    factories = (
        lambda: arpack.SpLuInv(M_s),
        lambda: arpack.LuInv(M_d),
        lambda: arpack.IterInv(M_s),
        lambda: arpack.IterOpInv(M_o, None, 0.3),
        lambda: arpack.IterOpInv(M_o, M_o, 0.3),
    )
    for make in factories:
        with assert_deallocated(make):
            pass
def test_parallel_threads():
    """eigs/eigsh called concurrently from many threads (plus the main
    thread) must all produce the same eigenvalues for the same input."""
    collected = []
    v0 = np.random.rand(50)

    def worker():
        mat = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
        for solver in (eigs, eigsh):
            w, _ = solver(mat, k=3, v0=v0)
            collected.append(w)

    threads = [threading.Thread(target=worker) for _ in range(10)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    worker()  # run once more in the main thread

    for w in collected:
        assert_allclose(w, collected[-1])
def test_reentering():
    """Calling eigs from inside an eigs matvec must fail gracefully (with a
    RuntimeError, not a crash): the Fortran code is not reentrant."""
    def recursive_matvec(x):
        inner = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
        w, v = eigs(inner, k=1)
        return v / w[0]

    A = LinearOperator(matvec=recursive_matvec, dtype=float, shape=(50, 50))
    assert_raises(RuntimeError, eigs, A, k=1)
    assert_raises(RuntimeError, eigsh, A, k=1)
def test_regression_arpackng_1315():
    """Guard against arpack-ng issue #1315 (adapted from bug_1315_single.c).

    A failure here indicates a faulty installed ARPACK library.
    """
    k = 9
    for dtype in [np.float32, np.float64]:
        np.random.seed(1234)
        exact = np.arange(1, 1001).astype(dtype)
        A = diags([exact], [0], shape=(1000, 1000))
        v0 = np.random.rand(1000).astype(dtype)
        approx, _ = eigs(A, k=k, ncv=2*k+1, which="LM", v0=v0)
        assert_allclose(np.sort(approx), np.sort(exact[-k:]),
                        rtol=1e-4)
def test_eigs_for_k_greater():
    """Test eigs() for k beyond limits: the result must match the dense
    eig() solution, and unsupported operator types must raise TypeError."""
    A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4))  # sparse
    A = generate_matrix(4, sparse=False)
    M_dense = np.random.random((4, 4))
    M_sparse = generate_matrix(4, sparse=True)
    M_linop = aslinearoperator(M_dense)
    dense_result = eig(A, b=M_dense)
    sparse_result = eig(A, b=M_sparse)

    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning)

        for k in (3, 4, 5):
            assert_equal(eigs(A, M=M_dense, k=k), dense_result)
        assert_equal(eigs(A, M=M_sparse, k=5), sparse_result)

        # M as LinearOperator is not supported here
        assert_raises(TypeError, eigs, A, M=M_linop, k=3)

        # nor are LinearOperator / sparse forms of 'A'
        assert_raises(TypeError, eigs, aslinearoperator(A), k=3)
        assert_raises(TypeError, eigs, A_sparse, k=3)
def test_eigsh_for_k_greater():
    """Test eigsh() for k beyond limits: the result must match the dense
    eigh() solution, and unsupported operator types must raise TypeError."""
    A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4))  # sparse
    A = generate_matrix(4, sparse=False)
    M_dense = generate_matrix_symmetric(4, pos_definite=True)
    M_sparse = generate_matrix_symmetric(4, pos_definite=True, sparse=True)
    M_linop = aslinearoperator(M_dense)
    dense_result = eigh(A, b=M_dense)
    sparse_result = eigh(A, b=M_sparse)

    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning)

        for k in (4, 5):
            assert_equal(eigsh(A, M=M_dense, k=k), dense_result)
        assert_equal(eigsh(A, M=M_sparse, k=5), sparse_result)

        # M as LinearOperator is not supported here
        assert_raises(TypeError, eigsh, A, M=M_linop, k=4)

        # nor are LinearOperator / sparse forms of 'A'
        assert_raises(TypeError, eigsh, aslinearoperator(A), k=4)
        assert_raises(TypeError, eigsh, A_sparse, M=M_dense, k=4)
def test_real_eigs_real_k_subset():
    """For a fixed sparse real matrix, check that the eigenvalue set
    returned for k is (approximately) a subset of the set returned for
    k+1, across every which/sigma/dtype combination, and that 'LM'
    results follow a systematic magnitude ordering.
    """
    np.random.seed(1)

    n = 10
    A = rand(n, n, density=0.5)
    A.data *= 2
    A.data -= 1

    v0 = np.ones(n)

    whichs = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
    dtypes = [np.float32, np.float64]

    for which, sigma, dtype in itertools.product(whichs, [None, 0, 5], dtypes):
        prev_w = np.array([], dtype=dtype)
        eps = np.finfo(dtype).eps
        for k in range(1, 9):
            w, z = eigs(A.astype(dtype), k=k, which=which, sigma=sigma,
                        v0=v0.astype(dtype), tol=0)
            # residual check: A z ~= z w to within sqrt(eps)
            assert_allclose(np.linalg.norm(A.dot(z) - z * w), 0, atol=np.sqrt(eps))

            # Check that the set of eigenvalues for `k` is a subset of that for `k+1`
            # (vacuously true on the first iteration, where prev_w is empty)
            dist = abs(prev_w[:,None] - w).min(axis=1)
            assert_allclose(dist, 0, atol=np.sqrt(eps))

            prev_w = w

            # Check sort order
            if sigma is None:
                d = w
            else:
                d = 1 / (w - sigma)

            if which == 'LM':
                # ARPACK is systematic for 'LM', but sort order
                # appears not well defined for other modes
                assert np.all(np.diff(abs(d)) <= 1e-6)
| 23,750
| 32.03338
| 85
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_eigen/arpack/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py
|
"""
Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
References
----------
.. [1] A. V. Knyazev (2001),
Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method.
SIAM Journal on Scientific Computing 23, no. 2,
pp. 517-541. :doi:`10.1137/S1064827500366124`
.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007),
Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX)
in hypre and PETSc. :arxiv:`0705.2626`
.. [3] A. V. Knyazev's C and MATLAB implementations:
https://github.com/lobpcg/blopex
"""
import warnings
import numpy as np
from scipy.linalg import (inv, eigh, cho_factor, cho_solve,
cholesky, LinAlgError)
from scipy.sparse.linalg import LinearOperator
from scipy.sparse import issparse
__all__ = ["lobpcg"]
def _report_nonhermitian(M, name):
"""
Report if `M` is not a Hermitian matrix given its type.
"""
from scipy.linalg import norm
md = M - M.T.conj()
nmd = norm(md, 1)
tol = 10 * np.finfo(M.dtype).eps
tol = max(tol, tol * norm(M, 1))
if nmd > tol:
warnings.warn(
f"Matrix {name} of the type {M.dtype} is not Hermitian: "
f"condition: {nmd} < {tol} fails.",
UserWarning, stacklevel=4
)
def _as2d(ar):
"""
If the input array is 2D return it, if it is 1D, append a dimension,
making it a column vector.
"""
if ar.ndim == 2:
return ar
else: # Assume 1!
aux = np.array(ar, copy=False)
aux.shape = (ar.shape[0], 1)
return aux
def _makeMatMat(m):
if m is None:
return None
elif callable(m):
return lambda v: m(v)
else:
return lambda v: m @ v
def _matmul_inplace(x, y, verbosityLevel=0):
"""Perform 'np.matmul' in-place if possible.
If some sufficient conditions for inplace matmul are met, do so.
Otherwise try inplace update and fall back to overwrite if that fails.
"""
if x.flags["CARRAY"] and x.shape[1] == y.shape[1] and x.dtype == y.dtype:
# conditions where we can guarantee that inplace updates will work;
# i.e. x is not a view/slice, x & y have compatible dtypes, and the
# shape of the result of x @ y matches the shape of x.
np.matmul(x, y, out=x)
else:
# ideally, we'd have an exhaustive list of conditions above when
# inplace updates are possible; since we don't, we opportunistically
# try if it works, and fall back to overwriting if necessary
try:
np.matmul(x, y, out=x)
except Exception:
if verbosityLevel:
warnings.warn(
"Inplace update of x = x @ y failed, "
"x needs to be overwritten.",
UserWarning, stacklevel=3
)
x = x @ y
return x
def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY):
"""Changes blockVectorV in-place."""
YBV = blockVectorBY.T.conj() @ blockVectorV
tmp = cho_solve(factYBY, YBV)
blockVectorV -= blockVectorY @ tmp
def _b_orthonormalize(B, blockVectorV, blockVectorBV=None,
                      verbosityLevel=0):
    """B-orthonormalize the columns of ``blockVectorV`` in place using the
    Cholesky factor of V^H B V.

    Returns ``(V, BV, R_inv)`` on success, or ``(None, None, None)`` when
    either the product by ``B`` or the Cholesky factorization fails.
    """
    if blockVectorBV is None:
        if B is None:
            blockVectorBV = blockVectorV
        else:
            try:
                blockVectorBV = B(blockVectorV)
            except Exception as e:
                if verbosityLevel:
                    warnings.warn(
                        f"Secondary MatMul call failed with error\n"
                        f"{e}\n",
                        UserWarning, stacklevel=3
                    )
                return None, None, None
            if blockVectorBV.shape != blockVectorV.shape:
                raise ValueError(
                    f"The shape {blockVectorV.shape} "
                    f"of the orthogonalized matrix not preserved\n"
                    f"and changed to {blockVectorBV.shape} "
                    f"after multiplying by the secondary matrix.\n"
                )

    gramVBV = blockVectorV.T.conj() @ blockVectorBV
    try:
        # Factor once, invert once: gramVBV becomes R^{-1} for V <- V R^{-1}.
        gramVBV = cholesky(gramVBV, overwrite_a=True)
        gramVBV = inv(gramVBV, overwrite_a=True)
    except LinAlgError:
        if verbosityLevel:
            warnings.warn(
                "Cholesky has failed.",
                UserWarning, stacklevel=3
            )
        return None, None, None

    blockVectorV = _matmul_inplace(
        blockVectorV, gramVBV,
        verbosityLevel=verbosityLevel
    )
    if B is not None:
        blockVectorBV = _matmul_inplace(
            blockVectorBV, gramVBV,
            verbosityLevel=verbosityLevel
        )
    return blockVectorV, blockVectorBV, gramVBV
def _get_indx(_lambda, num, largest):
"""Get `num` indices into `_lambda` depending on `largest` option."""
ii = np.argsort(_lambda)
if largest:
ii = ii[:-num - 1:-1]
else:
ii = ii[:num]
return ii
def _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel):
    """When verbose, warn if either Gram matrix is noticeably non-Hermitian."""
    if not verbosityLevel:
        return
    _report_nonhermitian(gramA, "gramA")
    _report_nonhermitian(gramB, "gramB")
def lobpcg(
A,
X,
B=None,
M=None,
Y=None,
tol=None,
maxiter=None,
largest=True,
verbosityLevel=0,
retLambdaHistory=False,
retResidualNormsHistory=False,
restartControl=20,
):
"""Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
LOBPCG is a preconditioned eigensolver for large real symmetric and complex
Hermitian definite generalized eigenproblems.
Parameters
----------
A : {sparse matrix, ndarray, LinearOperator, callable object}
The Hermitian linear operator of the problem, usually given by a
sparse matrix. Often called the "stiffness matrix".
X : ndarray, float32 or float64
Initial approximation to the ``k`` eigenvectors (non-sparse).
If `A` has ``shape=(n,n)`` then `X` must have ``shape=(n,k)``.
B : {sparse matrix, ndarray, LinearOperator, callable object}
Optional. By default ``B = None``, which is equivalent to identity.
The right hand side operator in a generalized eigenproblem if present.
Often called the "mass matrix". Must be Hermitian positive definite.
M : {sparse matrix, ndarray, LinearOperator, callable object}
Optional. By default ``M = None``, which is equivalent to identity.
Preconditioner aiming to accelerate convergence.
Y : ndarray, float32 or float64, default: None
An ``n-by-sizeY`` ndarray of constraints with ``sizeY < n``.
The iterations will be performed in the ``B``-orthogonal complement
of the column-space of `Y`. `Y` must be full rank if present.
tol : scalar, optional
The default is ``tol=n*sqrt(eps)``.
Solver tolerance for the stopping criterion.
maxiter : int, default: 20
Maximum number of iterations.
largest : bool, default: True
When True, solve for the largest eigenvalues, otherwise the smallest.
verbosityLevel : int, optional
By default ``verbosityLevel=0`` no output.
Controls the solver standard/screen output.
retLambdaHistory : bool, default: False
Whether to return iterative eigenvalue history.
retResidualNormsHistory : bool, default: False
Whether to return iterative history of residual norms.
restartControl : int, optional.
Iterations restart if the residuals jump ``2**restartControl`` times
compared to the smallest recorded in ``retResidualNormsHistory``.
The default is ``restartControl=20``, making the restarts rare for
backward compatibility.
Returns
-------
lambda : ndarray of the shape ``(k, )``.
Array of ``k`` approximate eigenvalues.
v : ndarray of the same shape as ``X.shape``.
An array of ``k`` approximate eigenvectors.
lambdaHistory : ndarray, optional.
The eigenvalue history, if `retLambdaHistory` is ``True``.
ResidualNormsHistory : ndarray, optional.
The history of residual norms, if `retResidualNormsHistory`
is ``True``.
Notes
-----
The iterative loop runs ``maxit=maxiter`` (20 if ``maxit=None``)
iterations at most and finishes earler if the tolerance is met.
Breaking backward compatibility with the previous version, LOBPCG
now returns the block of iterative vectors with the best accuracy rather
than the last one iterated, as a cure for possible divergence.
If ``X.dtype == np.float32`` and user-provided operations/multiplications
by `A`, `B`, and `M` all preserve the ``np.float32`` data type,
all the calculations and the output are in ``np.float32``.
The size of the iteration history output equals to the number of the best
(limited by `maxit`) iterations plus 3: initial, final, and postprocessing.
If both `retLambdaHistory` and `retResidualNormsHistory` are ``True``,
the return tuple has the following format
``(lambda, V, lambda history, residual norms history)``.
In the following ``n`` denotes the matrix size and ``k`` the number
of required eigenvalues (smallest or largest).
The LOBPCG code internally solves eigenproblems of the size ``3k`` on every
iteration by calling the dense eigensolver `eigh`, so if ``k`` is not
small enough compared to ``n``, it makes no sense to call the LOBPCG code.
Moreover, if one calls the LOBPCG algorithm for ``5k > n``, it would likely
break internally, so the code calls the standard function `eigh` instead.
It is not that ``n`` should be large for the LOBPCG to work, but rather the
ratio ``n / k`` should be large. It you call LOBPCG with ``k=1``
and ``n=10``, it works though ``n`` is small. The method is intended
for extremely large ``n / k``.
The convergence speed depends basically on three factors:
1. Quality of the initial approximations `X` to the seeking eigenvectors.
Randomly distributed around the origin vectors work well if no better
choice is known.
2. Relative separation of the desired eigenvalues from the rest
of the eigenvalues. One can vary ``k`` to improve the separation.
3. Proper preconditioning to shrink the spectral spread.
For example, a rod vibration test problem (under tests
directory) is ill-conditioned for large ``n``, so convergence will be
slow, unless efficient preconditioning is used. For this specific
problem, a good simple preconditioner function would be a linear solve
for `A`, which is easy to code since `A` is tridiagonal.
References
----------
.. [1] A. V. Knyazev (2001),
Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method.
SIAM Journal on Scientific Computing 23, no. 2,
pp. 517-541. :doi:`10.1137/S1064827500366124`
.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
(2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
(BLOPEX) in hypre and PETSc. :arxiv:`0705.2626`
.. [3] A. V. Knyazev's C and MATLAB implementations:
https://github.com/lobpcg/blopex
Examples
--------
Our first example is minimalistic - find the largest eigenvalue of
a diagonal matrix by solving the non-generalized eigenvalue problem
``A x = lambda x`` without constraints or preconditioning.
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from scipy.sparse.linalg import LinearOperator, aslinearoperator
>>> from scipy.sparse.linalg import lobpcg
The square matrix size is
>>> n = 100
and its diagonal entries are 1, ..., 100 defined by
>>> vals = np.arange(1, n + 1).astype(np.int16)
The first mandatory input parameter in this test is
the sparse diagonal matrix `A`
of the eigenvalue problem ``A x = lambda x`` to solve.
>>> A = spdiags(vals, 0, n, n)
>>> A = A.astype(np.int16)
>>> A.toarray()
array([[ 1, 0, 0, ..., 0, 0, 0],
[ 0, 2, 0, ..., 0, 0, 0],
[ 0, 0, 3, ..., 0, 0, 0],
...,
[ 0, 0, 0, ..., 98, 0, 0],
[ 0, 0, 0, ..., 0, 99, 0],
[ 0, 0, 0, ..., 0, 0, 100]], dtype=int16)
The second mandatory input parameter `X` is a 2D array with the
row dimension determining the number of requested eigenvalues.
`X` is an initial guess for targeted eigenvectors.
`X` must have linearly independent columns.
If no initial approximations available, randomly oriented vectors
commonly work best, e.g., with components normally distributed
around zero or uniformly distributed on the interval [-1 1].
Setting the initial approximations to dtype ``np.float32``
forces all iterative values to dtype ``np.float32`` speeding up
the run while still allowing accurate eigenvalue computations.
>>> k = 1
>>> rng = np.random.default_rng()
>>> X = rng.normal(size=(n, k))
>>> X = X.astype(np.float32)
>>> eigenvalues, _ = lobpcg(A, X, maxiter=60)
>>> eigenvalues
array([100.])
>>> eigenvalues.dtype
dtype('float32')
LOBPCG needs only access the matrix product with `A` rather
then the matrix itself. Since the matrix `A` is diagonal in
this example, one can write a function of the product
``A @ X`` using the diagonal values ``vals`` only, e.g., by
element-wise multiplication with broadcasting
>>> A_f = lambda X: vals[:, np.newaxis] * X
and use the handle ``A_f`` to this callable function as an input
>>> eigenvalues, _ = lobpcg(A_f, X, maxiter=60)
>>> eigenvalues
array([100.])
The next example illustrates computing 3 smallest eigenvalues of
the same matrix given by the function handle ``A_f`` with
constraints and preconditioning.
>>> k = 3
>>> X = rng.normal(size=(n, k))
Constraints - an optional input parameter is a 2D array comprising
of column vectors that the eigenvectors must be orthogonal to
>>> Y = np.eye(n, 3)
The preconditioner acts as the inverse of `A` in this example, but
in the reduced precision ``np.float32`` even though the initial `X`
and thus all iterates and the output are in full ``np.float64``.
>>> inv_vals = 1./vals
>>> inv_vals = inv_vals.astype(np.float32)
>>> M = lambda X: inv_vals[:, np.newaxis] * X
Let us now solve the eigenvalue problem for the matrix `A` first
without preconditioning requesting 80 iterations
>>> eigenvalues, _ = lobpcg(A_f, X, Y=Y, largest=False, maxiter=80)
>>> eigenvalues
array([4., 5., 6.])
>>> eigenvalues.dtype
dtype('float64')
With preconditioning we need only 20 iterations from the same `X`
>>> eigenvalues, _ = lobpcg(A_f, X, Y=Y, M=M, largest=False, maxiter=20)
>>> eigenvalues
array([4., 5., 6.])
Note that the vectors passed in `Y` are the eigenvectors of the 3
smallest eigenvalues. The results returned above are orthogonal to those.
Finally, the primary matrix `A` may be indefinite, e.g., after shifting
``vals`` by 50 from 1, ..., 100 to -49, ..., 50, we still can compute
the 3 smallest or largest eigenvalues.
>>> vals = vals - 50
>>> X = rng.normal(size=(n, k))
>>> eigenvalues, _ = lobpcg(A_f, X, largest=False, maxiter=99)
>>> eigenvalues
array([-49., -48., -47.])
>>> eigenvalues, _ = lobpcg(A_f, X, largest=True, maxiter=99)
>>> eigenvalues
array([50., 49., 48.])
"""
blockVectorX = X
bestblockVectorX = blockVectorX
blockVectorY = Y
residualTolerance = tol
if maxiter is None:
maxiter = 20
bestIterationNumber = maxiter
sizeY = 0
if blockVectorY is not None:
if len(blockVectorY.shape) != 2:
warnings.warn(
f"Expected rank-2 array for argument Y, instead got "
f"{len(blockVectorY.shape)}, "
f"so ignore it and use no constraints.",
UserWarning, stacklevel=2
)
blockVectorY = None
else:
sizeY = blockVectorY.shape[1]
# Block size.
if blockVectorX is None:
raise ValueError("The mandatory initial matrix X cannot be None")
if len(blockVectorX.shape) != 2:
raise ValueError("expected rank-2 array for argument X")
n, sizeX = blockVectorX.shape
# Data type of iterates, determined by X, must be inexact
if not np.issubdtype(blockVectorX.dtype, np.inexact):
warnings.warn(
f"Data type for argument X is {blockVectorX.dtype}, "
f"which is not inexact, so casted to np.float32.",
UserWarning, stacklevel=2
)
blockVectorX = np.asarray(blockVectorX, dtype=np.float32)
if retLambdaHistory:
lambdaHistory = np.zeros((maxiter + 3, sizeX),
dtype=blockVectorX.dtype)
if retResidualNormsHistory:
residualNormsHistory = np.zeros((maxiter + 3, sizeX),
dtype=blockVectorX.dtype)
if verbosityLevel:
aux = "Solving "
if B is None:
aux += "standard"
else:
aux += "generalized"
aux += " eigenvalue problem with"
if M is None:
aux += "out"
aux += " preconditioning\n\n"
aux += "matrix size %d\n" % n
aux += "block size %d\n\n" % sizeX
if blockVectorY is None:
aux += "No constraints\n\n"
else:
if sizeY > 1:
aux += "%d constraints\n\n" % sizeY
else:
aux += "%d constraint\n\n" % sizeY
print(aux)
if (n - sizeY) < (5 * sizeX):
warnings.warn(
f"The problem size {n} minus the constraints size {sizeY} "
f"is too small relative to the block size {sizeX}. "
f"Using a dense eigensolver instead of LOBPCG iterations."
f"No output of the history of the iterations.",
UserWarning, stacklevel=2
)
sizeX = min(sizeX, n)
if blockVectorY is not None:
raise NotImplementedError(
"The dense eigensolver does not support constraints."
)
# Define the closed range of indices of eigenvalues to return.
if largest:
eigvals = (n - sizeX, n - 1)
else:
eigvals = (0, sizeX - 1)
try:
if isinstance(A, LinearOperator):
A = A(np.eye(n, dtype=int))
elif callable(A):
A = A(np.eye(n, dtype=int))
if A.shape != (n, n):
raise ValueError(
f"The shape {A.shape} of the primary matrix\n"
f"defined by a callable object is wrong.\n"
)
elif issparse(A):
A = A.toarray()
else:
A = np.asarray(A)
except Exception as e:
raise Exception(
f"Primary MatMul call failed with error\n"
f"{e}\n")
if B is not None:
try:
if isinstance(B, LinearOperator):
B = B(np.eye(n, dtype=int))
elif callable(B):
B = B(np.eye(n, dtype=int))
if B.shape != (n, n):
raise ValueError(
f"The shape {B.shape} of the secondary matrix\n"
f"defined by a callable object is wrong.\n"
)
elif issparse(B):
B = B.toarray()
else:
B = np.asarray(B)
except Exception as e:
raise Exception(
f"Secondary MatMul call failed with error\n"
f"{e}\n")
try:
vals, vecs = eigh(A,
B,
subset_by_index=eigvals,
check_finite=False)
if largest:
# Reverse order to be compatible with eigs() in 'LM' mode.
vals = vals[::-1]
vecs = vecs[:, ::-1]
return vals, vecs
except Exception as e:
raise Exception(
f"Dense eigensolver failed with error\n"
f"{e}\n"
)
if (residualTolerance is None) or (residualTolerance <= 0.0):
residualTolerance = np.sqrt(np.finfo(blockVectorX.dtype).eps) * n
A = _makeMatMat(A)
B = _makeMatMat(B)
M = _makeMatMat(M)
# Apply constraints to X.
if blockVectorY is not None:
if B is not None:
blockVectorBY = B(blockVectorY)
if blockVectorBY.shape != blockVectorY.shape:
raise ValueError(
f"The shape {blockVectorY.shape} "
f"of the constraint not preserved\n"
f"and changed to {blockVectorBY.shape} "
f"after multiplying by the secondary matrix.\n"
)
else:
blockVectorBY = blockVectorY
# gramYBY is a dense array.
gramYBY = blockVectorY.T.conj() @ blockVectorBY
try:
# gramYBY is a Cholesky factor from now on...
gramYBY = cho_factor(gramYBY, overwrite_a=True)
except LinAlgError as e:
raise ValueError("Linearly dependent constraints") from e
_applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)
##
# B-orthonormalize X.
blockVectorX, blockVectorBX, _ = _b_orthonormalize(
B, blockVectorX, verbosityLevel=verbosityLevel)
if blockVectorX is None:
raise ValueError("Linearly dependent initial approximations")
##
# Compute the initial Ritz vectors: solve the eigenproblem.
blockVectorAX = A(blockVectorX)
if blockVectorAX.shape != blockVectorX.shape:
raise ValueError(
f"The shape {blockVectorX.shape} "
f"of the initial approximations not preserved\n"
f"and changed to {blockVectorAX.shape} "
f"after multiplying by the primary matrix.\n"
)
gramXAX = blockVectorX.T.conj() @ blockVectorAX
_lambda, eigBlockVector = eigh(gramXAX, check_finite=False)
ii = _get_indx(_lambda, sizeX, largest)
_lambda = _lambda[ii]
if retLambdaHistory:
lambdaHistory[0, :] = _lambda
eigBlockVector = np.asarray(eigBlockVector[:, ii])
blockVectorX = _matmul_inplace(
blockVectorX, eigBlockVector,
verbosityLevel=verbosityLevel
)
blockVectorAX = _matmul_inplace(
blockVectorAX, eigBlockVector,
verbosityLevel=verbosityLevel
)
if B is not None:
blockVectorBX = _matmul_inplace(
blockVectorBX, eigBlockVector,
verbosityLevel=verbosityLevel
)
##
# Active index set.
activeMask = np.ones((sizeX,), dtype=bool)
##
# Main iteration loop.
blockVectorP = None # set during iteration
blockVectorAP = None
blockVectorBP = None
smallestResidualNorm = np.abs(np.finfo(blockVectorX.dtype).max)
iterationNumber = -1
restart = True
forcedRestart = False
explicitGramFlag = False
while iterationNumber < maxiter:
iterationNumber += 1
if B is not None:
aux = blockVectorBX * _lambda[np.newaxis, :]
else:
aux = blockVectorX * _lambda[np.newaxis, :]
blockVectorR = blockVectorAX - aux
aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
residualNorms = np.sqrt(np.abs(aux))
if retResidualNormsHistory:
residualNormsHistory[iterationNumber, :] = residualNorms
residualNorm = np.sum(np.abs(residualNorms)) / sizeX
if residualNorm < smallestResidualNorm:
smallestResidualNorm = residualNorm
bestIterationNumber = iterationNumber
bestblockVectorX = blockVectorX
elif residualNorm > 2**restartControl * smallestResidualNorm:
forcedRestart = True
blockVectorAX = A(blockVectorX)
if blockVectorAX.shape != blockVectorX.shape:
raise ValueError(
f"The shape {blockVectorX.shape} "
f"of the restarted iterate not preserved\n"
f"and changed to {blockVectorAX.shape} "
f"after multiplying by the primary matrix.\n"
)
if B is not None:
blockVectorBX = B(blockVectorX)
if blockVectorBX.shape != blockVectorX.shape:
raise ValueError(
f"The shape {blockVectorX.shape} "
f"of the restarted iterate not preserved\n"
f"and changed to {blockVectorBX.shape} "
f"after multiplying by the secondary matrix.\n"
)
ii = np.where(residualNorms > residualTolerance, True, False)
activeMask = activeMask & ii
currentBlockSize = activeMask.sum()
if verbosityLevel:
print(f"iteration {iterationNumber}")
print(f"current block size: {currentBlockSize}")
print(f"eigenvalue(s):\n{_lambda}")
print(f"residual norm(s):\n{residualNorms}")
if currentBlockSize == 0:
break
activeBlockVectorR = _as2d(blockVectorR[:, activeMask])
if iterationNumber > 0:
activeBlockVectorP = _as2d(blockVectorP[:, activeMask])
activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask])
if B is not None:
activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask])
if M is not None:
# Apply preconditioner T to the active residuals.
activeBlockVectorR = M(activeBlockVectorR)
##
# Apply constraints to the preconditioned residuals.
if blockVectorY is not None:
_applyConstraints(activeBlockVectorR,
gramYBY,
blockVectorBY,
blockVectorY)
##
# B-orthogonalize the preconditioned residuals to X.
if B is not None:
activeBlockVectorR = activeBlockVectorR - (
blockVectorX @
(blockVectorBX.T.conj() @ activeBlockVectorR)
)
else:
activeBlockVectorR = activeBlockVectorR - (
blockVectorX @
(blockVectorX.T.conj() @ activeBlockVectorR)
)
##
# B-orthonormalize the preconditioned residuals.
aux = _b_orthonormalize(
B, activeBlockVectorR, verbosityLevel=verbosityLevel)
activeBlockVectorR, activeBlockVectorBR, _ = aux
if activeBlockVectorR is None:
warnings.warn(
f"Failed at iteration {iterationNumber} with accuracies "
f"{residualNorms}\n not reaching the requested "
f"tolerance {residualTolerance}.",
UserWarning, stacklevel=2
)
break
activeBlockVectorAR = A(activeBlockVectorR)
if iterationNumber > 0:
if B is not None:
aux = _b_orthonormalize(
B, activeBlockVectorP, activeBlockVectorBP,
verbosityLevel=verbosityLevel
)
activeBlockVectorP, activeBlockVectorBP, invR = aux
else:
aux = _b_orthonormalize(B, activeBlockVectorP,
verbosityLevel=verbosityLevel)
activeBlockVectorP, _, invR = aux
# Function _b_orthonormalize returns None if Cholesky fails
if activeBlockVectorP is not None:
activeBlockVectorAP = _matmul_inplace(
activeBlockVectorAP, invR,
verbosityLevel=verbosityLevel
)
restart = forcedRestart
else:
restart = True
##
# Perform the Rayleigh Ritz Procedure:
# Compute symmetric Gram matrices:
if activeBlockVectorAR.dtype == "float32":
myeps = 1
else:
myeps = np.sqrt(np.finfo(activeBlockVectorR.dtype).eps)
if residualNorms.max() > myeps and not explicitGramFlag:
explicitGramFlag = False
else:
# Once explicitGramFlag, forever explicitGramFlag.
explicitGramFlag = True
# Shared memory assingments to simplify the code
if B is None:
blockVectorBX = blockVectorX
activeBlockVectorBR = activeBlockVectorR
if not restart:
activeBlockVectorBP = activeBlockVectorP
# Common submatrices:
gramXAR = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)
gramRAR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)
gramDtype = activeBlockVectorAR.dtype
if explicitGramFlag:
gramRAR = (gramRAR + gramRAR.T.conj()) / 2
gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
gramXAX = (gramXAX + gramXAX.T.conj()) / 2
gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
gramRBR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBR)
gramXBR = np.dot(blockVectorX.T.conj(), activeBlockVectorBR)
else:
gramXAX = np.diag(_lambda).astype(gramDtype)
gramXBX = np.eye(sizeX, dtype=gramDtype)
gramRBR = np.eye(currentBlockSize, dtype=gramDtype)
gramXBR = np.zeros((sizeX, currentBlockSize), dtype=gramDtype)
if not restart:
gramXAP = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)
gramRAP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)
gramPAP = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)
gramXBP = np.dot(blockVectorX.T.conj(), activeBlockVectorBP)
gramRBP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP)
if explicitGramFlag:
gramPAP = (gramPAP + gramPAP.T.conj()) / 2
gramPBP = np.dot(activeBlockVectorP.T.conj(),
activeBlockVectorBP)
else:
gramPBP = np.eye(currentBlockSize, dtype=gramDtype)
gramA = np.block(
[
[gramXAX, gramXAR, gramXAP],
[gramXAR.T.conj(), gramRAR, gramRAP],
[gramXAP.T.conj(), gramRAP.T.conj(), gramPAP],
]
)
gramB = np.block(
[
[gramXBX, gramXBR, gramXBP],
[gramXBR.T.conj(), gramRBR, gramRBP],
[gramXBP.T.conj(), gramRBP.T.conj(), gramPBP],
]
)
_handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel)
try:
_lambda, eigBlockVector = eigh(gramA,
gramB,
check_finite=False)
except LinAlgError as e:
# raise ValueError("eigh failed in lobpcg iterations") from e
if verbosityLevel:
warnings.warn(
f"eigh failed at iteration {iterationNumber} \n"
f"with error {e} causing a restart.\n",
UserWarning, stacklevel=2
)
# try again after dropping the direction vectors P from RR
restart = True
if restart:
gramA = np.block([[gramXAX, gramXAR], [gramXAR.T.conj(), gramRAR]])
gramB = np.block([[gramXBX, gramXBR], [gramXBR.T.conj(), gramRBR]])
_handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel)
try:
_lambda, eigBlockVector = eigh(gramA,
gramB,
check_finite=False)
except LinAlgError as e:
# raise ValueError("eigh failed in lobpcg iterations") from e
warnings.warn(
f"eigh failed at iteration {iterationNumber} with error\n"
f"{e}\n",
UserWarning, stacklevel=2
)
break
ii = _get_indx(_lambda, sizeX, largest)
_lambda = _lambda[ii]
eigBlockVector = eigBlockVector[:, ii]
if retLambdaHistory:
lambdaHistory[iterationNumber + 1, :] = _lambda
# Compute Ritz vectors.
if B is not None:
if not restart:
eigBlockVectorX = eigBlockVector[:sizeX]
eigBlockVectorR = eigBlockVector[sizeX:
sizeX + currentBlockSize]
eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
pp += np.dot(activeBlockVectorP, eigBlockVectorP)
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
app += np.dot(activeBlockVectorAP, eigBlockVectorP)
bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
bpp += np.dot(activeBlockVectorBP, eigBlockVectorP)
else:
eigBlockVectorX = eigBlockVector[:sizeX]
eigBlockVectorR = eigBlockVector[sizeX:]
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp
blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp
else:
if not restart:
eigBlockVectorX = eigBlockVector[:sizeX]
eigBlockVectorR = eigBlockVector[sizeX:
sizeX + currentBlockSize]
eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
pp += np.dot(activeBlockVectorP, eigBlockVectorP)
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
app += np.dot(activeBlockVectorAP, eigBlockVectorP)
else:
eigBlockVectorX = eigBlockVector[:sizeX]
eigBlockVectorR = eigBlockVector[sizeX:]
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
blockVectorP, blockVectorAP = pp, app
if B is not None:
aux = blockVectorBX * _lambda[np.newaxis, :]
else:
aux = blockVectorX * _lambda[np.newaxis, :]
blockVectorR = blockVectorAX - aux
aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
residualNorms = np.sqrt(np.abs(aux))
# Use old lambda in case of early loop exit.
if retLambdaHistory:
lambdaHistory[iterationNumber + 1, :] = _lambda
if retResidualNormsHistory:
residualNormsHistory[iterationNumber + 1, :] = residualNorms
residualNorm = np.sum(np.abs(residualNorms)) / sizeX
if residualNorm < smallestResidualNorm:
smallestResidualNorm = residualNorm
bestIterationNumber = iterationNumber + 1
bestblockVectorX = blockVectorX
if np.max(np.abs(residualNorms)) > residualTolerance:
warnings.warn(
f"Exited at iteration {iterationNumber} with accuracies \n"
f"{residualNorms}\n"
f"not reaching the requested tolerance {residualTolerance}.\n"
f"Use iteration {bestIterationNumber} instead with accuracy \n"
f"{smallestResidualNorm}.\n",
UserWarning, stacklevel=2
)
if verbosityLevel:
print(f"Final iterative eigenvalue(s):\n{_lambda}")
print(f"Final iterative residual norm(s):\n{residualNorms}")
blockVectorX = bestblockVectorX
# Making eigenvectors "exactly" satisfy the blockVectorY constrains
if blockVectorY is not None:
_applyConstraints(blockVectorX,
gramYBY,
blockVectorBY,
blockVectorY)
# Making eigenvectors "exactly" othonormalized by final "exact" RR
blockVectorAX = A(blockVectorX)
if blockVectorAX.shape != blockVectorX.shape:
raise ValueError(
f"The shape {blockVectorX.shape} "
f"of the postprocessing iterate not preserved\n"
f"and changed to {blockVectorAX.shape} "
f"after multiplying by the primary matrix.\n"
)
gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
blockVectorBX = blockVectorX
if B is not None:
blockVectorBX = B(blockVectorX)
if blockVectorBX.shape != blockVectorX.shape:
raise ValueError(
f"The shape {blockVectorX.shape} "
f"of the postprocessing iterate not preserved\n"
f"and changed to {blockVectorBX.shape} "
f"after multiplying by the secondary matrix.\n"
)
gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
_handle_gramA_gramB_verbosity(gramXAX, gramXBX, verbosityLevel)
gramXAX = (gramXAX + gramXAX.T.conj()) / 2
gramXBX = (gramXBX + gramXBX.T.conj()) / 2
try:
_lambda, eigBlockVector = eigh(gramXAX,
gramXBX,
check_finite=False)
except LinAlgError as e:
raise ValueError("eigh has failed in lobpcg postprocessing") from e
ii = _get_indx(_lambda, sizeX, largest)
_lambda = _lambda[ii]
eigBlockVector = np.asarray(eigBlockVector[:, ii])
blockVectorX = np.dot(blockVectorX, eigBlockVector)
blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
if B is not None:
blockVectorBX = np.dot(blockVectorBX, eigBlockVector)
aux = blockVectorBX * _lambda[np.newaxis, :]
else:
aux = blockVectorX * _lambda[np.newaxis, :]
blockVectorR = blockVectorAX - aux
aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
residualNorms = np.sqrt(np.abs(aux))
if retLambdaHistory:
lambdaHistory[bestIterationNumber + 1, :] = _lambda
if retResidualNormsHistory:
residualNormsHistory[bestIterationNumber + 1, :] = residualNorms
if retLambdaHistory:
lambdaHistory = lambdaHistory[
: bestIterationNumber + 2, :]
if retResidualNormsHistory:
residualNormsHistory = residualNormsHistory[
: bestIterationNumber + 2, :]
if np.max(np.abs(residualNorms)) > residualTolerance:
warnings.warn(
f"Exited postprocessing with accuracies \n"
f"{residualNorms}\n"
f"not reaching the requested tolerance {residualTolerance}.",
UserWarning, stacklevel=2
)
if verbosityLevel:
print(f"Final postprocessing eigenvalue(s):\n{_lambda}")
print(f"Final residual norm(s):\n{residualNorms}")
if retLambdaHistory:
lambdaHistory = np.vsplit(lambdaHistory, np.shape(lambdaHistory)[0])
lambdaHistory = [np.squeeze(i) for i in lambdaHistory]
if retResidualNormsHistory:
residualNormsHistory = np.vsplit(residualNormsHistory,
np.shape(residualNormsHistory)[0])
residualNormsHistory = [np.squeeze(i) for i in residualNormsHistory]
if retLambdaHistory:
if retResidualNormsHistory:
return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
else:
return _lambda, blockVectorX, lambdaHistory
else:
if retResidualNormsHistory:
return _lambda, blockVectorX, residualNormsHistory
else:
return _lambda, blockVectorX
| 40,882
| 36.784658
| 79
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_eigen/lobpcg/setup.py
|
def configuration(parent_package='',top_path=None):
    """Return the numpy.distutils configuration for the ``lobpcg``
    subpackage, registering its ``tests`` directory as package data.
    """
    from numpy.distutils.misc_util import Configuration
    cfg = Configuration('lobpcg', parent_package, top_path)
    cfg.add_data_dir('tests')
    return cfg
# Allow building this subpackage standalone via ``python setup.py <command>``
# using the legacy numpy.distutils machinery.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 343
| 25.461538
| 60
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_eigen/lobpcg/__init__.py
|
"""
Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)
LOBPCG is a preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
Call the function lobpcg - see help for lobpcg.lobpcg.
"""
from .lobpcg import *
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 420
| 23.764706
| 76
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py
|
""" Test functions for the sparse.linalg._eigen.lobpcg module
"""
import itertools
import platform
import sys
import pytest
import numpy as np
from numpy import ones, r_, diag
from numpy.testing import (assert_almost_equal, assert_equal,
assert_allclose, assert_array_less)
from scipy import sparse
from scipy.linalg import eig, eigh, toeplitz, orth
from scipy.sparse import spdiags, diags, eye, csr_matrix
from scipy.sparse.linalg import eigs, LinearOperator
from scipy.sparse.linalg._eigen.lobpcg import lobpcg
from scipy.sparse.linalg._eigen.lobpcg.lobpcg import _b_orthonormalize
# True on 32-bit Python builds; used below to xfail tolerance-sensitive tests.
_IS_32BIT = (sys.maxsize < 2**32)
# Dtype groups that the lobpcg tests are parametrized over.
INT_DTYPES = {np.intc, np.int_, np.longlong, np.uintc, np.uint, np.ulonglong}
# np.half is unsupported on many test systems so excluded
REAL_DTYPES = {np.single, np.double, np.longdouble}
COMPLEX_DTYPES = {np.csingle, np.cdouble, np.clongdouble}
# use sorted tuple to ensure fixed order of tests
# (``^`` is set symmetric difference; the groups are disjoint, so this is
# effectively a union)
VDTYPES = tuple(sorted(REAL_DTYPES ^ COMPLEX_DTYPES, key=str))
MDTYPES = tuple(sorted(INT_DTYPES ^ REAL_DTYPES ^ COMPLEX_DTYPES, key=str))
def sign_align(A, B):
    """Flip the sign of each column of ``A`` so that the sign of its first
    entry matches the sign of the first entry of the matching column of ``B``.
    """
    aligned_cols = []
    for a_col, b_col in zip(A.T, B.T):
        aligned_cols.append(a_col * np.sign(a_col[0]) * np.sign(b_col[0]))
    return np.array(aligned_cols).T
def ElasticRod(n):
    """Build the stiffness and mass matrices ``A, B`` of the generalized
    eigenvalue problem for the fixed-free elastic rod vibration model.
    """
    L = 1.0          # rod length
    le = L / n       # element length
    rho = 7.85e3     # density
    S = 1.e-4        # cross-section area
    E = 2.1e11       # Young modulus
    mass = rho * S * le / 6.
    k = E * S / le
    main_A = r_[2. * ones(n - 1), 1]
    main_B = r_[4. * ones(n - 1), 2]
    off = ones(n - 1)
    A = k * (diag(main_A) - diag(off, 1) - diag(off, -1))
    B = mass * (diag(main_B) + diag(off, 1) + diag(off, -1))
    return A, B
def MikotaPair(n):
    """Build the Mikota pair of diagonal/tridiagonal matrices ``A, B`` for a
    generalized eigenvalue problem whose eigenvalues are exactly the squares
    of the integers 1, 2, ..., n — handy for verifying solvers.
    """
    indices = np.arange(1, n + 1)
    B = diag(1. / indices)
    sub = np.arange(n - 1, 0, -1)
    main = np.arange(2 * n - 1, 0, -2)
    A = diag(main) - diag(sub, -1) - diag(sub, 1)
    return A, B
def compare_solutions(A, B, m):
    """Check that lobpcg agrees with the dense solver ``eig`` on the
    smallest ``m // 2`` eigenvalues of the pencil ``(A, B)``.
    """
    n = A.shape[0]
    rnd = np.random.RandomState(0)
    X = orth(rnd.random((n, m)))
    lobpcg_vals, _ = lobpcg(A, X, B=B, tol=1e-2, maxiter=50, largest=False)
    lobpcg_vals.sort()
    dense_vals, _ = eig(A, b=B)
    dense_vals.sort()
    half = int(m / 2)
    assert_almost_equal(dense_vals[:half], lobpcg_vals[:half], decimal=2)
def test_Small():
    """Run both model problems at a size small enough that lobpcg warns
    about the problem size and falls back to a dense solve.
    """
    for build_pencil in (ElasticRod, MikotaPair):
        A, B = build_pencil(10)
        with pytest.warns(UserWarning, match="The problem size"):
            compare_solutions(A, B, 10)
def test_ElasticRod():
    """lobpcg vs. dense eig on the elastic-rod pencil; an early-exit
    warning is expected for this small iterative run.
    """
    stiffness, mass = ElasticRod(20)
    with pytest.warns(UserWarning, match="Exited at iteration"):
        compare_solutions(stiffness, mass, 2)
def test_MikotaPair():
    """lobpcg vs. dense eig on the Mikota pencil."""
    operA, operB = MikotaPair(20)
    compare_solutions(operA, operB, 2)
@pytest.mark.parametrize("n", [50])
@pytest.mark.parametrize("m", [1, 2, 10])
@pytest.mark.parametrize("Vdtype", REAL_DTYPES)
@pytest.mark.parametrize("Bdtype", REAL_DTYPES)
@pytest.mark.parametrize("BVdtype", REAL_DTYPES)
def test_b_orthonormalize(n, m, Vdtype, Bdtype, BVdtype):
"""Test B-orthonormalization by Cholesky with callable 'B'.
The function '_b_orthonormalize' is key in LOBPCG but may
lead to numerical instabilities. The input vectors are often
badly scaled, so the function needs scale-invariant Cholesky;
see https://netlib.org/lapack/lawnspdf/lawn14.pdf.
"""
rnd = np.random.RandomState(0)
X = rnd.standard_normal((n, m)).astype(Vdtype)
Xcopy = np.copy(X)
vals = np.arange(1, n+1, dtype=float)
B = diags([vals], [0], (n, n)).astype(Bdtype)
BX = B @ X
BX = BX.astype(BVdtype)
dtype = min(X.dtype, B.dtype, BX.dtype)
# np.longdouble tol cannot be achieved on most systems
atol = m * n * max(np.finfo(dtype).eps, np.finfo(np.double).eps)
Xo, BXo, _ = _b_orthonormalize(lambda v: B @ v, X, BX)
# Check in-place.
assert_equal(X, Xo)
assert_equal(id(X), id(Xo))
assert_equal(BX, BXo)
assert_equal(id(BX), id(BXo))
# Check BXo.
assert_allclose(B @ Xo, BXo, atol=atol, rtol=atol)
# Check B-orthonormality
assert_allclose(Xo.T.conj() @ B @ Xo, np.identity(m),
atol=atol, rtol=atol)
# Repeat without BX in outputs
X = np.copy(Xcopy)
Xo1, BXo1, _ = _b_orthonormalize(lambda v: B @ v, X)
assert_allclose(Xo, Xo1, atol=atol, rtol=atol)
assert_allclose(BXo, BXo1, atol=atol, rtol=atol)
# Check in-place.
assert_equal(X, Xo1)
assert_equal(id(X), id(Xo1))
# Check BXo1.
assert_allclose(B @ Xo1, BXo1, atol=atol, rtol=atol)
# Introduce column-scaling in X.
scaling = 1.0 / np.geomspace(10, 1e10, num=m)
X = Xcopy * scaling
X = X.astype(Vdtype)
BX = B @ X
BX = BX.astype(BVdtype)
# Check scaling-invariance of Cholesky-based orthonormalization
Xo1, BXo1, _ = _b_orthonormalize(lambda v: B @ v, X, BX)
# The output should be the same, up the signs of the columns.
Xo1 = sign_align(Xo1, Xo)
assert_allclose(Xo, Xo1, atol=atol, rtol=atol)
BXo1 = sign_align(BXo1, BXo)
assert_allclose(BXo, BXo1, atol=atol, rtol=atol)
@pytest.mark.filterwarnings("ignore:Exited at iteration 0")
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
def test_nonhermitian_warning(capsys):
    """Check the warning of a Ritz matrix being not Hermitian
    by feeding a non-Hermitian input matrix.
    Also check stdout since verbosityLevel=1 and lack of stderr.
    """
    n = 10
    X = np.arange(n * 2).reshape(n, 2).astype(np.float32)
    # Deliberately non-symmetric A to trigger the gramA warning.
    A = np.arange(n * n).reshape(n, n).astype(np.float32)
    with pytest.warns(UserWarning, match="Matrix gramA"):
        _, _ = lobpcg(A, X, verbosityLevel=1, maxiter=0)
    out, err = capsys.readouterr()  # Capture output
    assert out.startswith("Solving standard eigenvalue")  # Test stdout
    assert err == ''  # Test empty stderr
    # Make the matrix symmetric and the UserWarning disappears.
    A += A.T
    _, _ = lobpcg(A, X, verbosityLevel=1, maxiter=0)
    out, err = capsys.readouterr()  # Capture output
    assert out.startswith("Solving standard eigenvalue")  # Test stdout
    assert err == ''  # Test empty stderr
def test_regression():
    """The identity matrix must yield the eigenvalue one.

    Regression test for
    https://mail.python.org/pipermail/scipy-user/2010-October/026944.html
    """
    order = 10
    guess = np.ones((order, 1))
    eigvals, _ = lobpcg(np.identity(order), guess)
    assert_allclose(eigvals, [1])
@pytest.mark.filterwarnings("ignore:The problem size")
@pytest.mark.parametrize('n, m, m_excluded', [(30, 4, 3), (4, 2, 0)])
def test_diagonal(n, m, m_excluded):
    """Test ``m - m_excluded`` eigenvalues and eigenvectors of
    diagonal matrices of the size ``n`` varying matrix formats:
    dense array, sparse matrix, and ``LinearOperator`` for both
    matrices in the generalized eigenvalue problem ``Av = cBv``
    and for the preconditioner.
    """
    rnd = np.random.RandomState(0)
    # Define the generalized eigenvalue problem Av = cBv
    # where (c, v) is a generalized eigenpair,
    # A is the diagonal matrix whose entries are 1,...n,
    # B is the identity matrix.
    vals = np.arange(1, n+1, dtype=float)
    # A in sparse (A_s), dense (A_a), and LinearOperator (A_lo) form.
    A_s = diags([vals], [0], (n, n))
    A_a = A_s.toarray()
    def A_f(x):
        return A_s @ x
    A_lo = LinearOperator(matvec=A_f,
                          matmat=A_f,
                          shape=(n, n), dtype=float)
    # B as sparse identity in two formats, plus a LinearOperator wrapper.
    B_a = eye(n)
    B_s = csr_matrix(B_a)
    def B_f(x):
        return B_a @ x
    B_lo = LinearOperator(matvec=B_f,
                          matmat=B_f,
                          shape=(n, n), dtype=float)
    # Let the preconditioner M be the inverse of A.
    M_s = diags([1./vals], [0], (n, n))
    M_a = M_s.toarray()
    def M_f(x):
        return M_s @ x
    M_lo = LinearOperator(matvec=M_f,
                          matmat=M_f,
                          shape=(n, n), dtype=float)
    # Pick random initial vectors.
    X = rnd.normal(size=(n, m))
    # Require that the returned eigenvectors be in the orthogonal complement
    # of the first few standard basis vectors.
    if m_excluded > 0:
        Y = np.eye(n, m_excluded)
    else:
        Y = None
    # Every format combination must recover the eigenvalues
    # m_excluded+1, ..., m_excluded+m of the diagonal of A.
    for A in [A_a, A_s, A_lo]:
        for B in [B_a, B_s, B_lo]:
            for M in [M_a, M_s, M_lo]:
                eigvals, vecs = lobpcg(A, X, B, M=M, Y=Y,
                                       maxiter=40, largest=False)
                assert_allclose(eigvals, np.arange(1+m_excluded,
                                                   1+m_excluded+m))
                _check_eigen(A, eigvals, vecs, rtol=1e-3, atol=1e-3)
def _check_eigen(M, w, V, rtol=1e-8, atol=1e-14):
"""Check if the eigenvalue residual is small.
"""
mult_wV = np.multiply(w, V)
dot_MV = M.dot(V)
assert_allclose(mult_wV, dot_MV, rtol=rtol, atol=atol)
def _check_fiedler(n, p):
    """Check the Fiedler vector computation.

    Builds the Laplacian of the n-vertex path graph and cross-validates its
    eigenpairs between the analytic formulas, ``eigh``, and ``lobpcg`` run
    with block size ``p``.
    """
    # This is not necessarily the recommended way to find the Fiedler vector.
    col = np.zeros(n)
    col[1] = 1
    A = toeplitz(col)  # adjacency matrix of the path graph
    D = np.diag(A.sum(axis=1))
    L = D - A  # graph Laplacian
    # Compute the full eigendecomposition using tricks, e.g.
    # http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf
    tmp = np.pi * np.arange(n) / n
    analytic_w = 2 * (1 - np.cos(tmp))
    analytic_V = np.cos(np.outer(np.arange(n) + 1/2, tmp))
    _check_eigen(L, analytic_w, analytic_V)
    # Compute the full eigendecomposition using eigh.
    eigh_w, eigh_V = eigh(L)
    _check_eigen(L, eigh_w, eigh_V)
    # Check that the first eigenvalue is near zero and that the rest agree.
    assert_array_less(np.abs([eigh_w[0], analytic_w[0]]), 1e-14)
    assert_allclose(eigh_w[1:], analytic_w[1:])
    # Check small lobpcg eigenvalues.
    X = analytic_V[:, :p]
    lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
    assert_equal(lobpcg_w.shape, (p,))
    assert_equal(lobpcg_V.shape, (n, p))
    _check_eigen(L, lobpcg_w, lobpcg_V)
    assert_array_less(np.abs(np.min(lobpcg_w)), 1e-14)
    assert_allclose(np.sort(lobpcg_w)[1:], analytic_w[1:p])
    # Check large lobpcg eigenvalues.
    X = analytic_V[:, -p:]
    lobpcg_w, lobpcg_V = lobpcg(L, X, largest=True)
    assert_equal(lobpcg_w.shape, (p,))
    assert_equal(lobpcg_V.shape, (n, p))
    _check_eigen(L, lobpcg_w, lobpcg_V)
    assert_allclose(np.sort(lobpcg_w), analytic_w[-p:])
    # Look for the Fiedler vector using good but not exactly correct guesses.
    fiedler_guess = np.concatenate((np.ones(n//2), -np.ones(n-n//2)))
    X = np.vstack((np.ones(n), fiedler_guess)).T
    lobpcg_w, _ = lobpcg(L, X, largest=False)
    # Mathematically, the smaller eigenvalue should be zero
    # and the larger should be the algebraic connectivity.
    lobpcg_w = np.sort(lobpcg_w)
    assert_allclose(lobpcg_w, analytic_w[:2], atol=1e-14)
def test_fiedler_small_8():
    """Fiedler check on a problem small enough to hit the dense path."""
    # n=8 with block size 2 satisfies 8 < 2*5, so lobpcg takes the dense
    # workaround and warns about the small problem size.
    with pytest.warns(UserWarning, match="The problem size"):
        _check_fiedler(8, 2)
def test_fiedler_large_12():
    """Fiedler check on a problem large enough to avoid the dense path."""
    # 2*5 <= 12, so the dense workaround is not triggered.
    _check_fiedler(12, 2)
@pytest.mark.filterwarnings("ignore:Failed at iteration")
@pytest.mark.filterwarnings("ignore:Exited at iteration")
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
def test_failure_to_run_iterations():
    """Check that the code exits gracefully without breaking. Issue #10974.
    The code may or not issue a warning, filtered out. Issue #15935, #17954.
    """
    rnd = np.random.RandomState(0)
    factor = rnd.standard_normal((100, 10))
    gram = factor @ factor.T  # rank-deficient PSD matrix of order 100
    guess = rnd.standard_normal((factor.shape[0], 4))
    eigvals, _ = lobpcg(gram, guess, maxiter=40, tol=1e-12)
    assert np.max(eigvals) > 0
def test_failure_to_run_iterations_nonsymmetric():
    """Check that the code exits gracefully without breaking
    if the matrix is not symmetric.
    """
    # Non-symmetric nilpotent matrix; lobpcg assumes symmetry, so it is
    # expected to stop early with a warning rather than crash.
    A = np.zeros((10, 10))
    A[0, 1] = 1
    Q = np.ones((10, 1))
    with pytest.warns(UserWarning, match="Exited at iteration 2"):
        eigenvalues, _ = lobpcg(A, Q, maxiter=20)
    assert np.max(eigenvalues) > 0
@pytest.mark.filterwarnings("ignore:The problem size")
def test_hermitian():
    """Check complex-value Hermitian cases.
    """
    rnd = np.random.RandomState(0)
    sizes = [3, 12]
    ks = [1, 2]
    gens = [True, False]
    # Sweep problem size, block size, standard vs. generalized problem,
    # and single vs. double complex precision of H, X, and B independently.
    for s, k, gen, dh, dx, db in (
            itertools.product(sizes, ks, gens, gens, gens, gens)
    ):
        # Random Hermitian H, diagonally shifted to make it definite.
        H = rnd.random((s, s)) + 1.j * rnd.random((s, s))
        H = 10 * np.eye(s) + H + H.T.conj()
        H = H.astype(np.complex128) if dh else H.astype(np.complex64)
        X = rnd.standard_normal((s, k))
        X = X + 1.j * rnd.standard_normal((s, k))
        X = X.astype(np.complex128) if dx else X.astype(np.complex64)
        if not gen:
            B = np.eye(s)
            w, v = lobpcg(H, X, maxiter=99, verbosityLevel=0)
            # Also test mixing complex H with real B.
            wb, _ = lobpcg(H, X, B, maxiter=99, verbosityLevel=0)
            assert_allclose(w, wb, rtol=1e-6)
            w0, _ = eigh(H)
        else:
            # Random Hermitian definite B for the generalized problem.
            B = rnd.random((s, s)) + 1.j * rnd.random((s, s))
            B = 10 * np.eye(s) + B.dot(B.T.conj())
            B = B.astype(np.complex128) if db else B.astype(np.complex64)
            w, v = lobpcg(H, X, B, maxiter=99, verbosityLevel=0)
            w0, _ = eigh(H, B)
        for wx, vx in zip(w, v.T):
            # Check eigenvector: relative eigen-residual must be small.
            assert_allclose(np.linalg.norm(H.dot(vx) - B.dot(vx) * wx)
                            / np.linalg.norm(H.dot(vx)),
                            0, atol=5e-2, rtol=0)
            # Compare eigenvalues against the closest dense-solver value.
            j = np.argmin(abs(w0 - wx))
            assert_allclose(wx, w0[j], rtol=1e-4)
# The n=5 case tests the alternative small matrix code path that uses eigh().
@pytest.mark.filterwarnings("ignore:The problem size")
@pytest.mark.parametrize('n, atol', [(20, 1e-3), (5, 1e-8)])
def test_eigs_consistency(n, atol):
    """Compare the two largest eigenpairs from lobpcg against eigs."""
    diag_entries = np.arange(1, n + 1, dtype=np.float64)
    A = spdiags(diag_entries, 0, n, n)
    rnd = np.random.RandomState(0)
    X = rnd.standard_normal((n, 2))
    lobpcg_vals, lobpcg_vecs = lobpcg(A, X, largest=True, maxiter=100)
    arpack_vals, _ = eigs(A, k=2)
    _check_eigen(A, lobpcg_vals, lobpcg_vecs, atol=atol, rtol=0)
    assert_allclose(np.sort(arpack_vals), np.sort(lobpcg_vals), atol=1e-14)
def test_verbosity():
    """Ensure a high ``verbosityLevel`` runs without errors."""
    rnd = np.random.RandomState(0)
    factor = rnd.standard_normal((10, 10))
    A = factor @ factor.T
    Q = rnd.standard_normal((factor.shape[0], 1))
    with pytest.warns(UserWarning, match="Exited at iteration"):
        _, _ = lobpcg(A, Q, maxiter=3, verbosityLevel=9)
@pytest.mark.xfail(_IS_32BIT and sys.platform == 'win32',
                   reason="tolerance violation on windows")
@pytest.mark.xfail(platform.machine() == 'ppc64le',
                   reason="fails on ppc64le")
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
def test_tolerance_float32():
    """Check lobpcg for attainable tolerance in float32.
    """
    rnd = np.random.RandomState(0)
    n, m = 50, 3
    # Negative diagonal so the "smallest" eigenvalues are -1, -2, -3.
    A = diags([-np.arange(1, n + 1)], [0], (n, n)).astype(np.float32)
    X = rnd.standard_normal((n, m)).astype(np.float32)
    eigvals, _ = lobpcg(A, X, tol=1.25e-5, maxiter=50, verbosityLevel=0)
    assert_allclose(eigvals, -np.arange(1, 1 + m), atol=2e-5, rtol=1e-5)
@pytest.mark.parametrize("vdtype", VDTYPES)
@pytest.mark.parametrize("mdtype", MDTYPES)
@pytest.mark.parametrize("arr_type", [np.array,
sparse.csr_matrix,
sparse.coo_matrix])
def test_dtypes(vdtype, mdtype, arr_type):
"""Test lobpcg in various dtypes.
"""
rnd = np.random.RandomState(0)
n = 12
m = 2
A = arr_type(np.diag(np.arange(1, n + 1)).astype(mdtype))
X = rnd.random((n, m))
X = X.astype(vdtype)
eigvals, eigvecs = lobpcg(A, X, tol=1e-2, largest=False)
assert_allclose(eigvals, np.arange(1, 1 + m), atol=1e-1)
# eigenvectors must be nearly real in any case
assert_allclose(np.sum(np.abs(eigvecs - eigvecs.conj())), 0, atol=1e-2)
@pytest.mark.filterwarnings("ignore:Exited at iteration")
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
def test_inplace_warning():
    """Check lobpcg gives a warning in '_b_orthonormalize'
    that in-place orthogonalization is impossible due to dtype mismatch.
    """
    rnd = np.random.RandomState(0)
    n, m = 6, 1
    # Complex matrix with a real iterate block forces the dtype mismatch.
    A = diags([-np.arange(1, n + 1)], [0], (n, n)).astype(np.cdouble)
    X = rnd.standard_normal((n, m))
    with pytest.warns(UserWarning, match="Inplace update"):
        eigvals, _ = lobpcg(A, X, maxiter=2, verbosityLevel=1)
def test_maxit():
    """Check lobpcg if maxit=maxiter runs maxiter iterations and
    if maxit=None runs 20 iterations (the default)
    by checking the size of the iteration history output, which should
    be the number of iterations plus 3 (initial, final, and postprocessing)
    typically when maxiter is small and the choice of the best is passive.
    """
    rnd = np.random.RandomState(0)
    n, m = 50, 4
    A = diags([-np.arange(1, n + 1)], [0], (n, n)).astype(np.float32)
    X = rnd.standard_normal((n, m)).astype(np.float64)
    for maxiter in range(1, 4):
        with pytest.warns(UserWarning, match="Exited at iteration"):
            _, _, lam_hist, res_hist = lobpcg(
                A, X, tol=1e-8, maxiter=maxiter,
                retLambdaHistory=True,
                retResidualNormsHistory=True)
        assert_allclose(np.shape(lam_hist)[0], maxiter + 3)
        assert_allclose(np.shape(res_hist)[0], maxiter + 3)
    with pytest.warns(UserWarning, match="Exited at iteration"):
        eigvals, _, lam_hist, res_hist = lobpcg(
            A, X, tol=1e-8,
            retLambdaHistory=True,
            retResidualNormsHistory=True)
    assert_allclose(np.shape(lam_hist)[0], 20 + 3)
    assert_allclose(np.shape(res_hist)[0], 20 + 3)
    # The returned eigenvalues are the last entry of the history.
    assert_allclose(eigvals, lam_hist[-1])
    # Both history outputs are plain lists ...
    assert isinstance(lam_hist, list)
    assert isinstance(res_hist, list)
    # ... whose entries stack into rectangular arrays.
    assert_allclose(np.shape(lam_hist), np.shape(np.asarray(lam_hist)))
    assert_allclose(np.shape(res_hist), np.shape(np.asarray(res_hist)))
@pytest.mark.slow
@pytest.mark.parametrize("n", [15])
@pytest.mark.parametrize("m", [1, 2])
@pytest.mark.filterwarnings("ignore:Exited at iteration")
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
def test_diagonal_data_types(n, m):
    """Check lobpcg for diagonal matrices for all matrix types.
    Constraints are imposed, so a dense eigensolver eig cannot run.
    """
    rnd = np.random.RandomState(0)
    # Define the generalized eigenvalue problem Av = cBv
    # where (c, v) is a generalized eigenpair,
    # and where we choose A and B to be diagonal.
    vals = np.arange(1, n + 1)
    # list_sparse_format = ['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil']
    list_sparse_format = ['coo']
    sparse_formats = len(list_sparse_format)
    for s_f_i, s_f in enumerate(list_sparse_format):
        # A is supplied in many equivalent forms: dense/sparse, float64/float32,
        # a plain callable, and a LinearOperator wrapping the callable.
        As64 = diags([vals * vals], [0], (n, n), format=s_f)
        As32 = As64.astype(np.float32)
        Af64 = As64.toarray()
        Af32 = Af64.astype(np.float32)
        def As32f(x):
            return As32 @ x
        As32LO = LinearOperator(matvec=As32f,
                                matmat=As32f,
                                shape=(n, n),
                                dtype=As32.dtype)
        listA = [Af64, As64, Af32, As32, As32f, As32LO, lambda v: As32 @ v]
        # Same variety of forms for the B matrix of the pencil (A, B).
        Bs64 = diags([vals], [0], (n, n), format=s_f)
        Bf64 = Bs64.toarray()
        Bs32 = Bs64.astype(np.float32)
        def Bs32f(x):
            return Bs32 @ x
        Bs32LO = LinearOperator(matvec=Bs32f,
                                matmat=Bs32f,
                                shape=(n, n),
                                dtype=Bs32.dtype)
        listB = [Bf64, Bs64, Bs32, Bs32f, Bs32LO, lambda v: Bs32 @ v]
        # Define the preconditioner function as LinearOperator.
        # M approximates inv(A) since A is diagonal with entries vals**2
        # and M is diagonal with entries 1/vals.
        Ms64 = diags([1./vals], [0], (n, n), format=s_f)
        def Ms64precond(x):
            return Ms64 @ x
        Ms64precondLO = LinearOperator(matvec=Ms64precond,
                                       matmat=Ms64precond,
                                       shape=(n, n),
                                       dtype=Ms64.dtype)
        Mf64 = Ms64.toarray()
        def Mf64precond(x):
            return Mf64 @ x
        Mf64precondLO = LinearOperator(matvec=Mf64precond,
                                       matmat=Mf64precond,
                                       shape=(n, n),
                                       dtype=Mf64.dtype)
        Ms32 = Ms64.astype(np.float32)
        def Ms32precond(x):
            return Ms32 @ x
        Ms32precondLO = LinearOperator(matvec=Ms32precond,
                                       matmat=Ms32precond,
                                       shape=(n, n),
                                       dtype=Ms32.dtype)
        Mf32 = Ms32.toarray()
        def Mf32precond(x):
            return Mf32 @ x
        Mf32precondLO = LinearOperator(matvec=Mf32precond,
                                       matmat=Mf32precond,
                                       shape=(n, n),
                                       dtype=Mf32.dtype)
        listM = [None, Ms64, Ms64precondLO, Mf64precondLO, Ms64precond,
                 Ms32, Ms32precondLO, Mf32precondLO, Ms32precond]
        # Setup matrix of the initial approximation to the eigenvectors
        # (cannot be sparse array).
        Xf64 = rnd.random((n, m))
        Xf32 = Xf64.astype(np.float32)
        listX = [Xf64, Xf32]
        # Require that the returned eigenvectors be in the orthogonal complement
        # of the first few standard basis vectors (cannot be sparse array).
        m_excluded = 3
        Yf64 = np.eye(n, m_excluded, dtype=float)
        Yf32 = np.eye(n, m_excluded, dtype=np.float32)
        listY = [Yf64, Yf32]
        tests = list(itertools.product(listA, listB, listM, listX, listY))
        # This is one of the slower tests because there are >1,000 configs
        # to test here, instead of checking product of all input, output types
        # test each configuration for the first sparse format, and then
        # for one additional sparse format. this takes 2/7=30% as long as
        # testing all configurations for all sparse formats.
        if s_f_i > 0:
            tests = tests[s_f_i - 1::sparse_formats-1]
        for A, B, M, X, Y in tests:
            eigvals, _ = lobpcg(A, X, B=B, M=M, Y=Y, tol=1e-4,
                                maxiter=100, largest=False)
            # With basis vectors 0..m_excluded-1 deflated away, the smallest
            # generalized eigenvalues of the diagonal pencil are
            # m_excluded+1, m_excluded+2, ...
            assert_allclose(eigvals,
                            np.arange(1 + m_excluded, 1 + m_excluded + m),
                            atol=1e-5)
| 23,518
| 35.691108
| 80
|
py
|
scipy
|
scipy-main/scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/io/mmio.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io` namespace for importing the functions
# included below.
import warnings
from . import _mmio
__all__ = [ # noqa: F822
'mminfo', 'mmread', 'mmwrite', 'MMFile',
'coo_matrix', 'isspmatrix', 'asstr'
]
def __dir__():
    # Restrict dir()/tab completion on this deprecated shim to the public API.
    return __all__
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): forward known public names to the
    # private ``_mmio`` implementation with a deprecation warning; any other
    # attribute is an error.
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.io` namespace, "
                      "the `scipy.io.mmio` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_mmio, name)
    raise AttributeError(
        "scipy.io.mmio is deprecated and has no attribute "
        f"{name}. Try looking in scipy.io instead.")
| 779
| 25.896552
| 76
|
py
|
scipy
|
scipy-main/scipy/io/_netcdf.py
|
"""
NetCDF reader/writer module.
This module is used to read and create NetCDF files. NetCDF files are
accessed through the `netcdf_file` object. Data written to and from NetCDF
files are contained in `netcdf_variable` objects. Attributes are given
as member variables of the `netcdf_file` and `netcdf_variable` objects.
This module implements the Scientific.IO.NetCDF API to read and create
NetCDF files. The same API is also used in the PyNIO and pynetcdf
modules, allowing these modules to be used interchangeably when working
with NetCDF files.
Only NetCDF3 is supported here; for NetCDF4 see
`netCDF4-python <http://unidata.github.io/netcdf4-python/>`__,
which has a similar API.
"""
# TODO:
# * properly implement ``_FillValue``.
# * fix character variables.
# * implement PAGESIZE for Python 2.6?
# The Scientific.IO.NetCDF API allows attributes to be added directly to
# instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate
# between user-set attributes and instance attributes, user-set attributes
# are automatically stored in the ``_attributes`` attribute by overloading
#``__setattr__``. This is the reason why the code sometimes uses
#``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``;
# otherwise the key would be inserted into userspace attributes.
__all__ = ['netcdf_file', 'netcdf_variable']
import warnings
import weakref
from operator import mul
from platform import python_implementation
import mmap as mm
import numpy as np
from numpy import frombuffer, dtype, empty, array, asarray
from numpy import little_endian as LITTLE_ENDIAN
from functools import reduce
# True on PyPy, where mmapped files often cannot be closed until the GC runs;
# used below to pick a safer default for the ``mmap`` argument.
IS_PYPY = python_implementation() == 'PyPy'
# Header markers of the NetCDF 3 ("classic") binary format; all big-endian.
ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00'
ZERO = b'\x00\x00\x00\x00'
# Tags for the six external data types of NetCDF 3.
NC_BYTE = b'\x00\x00\x00\x01'
NC_CHAR = b'\x00\x00\x00\x02'
NC_SHORT = b'\x00\x00\x00\x03'
NC_INT = b'\x00\x00\x00\x04'
NC_FLOAT = b'\x00\x00\x00\x05'
NC_DOUBLE = b'\x00\x00\x00\x06'
# Tags introducing the dimension, variable and attribute header sections.
NC_DIMENSION = b'\x00\x00\x00\n'
NC_VARIABLE = b'\x00\x00\x00\x0b'
NC_ATTRIBUTE = b'\x00\x00\x00\x0c'
# Default fill patterns, one per external type (used when padding variables).
FILL_BYTE = b'\x81'
FILL_CHAR = b'\x00'
FILL_SHORT = b'\x80\x01'
FILL_INT = b'\x80\x00\x00\x01'
FILL_FLOAT = b'\x7C\xF0\x00\x00'
FILL_DOUBLE = b'\x47\x9E\x00\x00\x00\x00\x00\x00'
# NetCDF type tag -> (numpy typecode character, item size in bytes).
TYPEMAP = {NC_BYTE: ('b', 1),
           NC_CHAR: ('c', 1),
           NC_SHORT: ('h', 2),
           NC_INT: ('i', 4),
           NC_FLOAT: ('f', 4),
           NC_DOUBLE: ('d', 8)}
# NetCDF type tag -> default fill pattern.
FILLMAP = {NC_BYTE: FILL_BYTE,
           NC_CHAR: FILL_CHAR,
           NC_SHORT: FILL_SHORT,
           NC_INT: FILL_INT,
           NC_FLOAT: FILL_FLOAT,
           NC_DOUBLE: FILL_DOUBLE}
# (numpy typecode, item size) -> NetCDF type tag; inverse of TYPEMAP plus
# a few aliases produced by numpy when converting generic attribute values.
REVERSE = {('b', 1): NC_BYTE,
           ('B', 1): NC_CHAR,
           ('c', 1): NC_CHAR,
           ('h', 2): NC_SHORT,
           ('i', 4): NC_INT,
           ('f', 4): NC_FLOAT,
           ('d', 8): NC_DOUBLE,
           # these come from asarray(1).dtype.char and asarray('foo').dtype.char,
           # used when getting the types from generic attributes.
           ('l', 4): NC_INT,
           ('S', 1): NC_CHAR}
class netcdf_file:
"""
A file object for NetCDF data.
A `netcdf_file` object has two standard attributes: `dimensions` and
`variables`. The values of both are dictionaries, mapping dimension
names to their associated lengths and variable names to variables,
respectively. Application programs should never modify these
dictionaries.
All other attributes correspond to global attributes defined in the
NetCDF file. Global file attributes are created by assigning to an
attribute of the `netcdf_file` object.
Parameters
----------
filename : string or file-like
string -> filename
mode : {'r', 'w', 'a'}, optional
read-write-append mode, default is 'r'
mmap : None or bool, optional
Whether to mmap `filename` when reading. Default is True
when `filename` is a file name, False when `filename` is a
file-like object. Note that when mmap is in use, data arrays
returned refer directly to the mmapped data on disk, and the
file cannot be closed as long as references to it exist.
version : {1, 2}, optional
version of netcdf to read / write, where 1 means *Classic
format* and 2 means *64-bit offset format*. Default is 1. See
`here <https://www.unidata.ucar.edu/software/netcdf/docs/netcdf_introduction.html#select_format>`__
for more info.
maskandscale : bool, optional
Whether to automatically scale and/or mask data based on attributes.
Default is False.
Notes
-----
The major advantage of this module over other modules is that it doesn't
require the code to be linked to the NetCDF libraries. This module is
derived from `pupynere <https://bitbucket.org/robertodealmeida/pupynere/>`_.
NetCDF files are a self-describing binary data format. The file contains
metadata that describes the dimensions and variables in the file. More
details about NetCDF files can be found `here
<https://www.unidata.ucar.edu/software/netcdf/guide_toc.html>`__. There
are three main sections to a NetCDF data structure:
1. Dimensions
2. Variables
3. Attributes
The dimensions section records the name and length of each dimension used
by the variables. The variables would then indicate which dimensions it
uses and any attributes such as data units, along with containing the data
values for the variable. It is good practice to include a
variable that is the same name as a dimension to provide the values for
that axes. Lastly, the attributes section would contain additional
information such as the name of the file creator or the instrument used to
collect the data.
When writing data to a NetCDF file, there is often the need to indicate the
'record dimension'. A record dimension is the unbounded dimension for a
variable. For example, a temperature variable may have dimensions of
latitude, longitude and time. If one wants to add more temperature data to
the NetCDF file as time progresses, then the temperature variable should
have the time dimension flagged as the record dimension.
In addition, the NetCDF file header contains the position of the data in
the file, so access can be done in an efficient manner without loading
unnecessary data into memory. It uses the ``mmap`` module to create
Numpy arrays mapped to the data on disk, for the same purpose.
Note that when `netcdf_file` is used to open a file with mmap=True
(default for read-only), arrays returned by it refer to data
directly on the disk. The file should not be closed, and cannot be cleanly
closed when asked, if such arrays are alive. You may want to copy data arrays
obtained from mmapped Netcdf file if they are to be processed after the file
is closed, see the example below.
Examples
--------
To create a NetCDF file:
>>> from scipy.io import netcdf_file
>>> import numpy as np
>>> f = netcdf_file('simple.nc', 'w')
>>> f.history = 'Created for a test'
>>> f.createDimension('time', 10)
>>> time = f.createVariable('time', 'i', ('time',))
>>> time[:] = np.arange(10)
>>> time.units = 'days since 2008-01-01'
>>> f.close()
Note the assignment of ``arange(10)`` to ``time[:]``. Exposing the slice
of the time variable allows for the data to be set in the object, rather
than letting ``arange(10)`` overwrite the ``time`` variable.
To read the NetCDF file we just created:
>>> from scipy.io import netcdf_file
>>> f = netcdf_file('simple.nc', 'r')
>>> print(f.history)
b'Created for a test'
>>> time = f.variables['time']
>>> print(time.units)
b'days since 2008-01-01'
>>> print(time.shape)
(10,)
>>> print(time[-1])
9
NetCDF files, when opened read-only, return arrays that refer
directly to memory-mapped data on disk:
>>> data = time[:]
If the data is to be processed after the file is closed, it needs
to be copied to main memory:
>>> data = time[:].copy()
>>> f.close()
>>> data.mean()
4.5
A NetCDF file can also be used as context manager:
>>> from scipy.io import netcdf_file
>>> with netcdf_file('simple.nc', 'r') as f:
... print(f.history)
b'Created for a test'
"""
    def __init__(self, filename, mode='r', mmap=None, version=1,
                 maskandscale=False):
        """Initialize netcdf_file from fileobj (str or file-like)."""
        # NOTE: substring membership test — assumes a single-character mode.
        if mode not in 'rwa':
            raise ValueError("Mode must be either 'r', 'w' or 'a'.")
        if hasattr(filename, 'seek'):  # file-like
            self.fp = filename
            self.filename = 'None'
            if mmap is None:
                mmap = False
            elif mmap and not hasattr(filename, 'fileno'):
                raise ValueError('Cannot use file object for mmap')
        else:  # maybe it's a string
            self.filename = filename
            # Append must not truncate, so open 'r+b' instead of 'ab'.
            omode = 'r+' if mode == 'a' else mode
            self.fp = open(self.filename, '%sb' % omode)
            if mmap is None:
                # Mmapped files on PyPy cannot be usually closed
                # before the GC runs, so it's better to use mmap=False
                # as the default.
                mmap = (not IS_PYPY)
        if mode != 'r':
            # Cannot read write-only files
            mmap = False
        self.use_mmap = mmap
        self.mode = mode
        self.version_byte = version
        self.maskandscale = maskandscale
        # Header state; filled in by _read() or by create*()/flush().
        self.dimensions = {}
        self.variables = {}
        self._dims = []       # dimension names in file order
        self._recs = 0        # number of records along the unlimited dimension
        self._recsize = 0     # bytes per record, summed over record variables
        self._mm = None
        self._mm_buf = None
        if self.use_mmap:
            self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ)
            self._mm_buf = np.frombuffer(self._mm, dtype=np.int8)
        self._attributes = {}
        if mode in 'ra':
            self._read()
    def __setattr__(self, attr, value):
        # Store user defined attributes in a separate dict,
        # so we can save them to file later.
        try:
            self._attributes[attr] = value
        except AttributeError:
            # During __init__, ``_attributes`` does not exist yet, so early
            # assignments land only in ``__dict__``.
            pass
        self.__dict__[attr] = value
    def close(self):
        """Closes the NetCDF file."""
        # Guard against double-close and partially constructed objects
        # (this also runs as __del__).
        if hasattr(self, 'fp') and not self.fp.closed:
            try:
                self.flush()
            finally:
                self.variables = {}
                if self._mm_buf is not None:
                    ref = weakref.ref(self._mm_buf)
                    self._mm_buf = None
                    if ref() is None:
                        # self._mm_buf is gc'd, and we can close the mmap
                        self._mm.close()
                    else:
                        # we cannot close self._mm, since self._mm_buf is
                        # alive and there may still be arrays referring to it
                        warnings.warn((
                            "Cannot close a netcdf_file opened with mmap=True, when "
                            "netcdf_variables or arrays referring to its data still exist. "
                            "All data arrays obtained from such files refer directly to "
                            "data on disk, and must be copied before the file can be cleanly "
                            "closed. (See netcdf_file docstring for more information on mmap.)"
                        ), category=RuntimeWarning)
                self._mm = None
                self.fp.close()
    # Flush and close on garbage collection as well.
    __del__ = close
    def __enter__(self):
        # Context-manager support: ``with netcdf_file(...) as f: ...``
        return self
    def __exit__(self, type, value, traceback):
        self.close()
def createDimension(self, name, length):
"""
Adds a dimension to the Dimension section of the NetCDF data structure.
Note that this function merely adds a new dimension that the variables can
reference. The values for the dimension, if desired, should be added as
a variable using `createVariable`, referring to this dimension.
Parameters
----------
name : str
Name of the dimension (Eg, 'lat' or 'time').
length : int
Length of the dimension.
See Also
--------
createVariable
"""
if length is None and self._dims:
raise ValueError("Only first dimension may be unlimited!")
self.dimensions[name] = length
self._dims.append(name)
    def createVariable(self, name, type, dimensions):
        """
        Create an empty variable for the `netcdf_file` object, specifying its data
        type and the dimensions it uses.
        Parameters
        ----------
        name : str
            Name of the new variable.
        type : dtype or str
            Data type of the variable.
        dimensions : sequence of str
            List of the dimension names used by the variable, in the desired order.
        Returns
        -------
        variable : netcdf_variable
            The newly created ``netcdf_variable`` object.
            This object has also been added to the `netcdf_file` object as well.
        See Also
        --------
        createDimension
        Notes
        -----
        Any dimensions to be used by the variable should already exist in the
        NetCDF data structure or should be created by `createDimension` prior to
        creating the NetCDF variable.
        """
        shape = tuple([self.dimensions[dim] for dim in dimensions])
        shape_ = tuple([dim or 0 for dim in shape])  # replace None with 0 for NumPy
        type = dtype(type)
        typecode, size = type.char, type.itemsize
        # Only the six classic NetCDF 3 external types are representable.
        if (typecode, size) not in REVERSE:
            raise ValueError("NetCDF 3 does not support type %s" % type)
        data = empty(shape_, dtype=type.newbyteorder("B"))  # convert to big endian always for NetCDF 3
        self.variables[name] = netcdf_variable(
            data, typecode, size, shape, dimensions,
            maskandscale=self.maskandscale)
        return self.variables[name]
    def flush(self):
        """
        Perform a sync-to-disk flush if the `netcdf_file` object is in write mode.
        See Also
        --------
        sync : Identical function
        """
        # hasattr guard: flush may run from close()/__del__ on an object whose
        # __init__ failed before ``mode`` was set.
        if hasattr(self, 'mode') and self.mode in 'wa':
            self._write()
    sync = flush
    def _write(self):
        # Serialize the whole in-memory structure: magic, version byte,
        # then the numrecs / dim / gatt / var header sections and data.
        self.fp.seek(0)
        self.fp.write(b'CDF')
        self.fp.write(array(self.version_byte, '>b').tobytes())
        # Write headers and data.
        self._write_numrecs()
        self._write_dim_array()
        self._write_gatt_array()
        self._write_var_array()
    def _write_numrecs(self):
        # Get highest record count from all record variables.
        for var in self.variables.values():
            if var.isrec and len(var.data) > self._recs:
                # __dict__ assignment bypasses __setattr__ so the private
                # counter is not mirrored into user attributes.
                self.__dict__['_recs'] = len(var.data)
        self._pack_int(self._recs)
    def _write_dim_array(self):
        # Dimension section: tag, count, then (name, length) pairs in order.
        if self.dimensions:
            self.fp.write(NC_DIMENSION)
            self._pack_int(len(self.dimensions))
            for name in self._dims:
                self._pack_string(name)
                length = self.dimensions[name]
                self._pack_int(length or 0)  # replace None with 0 for record dimension
        else:
            self.fp.write(ABSENT)
    def _write_gatt_array(self):
        # Global attributes use the same layout as per-variable attributes.
        self._write_att_array(self._attributes)
    def _write_att_array(self, attributes):
        # Attribute section: tag, count, then (name, values) pairs.
        if attributes:
            self.fp.write(NC_ATTRIBUTE)
            self._pack_int(len(attributes))
            for name, values in attributes.items():
                self._pack_string(name)
                self._write_att_values(values)
        else:
            self.fp.write(ABSENT)
    def _write_var_array(self):
        # Variable section: metadata for every variable first, then the data
        # payloads (non-record variables' data, then interleaved records).
        if self.variables:
            self.fp.write(NC_VARIABLE)
            self._pack_int(len(self.variables))
            # Sort variable names non-recs first, then recs.
            def sortkey(n):
                v = self.variables[n]
                if v.isrec:
                    return (-1,)
                return v._shape
            variables = sorted(self.variables, key=sortkey, reverse=True)
            # Set the metadata for all variables.
            for name in variables:
                self._write_var_metadata(name)
            # Now that we have the metadata, we know the vsize of
            # each record variable, so we can calculate recsize.
            self.__dict__['_recsize'] = sum([
                    var._vsize for var in self.variables.values()
                    if var.isrec])
            # Set the data for all variables.
            for name in variables:
                self._write_var_data(name)
        else:
            self.fp.write(ABSENT)
    def _write_var_metadata(self, name):
        # Write one variable's header entry: name, dimension ids, attributes,
        # type tag, vsize, and a placeholder "begin" offset.
        var = self.variables[name]
        self._pack_string(name)
        self._pack_int(len(var.dimensions))
        for dimname in var.dimensions:
            dimid = self._dims.index(dimname)
            self._pack_int(dimid)
        self._write_att_array(var._attributes)
        nc_type = REVERSE[var.typecode(), var.itemsize()]
        self.fp.write(nc_type)
        if not var.isrec:
            vsize = var.data.size * var.data.itemsize
            # Round up to the next 4-byte boundary.
            vsize += -vsize % 4
        else:  # record variable
            try:
                vsize = var.data[0].size * var.data.itemsize
            except IndexError:
                # No records written yet.
                vsize = 0
            rec_vars = len([v for v in self.variables.values()
                            if v.isrec])
            # Records of a single record variable are packed without padding.
            if rec_vars > 1:
                vsize += -vsize % 4
        self.variables[name].__dict__['_vsize'] = vsize
        self._pack_int(vsize)
        # Pack a bogus begin, and set the real value later.
        self.variables[name].__dict__['_begin'] = self.fp.tell()
        self._pack_begin(0)
    def _write_var_data(self, name):
        # Write one variable's payload, patching its real "begin" offset into
        # the header slot reserved by _write_var_metadata.
        var = self.variables[name]
        # Set begin in file header.
        the_beguine = self.fp.tell()
        self.fp.seek(var._begin)
        self._pack_begin(the_beguine)
        self.fp.seek(the_beguine)
        # Write data.
        if not var.isrec:
            self.fp.write(var.data.tobytes())
            count = var.data.size * var.data.itemsize
            self._write_var_padding(var, var._vsize - count)
        else:  # record variable
            # Handle rec vars with shape[0] < nrecs.
            if self._recs > len(var.data):
                shape = (self._recs,) + var.data.shape[1:]
                # Resize in-place does not always work since
                # the array might not be single-segment
                try:
                    var.data.resize(shape)
                except ValueError:
                    var.__dict__['data'] = np.resize(var.data, shape).astype(var.data.dtype)
            pos0 = pos = self.fp.tell()
            # Records of different variables are interleaved: each of this
            # variable's records is written _recsize bytes after the previous.
            for rec in var.data:
                # Apparently scalars cannot be converted to big endian. If we
                # try to convert a ``=i4`` scalar to, say, '>i4' the dtype
                # will remain as ``=i4``.
                if not rec.shape and (rec.dtype.byteorder == '<' or
                                      (rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
                    rec = rec.byteswap()
                self.fp.write(rec.tobytes())
                # Padding
                count = rec.size * rec.itemsize
                self._write_var_padding(var, var._vsize - count)
                pos += self._recsize
                self.fp.seek(pos)
            self.fp.seek(pos0 + var._vsize)
    def _write_var_padding(self, var, size):
        # Fill ``size`` bytes with the variable's fill value (assumes the
        # fill pattern length divides ``size``).
        encoded_fill_value = var._get_encoded_fill_value()
        num_fills = size // len(encoded_fill_value)
        self.fp.write(encoded_fill_value * num_fills)
    def _write_att_values(self, values):
        # Serialize one attribute: infer its NetCDF type, then write
        # (type tag, element count, big-endian values, zero padding).
        if hasattr(values, 'dtype'):
            nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
        else:
            types = [(int, NC_INT), (float, NC_FLOAT), (str, NC_CHAR)]
            # bytes index into scalars in py3k. Check for "string" types
            if isinstance(values, (str, bytes)):
                sample = values
            else:
                try:
                    sample = values[0]  # subscriptable?
                except TypeError:
                    sample = values     # scalar
            for class_, nc_type in types:
                if isinstance(sample, class_):
                    break
        typecode, size = TYPEMAP[nc_type]
        dtype_ = '>%s' % typecode
        # asarray() dies with bytes and '>c' in py3k. Change to 'S'
        dtype_ = 'S' if dtype_ == '>c' else dtype_
        values = asarray(values, dtype=dtype_)
        self.fp.write(nc_type)
        if values.dtype.char == 'S':
            # For strings the element count is the byte length, not size.
            nelems = values.itemsize
        else:
            nelems = values.size
        self._pack_int(nelems)
        # Scalars resist the '>...' cast above, so byteswap them explicitly.
        if not values.shape and (values.dtype.byteorder == '<' or
                                 (values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
            values = values.byteswap()
        self.fp.write(values.tobytes())
        count = values.size * values.itemsize
        self.fp.write(b'\x00' * (-count % 4))  # pad
    def _read(self):
        # Parse the file header: magic, version byte, then the numrecs /
        # dimension / global-attribute / variable sections in order.
        # Check magic bytes and version
        magic = self.fp.read(3)
        if not magic == b'CDF':
            raise TypeError("Error: %s is not a valid NetCDF 3 file" %
                            self.filename)
        self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0]
        # Read file headers and set data.
        self._read_numrecs()
        self._read_dim_array()
        self._read_gatt_array()
        self._read_var_array()
    def _read_numrecs(self):
        self.__dict__['_recs'] = self._unpack_int()
    def _read_dim_array(self):
        header = self.fp.read(4)
        if header not in [ZERO, NC_DIMENSION]:
            raise ValueError("Unexpected header.")
        count = self._unpack_int()
        for dim in range(count):
            name = self._unpack_string().decode('latin1')
            length = self._unpack_int() or None  # None for record dimension
            self.dimensions[name] = length
            self._dims.append(name)  # preserve order
    def _read_gatt_array(self):
        # Global attributes become Python attributes of the file object
        # (and are mirrored into _attributes by __setattr__).
        for k, v in self._read_att_array().items():
            self.__setattr__(k, v)
    def _read_att_array(self):
        # Read one attribute section into a plain dict.
        header = self.fp.read(4)
        if header not in [ZERO, NC_ATTRIBUTE]:
            raise ValueError("Unexpected header.")
        count = self._unpack_int()
        attributes = {}
        for attr in range(count):
            name = self._unpack_string().decode('latin1')
            attributes[name] = self._read_att_values()
        return attributes
    def _read_var_array(self):
        # Read all variable headers, then materialize each variable's data:
        # non-record variables individually, record variables collectively as
        # one structured "record array" spanning all records.
        header = self.fp.read(4)
        if header not in [ZERO, NC_VARIABLE]:
            raise ValueError("Unexpected header.")
        begin = 0
        dtypes = {'names': [], 'formats': []}
        rec_vars = []
        count = self._unpack_int()
        for var in range(count):
            (name, dimensions, shape, attributes,
             typecode, size, dtype_, begin_, vsize) = self._read_var()
            # https://www.unidata.ucar.edu/software/netcdf/guide_toc.html
            # Note that vsize is the product of the dimension lengths
            # (omitting the record dimension) and the number of bytes
            # per value (determined from the type), increased to the
            # next multiple of 4, for each variable. If a record
            # variable, this is the amount of space per record. The
            # netCDF "record size" is calculated as the sum of the
            # vsize's of all the record variables.
            #
            # The vsize field is actually redundant, because its value
            # may be computed from other information in the header. The
            # 32-bit vsize field is not large enough to contain the size
            # of variables that require more than 2^32 - 4 bytes, so
            # 2^32 - 1 is used in the vsize field for such variables.
            if shape and shape[0] is None:  # record variable
                rec_vars.append(name)
                # The netCDF "record size" is calculated as the sum of
                # the vsize's of all the record variables.
                self.__dict__['_recsize'] += vsize
                # All record data starts at the first record variable's begin.
                if begin == 0:
                    begin = begin_
                dtypes['names'].append(name)
                dtypes['formats'].append(str(shape[1:]) + dtype_)
                # Handle padding with a virtual variable.
                if typecode in 'bch':
                    actual_size = reduce(mul, (1,) + shape[1:]) * size
                    padding = -actual_size % 4
                    if padding:
                        dtypes['names'].append('_padding_%d' % var)
                        dtypes['formats'].append('(%d,)>b' % padding)
                # Data will be set later.
                data = None
            else:  # not a record variable
                # Calculate size to avoid problems with vsize (above)
                a_size = reduce(mul, shape, 1) * size
                if self.use_mmap:
                    # Zero-copy view into the mmapped file.
                    data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_)
                    data.shape = shape
                else:
                    # Seek to the payload, read a private copy, and restore
                    # the header cursor.
                    pos = self.fp.tell()
                    self.fp.seek(begin_)
                    data = frombuffer(self.fp.read(a_size), dtype=dtype_
                                      ).copy()
                    data.shape = shape
                    self.fp.seek(pos)
            # Add variable.
            self.variables[name] = netcdf_variable(
                    data, typecode, size, shape, dimensions, attributes,
                    maskandscale=self.maskandscale)
        if rec_vars:
            # Remove padding when only one record variable.
            if len(rec_vars) == 1:
                dtypes['names'] = dtypes['names'][:1]
                dtypes['formats'] = dtypes['formats'][:1]
            # Build rec array.
            if self.use_mmap:
                rec_array = self._mm_buf[begin:begin+self._recs*self._recsize].view(dtype=dtypes)
                rec_array.shape = (self._recs,)
            else:
                pos = self.fp.tell()
                self.fp.seek(begin)
                rec_array = frombuffer(self.fp.read(self._recs*self._recsize),
                                       dtype=dtypes).copy()
                rec_array.shape = (self._recs,)
                self.fp.seek(pos)
            # Each record variable's data is a named field of the rec array.
            for var in rec_vars:
                self.variables[var].__dict__['data'] = rec_array[var]
    def _read_var(self):
        # Parse one variable header entry; returns everything needed to
        # locate and decode the variable's payload.
        name = self._unpack_string().decode('latin1')
        dimensions = []
        shape = []
        dims = self._unpack_int()
        for i in range(dims):
            dimid = self._unpack_int()
            dimname = self._dims[dimid]
            dimensions.append(dimname)
            dim = self.dimensions[dimname]
            shape.append(dim)
        dimensions = tuple(dimensions)
        shape = tuple(shape)
        attributes = self._read_att_array()
        nc_type = self.fp.read(4)
        vsize = self._unpack_int()
        # The "begin" offset is 32-bit in version 1, 64-bit in version 2.
        begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]()
        typecode, size = TYPEMAP[nc_type]
        dtype_ = '>%s' % typecode
        return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize
    def _read_att_values(self):
        # Read one attribute's values: (type tag, count, payload, padding).
        nc_type = self.fp.read(4)
        n = self._unpack_int()
        typecode, size = TYPEMAP[nc_type]
        count = n*size
        values = self.fp.read(int(count))
        self.fp.read(-count % 4)  # read padding
        if typecode != 'c':
            values = frombuffer(values, dtype='>%s' % typecode).copy()
            # A single-element attribute is returned as a scalar.
            if values.shape == (1,):
                values = values[0]
        else:
            # Character data stays as bytes, sans trailing NULs.
            values = values.rstrip(b'\x00')
        return values
def _pack_begin(self, begin):
if self.version_byte == 1:
self._pack_int(begin)
elif self.version_byte == 2:
self._pack_int64(begin)
    def _pack_int(self, value):
        # Write one big-endian signed 32-bit integer.
        self.fp.write(array(value, '>i').tobytes())
    _pack_int32 = _pack_int
    def _unpack_int(self):
        # Read one big-endian signed 32-bit integer as a Python int.
        return int(frombuffer(self.fp.read(4), '>i')[0])
    _unpack_int32 = _unpack_int
    def _pack_int64(self, value):
        # Write one big-endian signed 64-bit integer.
        self.fp.write(array(value, '>q').tobytes())
    def _unpack_int64(self):
        # Read one big-endian signed 64-bit integer (numpy scalar).
        return frombuffer(self.fp.read(8), '>q')[0]
def _pack_string(self, s):
count = len(s)
self._pack_int(count)
self.fp.write(s.encode('latin1'))
self.fp.write(b'\x00' * (-count % 4)) # pad
def _unpack_string(self):
count = self._unpack_int()
s = self.fp.read(count).rstrip(b'\x00')
self.fp.read(-count % 4) # read padding
return s
class netcdf_variable:
"""
A data object for netcdf files.
`netcdf_variable` objects are constructed by calling the method
`netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable`
objects behave much like array objects defined in numpy, except that their
data resides in a file. Data is read by indexing and written by assigning
to an indexed subset; the entire array can be accessed by the index ``[:]``
or (for scalars) by using the methods `getValue` and `assignValue`.
`netcdf_variable` objects also have attribute `shape` with the same meaning
as for arrays, but the shape cannot be modified. There is another read-only
attribute `dimensions`, whose value is the tuple of dimension names.
All other attributes correspond to variable attributes defined in
the NetCDF file. Variable attributes are created by assigning to an
attribute of the `netcdf_variable` object.
Parameters
----------
data : array_like
The data array that holds the values for the variable.
Typically, this is initialized as empty, but with the proper shape.
typecode : dtype character code
Desired data-type for the data array.
size : int
Desired element size for the data array.
shape : sequence of ints
The shape of the array. This should match the lengths of the
variable's dimensions.
dimensions : sequence of strings
The names of the dimensions used by the variable. Must be in the
same order of the dimension lengths given by `shape`.
attributes : dict, optional
Attribute values (any type) keyed by string names. These attributes
become attributes for the netcdf_variable object.
maskandscale : bool, optional
Whether to automatically scale and/or mask data based on attributes.
Default is False.
Attributes
----------
dimensions : list of str
List of names of dimensions used by the variable object.
isrec, shape
Properties
See also
--------
isrec, shape
"""
    def __init__(self, data, typecode, size, shape, dimensions,
                 attributes=None,
                 maskandscale=False):
        self.data = data
        self._typecode = typecode
        self._size = size
        # _shape keeps None for the record dimension; ``data.shape`` holds
        # the concrete shape.
        self._shape = shape
        self.dimensions = dimensions
        self.maskandscale = maskandscale
        self._attributes = attributes or {}
        # Expose NetCDF attributes directly as Python attributes.
        for k, v in self._attributes.items():
            self.__dict__[k] = v
    def __setattr__(self, attr, value):
        # Store user defined attributes in a separate dict,
        # so we can save them to file later.
        try:
            self._attributes[attr] = value
        except AttributeError:
            # During __init__, ``_attributes`` does not exist yet.
            pass
        self.__dict__[attr] = value
    def isrec(self):
        """Returns whether the variable has a record dimension or not.
        A record dimension is a dimension along which additional data could be
        easily appended in the netcdf data structure without much rewriting of
        the data file. This attribute is a read-only property of the
        `netcdf_variable`.
        """
        # Record variables have None (falsy) as their first _shape entry;
        # scalars (0-d data) are never record variables.
        return bool(self.data.shape) and not self._shape[0]
    isrec = property(isrec)
    def shape(self):
        """Returns the shape tuple of the data variable.
        This is a read-only attribute and can not be modified in the
        same manner of other numpy arrays.
        """
        return self.data.shape
    shape = property(shape)
    def getValue(self):
        """
        Retrieve a scalar value from a `netcdf_variable` of length one.
        Raises
        ------
        ValueError
            If the netcdf variable is an array of length greater than one,
            this exception will be raised.
        """
        # ndarray.item() raises for arrays with more than one element.
        return self.data.item()
def assignValue(self, value):
"""
Assign a scalar value to a `netcdf_variable` of length one.
Parameters
----------
value : scalar
Scalar value (of compatible type) to assign to a length-one netcdf
variable. This value will be written to file.
Raises
------
ValueError
If the input is not a scalar, or if the destination is not a length-one
netcdf variable.
"""
if not self.data.flags.writeable:
# Work-around for a bug in NumPy. Calling itemset() on a read-only
# memory-mapped array causes a seg. fault.
# See NumPy ticket #1622, and SciPy ticket #1202.
# This check for `writeable` can be removed when the oldest version
# of NumPy still supported by scipy contains the fix for #1622.
raise RuntimeError("variable is not writeable")
self.data.itemset(value)
    def typecode(self):
        """
        Return the typecode of the variable.
        Returns
        -------
        typecode : char
            The character typecode of the variable (e.g., 'i' for int).
        """
        return self._typecode
    def itemsize(self):
        """
        Return the itemsize of the variable.
        Returns
        -------
        itemsize : int
            The element size of the variable (e.g., 8 for float64).
        """
        return self._size
    def __getitem__(self, index):
        # Without maskandscale, expose the raw (possibly mmapped) data.
        if not self.maskandscale:
            return self.data[index]
        # Otherwise work on a copy: mask missing values and apply the
        # CF-style scale_factor / add_offset attributes.
        data = self.data[index].copy()
        missing_value = self._get_missing_value()
        data = self._apply_missing_value(data, missing_value)
        scale_factor = self._attributes.get('scale_factor')
        add_offset = self._attributes.get('add_offset')
        if add_offset is not None or scale_factor is not None:
            # Promote to float64 so scaling does not truncate.
            data = data.astype(np.float64)
        if scale_factor is not None:
            data = data * scale_factor
        if add_offset is not None:
            data += add_offset
        return data
    def __setitem__(self, index, data):
        """Write ``data`` at ``index``, packing it when maskandscale is enabled."""
        if self.maskandscale:
            missing_value = (
                    self._get_missing_value() or
                    getattr(data, 'fill_value', 999999))
            self._attributes.setdefault('missing_value', missing_value)
            self._attributes.setdefault('_FillValue', missing_value)
            # Packing is the inverse of __getitem__: subtract offset, divide
            # by scale, then fill masked entries with the missing value.
            data = ((data - self._attributes.get('add_offset', 0.0)) /
                    self._attributes.get('scale_factor', 1.0))
            data = np.ma.asarray(data).filled(missing_value)
            if self._typecode not in 'fd' and data.dtype.kind == 'f':
                data = np.round(data)
        # Expand data for record vars?
        if self.isrec:
            if isinstance(index, tuple):
                rec_index = index[0]
            else:
                rec_index = index
            # Number of records the write will reach.
            if isinstance(rec_index, slice):
                recs = (rec_index.start or 0) + len(data)
            else:
                recs = rec_index + 1
            if recs > len(self.data):
                shape = (recs,) + self._shape[1:]
                # Resize in-place does not always work since
                # the array might not be single-segment
                try:
                    self.data.resize(shape)
                except ValueError:
                    self.__dict__['data'] = np.resize(self.data, shape).astype(self.data.dtype)
        self.data[index] = data
    def _default_encoded_fill_value(self):
        """
        The default encoded fill-value for this Variable's data type.
        """
        # REVERSE maps (typecode, itemsize) to the netCDF type constant;
        # FILLMAP gives the on-disk byte pattern used to pad that type.
        nc_type = REVERSE[self.typecode(), self.itemsize()]
        return FILLMAP[nc_type]
    def _get_encoded_fill_value(self):
        """
        Returns the encoded fill value for this variable as bytes.
        This is taken from either the _FillValue attribute, or the default fill
        value for this variable's data type.
        """
        if '_FillValue' in self._attributes:
            fill_value = np.array(self._attributes['_FillValue'],
                                  dtype=self.data.dtype).tobytes()
            # Only honour a user-supplied _FillValue when its encoded width
            # matches the variable's element size; otherwise use the default.
            if len(fill_value) == self.itemsize():
                return fill_value
            else:
                return self._default_encoded_fill_value()
        else:
            return self._default_encoded_fill_value()
def _get_missing_value(self):
"""
Returns the value denoting "no data" for this variable.
If this variable does not have a missing/fill value, returns None.
If both _FillValue and missing_value are given, give precedence to
_FillValue. The netCDF standard gives special meaning to _FillValue;
missing_value is just used for compatibility with old datasets.
"""
if '_FillValue' in self._attributes:
missing_value = self._attributes['_FillValue']
elif 'missing_value' in self._attributes:
missing_value = self._attributes['missing_value']
else:
missing_value = None
return missing_value
@staticmethod
def _apply_missing_value(data, missing_value):
"""
Applies the given missing value to the data array.
Returns a numpy.ma array, with any value equal to missing_value masked
out (unless missing_value is None, in which case the original array is
returned).
"""
if missing_value is None:
newdata = data
else:
try:
missing_value_isnan = np.isnan(missing_value)
except (TypeError, NotImplementedError):
# some data types (e.g., characters) cannot be tested for NaN
missing_value_isnan = False
if missing_value_isnan:
mymask = np.isnan(data)
else:
mymask = (data == missing_value)
newdata = np.ma.masked_where(mymask, data)
return newdata
# Backward-compatible aliases kept for code written against the old names.
NetCDFFile = netcdf_file
NetCDFVariable = netcdf_variable
| 39,085
| 34.891644
| 107
|
py
|
scipy
|
scipy-main/scipy/io/_mmio.py
|
"""
Matrix Market I/O in Python.
See http://math.nist.gov/MatrixMarket/formats.html
for information about the Matrix Market format.
"""
#
# Author: Pearu Peterson <pearu@cens.ioc.ee>
# Created: October, 2004
#
# References:
# http://math.nist.gov/MatrixMarket/
#
import os
import numpy as np
from numpy import (asarray, real, imag, conj, zeros, ndarray, concatenate,
ones, can_cast)
from scipy.sparse import coo_matrix, issparse
__all__ = ['mminfo', 'mmread', 'mmwrite', 'MMFile']
# -----------------------------------------------------------------------------
def asstr(s):
    """Return *s* as ``str``, decoding ``bytes`` input as Latin-1."""
    return s.decode('latin1') if isinstance(s, bytes) else str(s)
def mminfo(source):
    """
    Return size and storage parameters from Matrix Market file-like 'source'.
    Parameters
    ----------
    source : str or file-like
        Matrix Market filename (extension .mtx) or open file-like object
    Returns
    -------
    rows : int
        Number of matrix rows.
    cols : int
        Number of matrix columns.
    entries : int
        Number of non-zero entries of a sparse matrix
        or rows*cols for a dense matrix.
    format : str
        Either 'coordinate' or 'array'.
    field : str
        Either 'real', 'complex', 'pattern', or 'integer'.
    symmetry : str
        Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
    Examples
    --------
    >>> from io import StringIO
    >>> from scipy.io import mminfo
    >>> text = '''%%MatrixMarket matrix coordinate real general
    ...  5 5 7
    ...  2 3 1.0
    ...  3 4 2.0
    ...  3 5 3.0
    ...  4 1 4.0
    ...  4 2 5.0
    ...  4 3 6.0
    ...  4 4 7.0
    ... '''
    ``mminfo(source)`` returns the number of rows, number of columns,
    format, field type and symmetry attribute of the source file.
    >>> mminfo(StringIO(text))
    (5, 5, 7, 'coordinate', 'real', 'general')
    """
    # Thin convenience wrapper around the class-level header parser.
    return MMFile.info(source)
# -----------------------------------------------------------------------------
def mmread(source):
    """
    Reads the contents of a Matrix Market file-like 'source' into a matrix.
    Parameters
    ----------
    source : str or file-like
        Matrix Market filename (extensions .mtx, .mtz.gz)
        or open file-like object.
    Returns
    -------
    a : ndarray or coo_matrix
        Dense or sparse matrix depending on the matrix format in the
        Matrix Market file.
    Examples
    --------
    >>> from io import StringIO
    >>> from scipy.io import mmread
    >>> text = '''%%MatrixMarket matrix coordinate real general
    ...  5 5 7
    ...  2 3 1.0
    ...  3 4 2.0
    ...  3 5 3.0
    ...  4 1 4.0
    ...  4 2 5.0
    ...  4 3 6.0
    ...  4 4 7.0
    ... '''
    ``mmread(source)`` returns the data as sparse matrix in COO format.
    >>> m = mmread(StringIO(text))
    >>> m
    <5x5 sparse matrix of type '<class 'numpy.float64'>'
    with 7 stored elements in COOrdinate format>
    >>> m.A
    array([[0., 0., 0., 0., 0.],
           [0., 0., 1., 0., 0.],
           [0., 0., 0., 2., 3.],
           [4., 5., 6., 7., 0.],
           [0., 0., 0., 0., 0.]])
    """
    # A fresh MMFile instance parses the header and then the body.
    return MMFile().read(source)
# -----------------------------------------------------------------------------
def mmwrite(target, a, comment='', field=None, precision=None, symmetry=None):
    r"""
    Writes the sparse or dense array `a` to Matrix Market file-like `target`.
    Parameters
    ----------
    target : str or file-like
        Matrix Market filename (extension .mtx) or open file-like object.
    a : array like
        Sparse or dense 2-D array.
    comment : str, optional
        Comments to be prepended to the Matrix Market file.
    field : None or str, optional
        Either 'real', 'complex', 'pattern', or 'integer'.
    precision : None or int, optional
        Number of digits to display for real or complex values.
    symmetry : None or str, optional
        Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
        If symmetry is None the symmetry type of 'a' is determined by its
        values.
    Returns
    -------
    None
    Examples
    --------
    >>> from io import BytesIO
    >>> import numpy as np
    >>> from scipy.sparse import coo_matrix
    >>> from scipy.io import mmwrite
    Write a small NumPy array to a matrix market file.  The file will be
    written in the ``'array'`` format.
    >>> a = np.array([[1.0, 0, 0, 0], [0, 2.5, 0, 6.25]])
    >>> target = BytesIO()
    >>> mmwrite(target, a)
    >>> print(target.getvalue().decode('latin1'))
    %%MatrixMarket matrix array real general
    %
    2 4
    1.0000000000000000e+00
    0.0000000000000000e+00
    0.0000000000000000e+00
    2.5000000000000000e+00
    0.0000000000000000e+00
    0.0000000000000000e+00
    0.0000000000000000e+00
    6.2500000000000000e+00
    Add a comment to the output file, and set the precision to 3.
    >>> target = BytesIO()
    >>> mmwrite(target, a, comment='\n Some test data.\n', precision=3)
    >>> print(target.getvalue().decode('latin1'))
    %%MatrixMarket matrix array real general
    %
    % Some test data.
    %
    2 4
    1.000e+00
    0.000e+00
    0.000e+00
    2.500e+00
    0.000e+00
    0.000e+00
    0.000e+00
    6.250e+00
    Convert to a sparse matrix before calling ``mmwrite``.  This will
    result in the output format being ``'coordinate'`` rather than
    ``'array'``.
    >>> target = BytesIO()
    >>> mmwrite(target, coo_matrix(a), precision=3)
    >>> print(target.getvalue().decode('latin1'))
    %%MatrixMarket matrix coordinate real general
    %
    2 4 3
    1 1 1.00e+00
    2 2 2.50e+00
    2 4 6.25e+00
    Write a complex Hermitian array to a matrix market file.  Note that
    only six values are actually written to the file; the other values
    are implied by the symmetry.
    >>> z = np.array([[3, 1+2j, 4-3j], [1-2j, 1, -5j], [4+3j, 5j, 2.5]])
    >>> z
    array([[ 3. +0.j,  1. +2.j,  4. -3.j],
           [ 1. -2.j,  1. +0.j, -0. -5.j],
           [ 4. +3.j,  0. +5.j,  2.5+0.j]])
    >>> target = BytesIO()
    >>> mmwrite(target, z, precision=2)
    >>> print(target.getvalue().decode('latin1'))
    %%MatrixMarket matrix array complex hermitian
    %
    3 3
    3.00e+00 0.00e+00
    1.00e+00 -2.00e+00
    4.00e+00 3.00e+00
    1.00e+00 0.00e+00
    0.00e+00 5.00e+00
    2.50e+00 0.00e+00
    """
    # Delegates all format/field/symmetry inference to MMFile._write().
    MMFile().write(target, a, comment, field, precision, symmetry)
###############################################################################
class MMFile:
    """Reader/writer for the Matrix Market exchange format.

    Header state (rows, cols, entries, format, field, symmetry) is cached
    on the instance in the slots below after a header is parsed.
    """
    __slots__ = ('_rows',
                 '_cols',
                 '_entries',
                 '_format',
                 '_field',
                 '_symmetry')
    # Read-only accessors for the parsed header fields.
    @property
    def rows(self):
        return self._rows
    @property
    def cols(self):
        return self._cols
    @property
    def entries(self):
        return self._entries
    @property
    def format(self):
        return self._format
    @property
    def field(self):
        return self._field
    @property
    def symmetry(self):
        return self._symmetry
    @property
    def has_symmetry(self):
        # True for any symmetry kind that implies implicit mirrored entries.
        return self._symmetry in (self.SYMMETRY_SYMMETRIC,
                                  self.SYMMETRY_SKEW_SYMMETRIC,
                                  self.SYMMETRY_HERMITIAN)
    # format values
    FORMAT_COORDINATE = 'coordinate'
    FORMAT_ARRAY = 'array'
    FORMAT_VALUES = (FORMAT_COORDINATE, FORMAT_ARRAY)
    @classmethod
    def _validate_format(self, format):
        # Raise ValueError unless `format` is a recognised storage format.
        if format not in self.FORMAT_VALUES:
            raise ValueError('unknown format type %s, must be one of %s' %
                             (format, self.FORMAT_VALUES))
    # field values
    FIELD_INTEGER = 'integer'
    FIELD_UNSIGNED = 'unsigned-integer'
    FIELD_REAL = 'real'
    FIELD_COMPLEX = 'complex'
    FIELD_PATTERN = 'pattern'
    FIELD_VALUES = (FIELD_INTEGER, FIELD_UNSIGNED, FIELD_REAL, FIELD_COMPLEX,
                    FIELD_PATTERN)
    @classmethod
    def _validate_field(self, field):
        # Raise ValueError unless `field` is a recognised value type.
        if field not in self.FIELD_VALUES:
            raise ValueError('unknown field type %s, must be one of %s' %
                             (field, self.FIELD_VALUES))
    # symmetry values
    SYMMETRY_GENERAL = 'general'
    SYMMETRY_SYMMETRIC = 'symmetric'
    SYMMETRY_SKEW_SYMMETRIC = 'skew-symmetric'
    SYMMETRY_HERMITIAN = 'hermitian'
    SYMMETRY_VALUES = (SYMMETRY_GENERAL, SYMMETRY_SYMMETRIC,
                       SYMMETRY_SKEW_SYMMETRIC, SYMMETRY_HERMITIAN)
    @classmethod
    def _validate_symmetry(self, symmetry):
        # Raise ValueError unless `symmetry` is a recognised symmetry type.
        if symmetry not in self.SYMMETRY_VALUES:
            raise ValueError('unknown symmetry type %s, must be one of %s' %
                             (symmetry, self.SYMMETRY_VALUES))
DTYPES_BY_FIELD = {FIELD_INTEGER: 'intp',
FIELD_UNSIGNED: 'uint64',
FIELD_REAL: 'd',
FIELD_COMPLEX: 'D',
FIELD_PATTERN: 'd'}
# -------------------------------------------------------------------------
    @staticmethod
    def reader():
        # Unused placeholder; reading is done via MMFile.read() / mmread().
        pass
    # -------------------------------------------------------------------------
    @staticmethod
    def writer():
        # Unused placeholder; writing is done via MMFile.write() / mmwrite().
        pass
# -------------------------------------------------------------------------
    @classmethod
    def info(self, source):
        """
        Return size, storage parameters from Matrix Market file-like 'source'.
        Parameters
        ----------
        source : str or file-like
            Matrix Market filename (extension .mtx) or open file-like object
        Returns
        -------
        rows : int
            Number of matrix rows.
        cols : int
            Number of matrix columns.
        entries : int
            Number of non-zero entries of a sparse matrix
            or rows*cols for a dense matrix.
        format : str
            Either 'coordinate' or 'array'.
        field : str
            Either 'real', 'complex', 'pattern', or 'integer'.
        symmetry : str
            Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
        """
        stream, close_it = self._open(source)
        try:
            # read and validate header line
            line = stream.readline()
            mmid, matrix, format, field, symmetry = \
                (asstr(part.strip()) for part in line.split())
            if not mmid.startswith('%%MatrixMarket'):
                raise ValueError('source is not in Matrix Market format')
            if not matrix.lower() == 'matrix':
                raise ValueError("Problem reading file header: " + line)
            # http://math.nist.gov/MatrixMarket/formats.html
            if format.lower() == 'array':
                format = self.FORMAT_ARRAY
            elif format.lower() == 'coordinate':
                format = self.FORMAT_COORDINATE
            # skip comments; for bytes streams `line[0]` is the int 37
            # (ord('%')), for text streams it is the character '%'
            while line:
                if line.lstrip() and line.lstrip()[0] in ['%', 37]:
                    line = stream.readline()
                else:
                    break
            # skip empty lines
            while not line.strip():
                line = stream.readline()
            split_line = line.split()
            if format == self.FORMAT_ARRAY:
                if not len(split_line) == 2:
                    raise ValueError("Header line not of length 2: " +
                                     line.decode('ascii'))
                rows, cols = map(int, split_line)
                entries = rows * cols
            else:
                if not len(split_line) == 3:
                    raise ValueError("Header line not of length 3: " +
                                     line.decode('ascii'))
                rows, cols, entries = map(int, split_line)
            return (rows, cols, entries, format, field.lower(),
                    symmetry.lower())
        finally:
            if close_it:
                stream.close()
# -------------------------------------------------------------------------
    @staticmethod
    def _open(filespec, mode='rb'):
        """ Return an open file stream for reading based on source.
        If source is a file name, open it (after trying to find it with mtx and
        gzipped mtx extensions). Otherwise, just return source.
        Parameters
        ----------
        filespec : str or file-like
            String giving file name or file-like object
        mode : str, optional
            Mode with which to open file, if `filespec` is a file name.
        Returns
        -------
        fobj : file-like
            Open file-like object.
        close_it : bool
            True if the calling function should close this file when done,
            false otherwise.
        """
        # If 'filespec' is path-like (str, pathlib.Path, os.DirEntry, other class
        # implementing a '__fspath__' method), try to convert it to str. If this
        # fails by throwing a 'TypeError', assume it's an open file handle and
        # return it as-is.
        try:
            filespec = os.fspath(filespec)
        except TypeError:
            return filespec, False
        # 'filespec' is definitely a str now
        # open for reading
        if mode[0] == 'r':
            # determine filename plus extension
            if not os.path.isfile(filespec):
                if os.path.isfile(filespec+'.mtx'):
                    filespec = filespec + '.mtx'
                elif os.path.isfile(filespec+'.mtx.gz'):
                    filespec = filespec + '.mtx.gz'
                elif os.path.isfile(filespec+'.mtx.bz2'):
                    filespec = filespec + '.mtx.bz2'
            # open filename
            if filespec.endswith('.gz'):
                import gzip
                stream = gzip.open(filespec, mode)
            elif filespec.endswith('.bz2'):
                import bz2
                stream = bz2.BZ2File(filespec, 'rb')
            else:
                stream = open(filespec, mode)
        # open for writing
        else:
            if filespec[-4:] != '.mtx':
                filespec = filespec + '.mtx'
            stream = open(filespec, mode)
        # We opened the stream ourselves, so the caller must close it.
        return stream, True
# -------------------------------------------------------------------------
    @staticmethod
    def _get_symmetry(a):
        """Classify `a` as general/symmetric/skew-symmetric/hermitian."""
        m, n = a.shape
        # Non-square matrices can never have a symmetry property.
        if m != n:
            return MMFile.SYMMETRY_GENERAL
        issymm = True
        isskew = True
        isherm = a.dtype.char in 'FD'
        # sparse input
        if issparse(a):
            # check if number of nonzero entries of lower and upper triangle
            # matrix are equal
            a = a.tocoo()
            (row, col) = a.nonzero()
            if (row < col).sum() != (row > col).sum():
                return MMFile.SYMMETRY_GENERAL
            # define iterator over symmetric pair entries
            a = a.todok()
            def symm_iterator():
                for ((i, j), aij) in a.items():
                    if i > j:
                        aji = a[j, i]
                        yield (aij, aji, False)
                    elif i == j:
                        yield (aij, aij, True)
        # non-sparse input
        else:
            # define iterator over symmetric pair entries
            def symm_iterator():
                for j in range(n):
                    for i in range(j, n):
                        aij, aji = a[i][j], a[j][i]
                        yield (aij, aji, i == j)
        # check for symmetry
        # yields aij, aji, is_diagonal
        for (aij, aji, is_diagonal) in symm_iterator():
            if isskew and is_diagonal and aij != 0:
                isskew = False
            else:
                if issymm and aij != aji:
                    issymm = False
                with np.errstate(over="ignore"):
                    # This can give a warning for uint dtypes, so silence that
                    if isskew and aij != -aji:
                        isskew = False
                if isherm and aij != conj(aji):
                    isherm = False
            # Stop early once every symmetry candidate is ruled out.
            if not (issymm or isskew or isherm):
                break
        # return symmetry value
        if issymm:
            return MMFile.SYMMETRY_SYMMETRIC
        if isskew:
            return MMFile.SYMMETRY_SKEW_SYMMETRIC
        if isherm:
            return MMFile.SYMMETRY_HERMITIAN
        return MMFile.SYMMETRY_GENERAL
# -------------------------------------------------------------------------
@staticmethod
def _field_template(field, precision):
return {MMFile.FIELD_REAL: '%%.%ie\n' % precision,
MMFile.FIELD_INTEGER: '%i\n',
MMFile.FIELD_UNSIGNED: '%u\n',
MMFile.FIELD_COMPLEX: '%%.%ie %%.%ie\n' %
(precision, precision)
}.get(field, None)
# -------------------------------------------------------------------------
    def __init__(self, **kwargs):
        """Create an MMFile, seeding header attributes from keyword args."""
        self._init_attrs(**kwargs)
# -------------------------------------------------------------------------
    def read(self, source):
        """
        Reads the contents of a Matrix Market file-like 'source' into a matrix.
        Parameters
        ----------
        source : str or file-like
            Matrix Market filename (extensions .mtx, .mtz.gz)
            or open file object.
        Returns
        -------
        a : ndarray or coo_matrix
            Dense or sparse matrix depending on the matrix format in the
            Matrix Market file.
        """
        stream, close_it = self._open(source)
        try:
            self._parse_header(stream)
            return self._parse_body(stream)
        finally:
            # Only close streams that _open() created itself.
            if close_it:
                stream.close()
# -------------------------------------------------------------------------
    def write(self, target, a, comment='', field=None, precision=None,
              symmetry=None):
        """
        Writes sparse or dense array `a` to Matrix Market file-like `target`.
        Parameters
        ----------
        target : str or file-like
            Matrix Market filename (extension .mtx) or open file-like object.
        a : array like
            Sparse or dense 2-D array.
        comment : str, optional
            Comments to be prepended to the Matrix Market file.
        field : None or str, optional
            Either 'real', 'complex', 'pattern', or 'integer'.
        precision : None or int, optional
            Number of digits to display for real or complex values.
        symmetry : None or str, optional
            Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
            If symmetry is None the symmetry type of 'a' is determined by its
            values.
        """
        stream, close_it = self._open(target, 'wb')
        try:
            self._write(stream, a, comment, field, precision, symmetry)
        finally:
            if close_it:
                stream.close()
            else:
                # Caller-owned stream: flush so the data is visible to them.
                stream.flush()
# -------------------------------------------------------------------------
def _init_attrs(self, **kwargs):
"""
Initialize each attributes with the corresponding keyword arg value
or a default of None
"""
attrs = self.__class__.__slots__
public_attrs = [attr[1:] for attr in attrs]
invalid_keys = set(kwargs.keys()) - set(public_attrs)
if invalid_keys:
raise ValueError('''found {} invalid keyword arguments, please only
use {}'''.format(tuple(invalid_keys),
public_attrs))
for attr in attrs:
setattr(self, attr, kwargs.get(attr[1:], None))
# -------------------------------------------------------------------------
    def _parse_header(self, stream):
        # Delegate header parsing to info() and cache the results on self.
        rows, cols, entries, format, field, symmetry = \
            self.__class__.info(stream)
        self._init_attrs(rows=rows, cols=cols, entries=entries, format=format,
                         field=field, symmetry=symmetry)
# -------------------------------------------------------------------------
    def _parse_body(self, stream):
        """Read the matrix payload according to the already-parsed header."""
        rows, cols, entries, format, field, symm = (self.rows, self.cols,
                                                    self.entries, self.format,
                                                    self.field, self.symmetry)
        dtype = self.DTYPES_BY_FIELD.get(field, None)
        has_symmetry = self.has_symmetry
        is_integer = field == self.FIELD_INTEGER
        is_unsigned_integer = field == self.FIELD_UNSIGNED
        is_complex = field == self.FIELD_COMPLEX
        is_skew = symm == self.SYMMETRY_SKEW_SYMMETRIC
        is_herm = symm == self.SYMMETRY_HERMITIAN
        is_pattern = field == self.FIELD_PATTERN
        if format == self.FORMAT_ARRAY:
            # Dense: one value per line, column-major; symmetric files
            # only store the lower triangle (skew omits the diagonal too).
            a = zeros((rows, cols), dtype=dtype)
            line = 1
            i, j = 0, 0
            if is_skew:
                a[i, j] = 0
                if i < rows - 1:
                    i += 1
            while line:
                line = stream.readline()
                # skip comment lines ('%' or byte 37) and blank lines
                if not line or line[0] in ['%', 37] or not line.strip():
                    continue
                if is_integer:
                    aij = int(line)
                elif is_unsigned_integer:
                    aij = int(line)
                elif is_complex:
                    aij = complex(*map(float, line.split()))
                else:
                    aij = float(line)
                a[i, j] = aij
                # Mirror the entry into the upper triangle when implied.
                if has_symmetry and i != j:
                    if is_skew:
                        a[j, i] = -aij
                    elif is_herm:
                        a[j, i] = conj(aij)
                    else:
                        a[j, i] = aij
                if i < rows-1:
                    i = i + 1
                else:
                    j = j + 1
                    if not has_symmetry:
                        i = 0
                    else:
                        i = j
                        if is_skew:
                            a[i, j] = 0
                            if i < rows-1:
                                i += 1
            # Validate the final cursor position: anything else means the
            # file held fewer values than the header promised.
            if is_skew:
                if not (i in [0, j] and j == cols - 1):
                    raise ValueError("Parse error, did not read all lines.")
            else:
                if not (i in [0, j] and j == cols):
                    raise ValueError("Parse error, did not read all lines.")
        elif format == self.FORMAT_COORDINATE:
            # Read sparse COOrdinate format
            if entries == 0:
                # empty matrix
                return coo_matrix((rows, cols), dtype=dtype)
            I = zeros(entries, dtype='intc')
            J = zeros(entries, dtype='intc')
            if is_pattern:
                V = ones(entries, dtype='int8')
            elif is_integer:
                V = zeros(entries, dtype='intp')
            elif is_unsigned_integer:
                V = zeros(entries, dtype='uint64')
            elif is_complex:
                V = zeros(entries, dtype='complex')
            else:
                V = zeros(entries, dtype='float')
            entry_number = 0
            for line in stream:
                # skip comment lines ('%' or byte 37) and blank lines
                if not line or line[0] in ['%', 37] or not line.strip():
                    continue
                if entry_number+1 > entries:
                    raise ValueError("'entries' in header is smaller than "
                                     "number of entries")
                l = line.split()
                I[entry_number], J[entry_number] = map(int, l[:2])
                if not is_pattern:
                    if is_integer:
                        V[entry_number] = int(l[2])
                    elif is_unsigned_integer:
                        V[entry_number] = int(l[2])
                    elif is_complex:
                        V[entry_number] = complex(*map(float, l[2:]))
                    else:
                        V[entry_number] = float(l[2])
                entry_number += 1
            if entry_number < entries:
                raise ValueError("'entries' in header is larger than "
                                 "number of entries")
            I -= 1  # adjust indices (base 1 -> base 0)
            J -= 1
            if has_symmetry:
                # Materialize the implied mirrored entries explicitly.
                mask = (I != J)  # off diagonal mask
                od_I = I[mask]
                od_J = J[mask]
                od_V = V[mask]
                I = concatenate((I, od_J))
                J = concatenate((J, od_I))
                if is_skew:
                    od_V *= -1
                elif is_herm:
                    od_V = od_V.conjugate()
                V = concatenate((V, od_V))
            a = coo_matrix((V, (I, J)), shape=(rows, cols), dtype=dtype)
        else:
            raise NotImplementedError(format)
        return a
# ------------------------------------------------------------------------
def _write(self, stream, a, comment='', field=None, precision=None,
symmetry=None):
if isinstance(a, list) or isinstance(a, ndarray) or \
isinstance(a, tuple) or hasattr(a, '__array__'):
rep = self.FORMAT_ARRAY
a = asarray(a)
if len(a.shape) != 2:
raise ValueError('Expected 2 dimensional array')
rows, cols = a.shape
if field is not None:
if field == self.FIELD_INTEGER:
if not can_cast(a.dtype, 'intp'):
raise OverflowError("mmwrite does not support integer "
"dtypes larger than native 'intp'.")
a = a.astype('intp')
elif field == self.FIELD_REAL:
if a.dtype.char not in 'fd':
a = a.astype('d')
elif field == self.FIELD_COMPLEX:
if a.dtype.char not in 'FD':
a = a.astype('D')
else:
if not issparse(a):
raise ValueError('unknown matrix type: %s' % type(a))
rep = 'coordinate'
rows, cols = a.shape
typecode = a.dtype.char
if precision is None:
if typecode in 'fF':
precision = 8
else:
precision = 16
if field is None:
kind = a.dtype.kind
if kind == 'i':
if not can_cast(a.dtype, 'intp'):
raise OverflowError("mmwrite does not support integer "
"dtypes larger than native 'intp'.")
field = 'integer'
elif kind == 'f':
field = 'real'
elif kind == 'c':
field = 'complex'
elif kind == 'u':
field = 'unsigned-integer'
else:
raise TypeError('unexpected dtype kind ' + kind)
if symmetry is None:
symmetry = self._get_symmetry(a)
# validate rep, field, and symmetry
self.__class__._validate_format(rep)
self.__class__._validate_field(field)
self.__class__._validate_symmetry(symmetry)
# write initial header line
data = f'%%MatrixMarket matrix {rep} {field} {symmetry}\n'
stream.write(data.encode('latin1'))
# write comments
for line in comment.split('\n'):
data = '%%%s\n' % (line)
stream.write(data.encode('latin1'))
template = self._field_template(field, precision)
# write dense format
if rep == self.FORMAT_ARRAY:
# write shape spec
data = '%i %i\n' % (rows, cols)
stream.write(data.encode('latin1'))
if field in (self.FIELD_INTEGER, self.FIELD_REAL,
self.FIELD_UNSIGNED):
if symmetry == self.SYMMETRY_GENERAL:
for j in range(cols):
for i in range(rows):
data = template % a[i, j]
stream.write(data.encode('latin1'))
elif symmetry == self.SYMMETRY_SKEW_SYMMETRIC:
for j in range(cols):
for i in range(j + 1, rows):
data = template % a[i, j]
stream.write(data.encode('latin1'))
else:
for j in range(cols):
for i in range(j, rows):
data = template % a[i, j]
stream.write(data.encode('latin1'))
elif field == self.FIELD_COMPLEX:
if symmetry == self.SYMMETRY_GENERAL:
for j in range(cols):
for i in range(rows):
aij = a[i, j]
data = template % (real(aij), imag(aij))
stream.write(data.encode('latin1'))
else:
for j in range(cols):
for i in range(j, rows):
aij = a[i, j]
data = template % (real(aij), imag(aij))
stream.write(data.encode('latin1'))
elif field == self.FIELD_PATTERN:
raise ValueError('pattern type inconsisted with dense format')
else:
raise TypeError('Unknown field type %s' % field)
# write sparse format
else:
coo = a.tocoo() # convert to COOrdinate format
# if symmetry format used, remove values above main diagonal
if symmetry != self.SYMMETRY_GENERAL:
lower_triangle_mask = coo.row >= coo.col
coo = coo_matrix((coo.data[lower_triangle_mask],
(coo.row[lower_triangle_mask],
coo.col[lower_triangle_mask])),
shape=coo.shape)
# write shape spec
data = '%i %i %i\n' % (rows, cols, coo.nnz)
stream.write(data.encode('latin1'))
template = self._field_template(field, precision-1)
if field == self.FIELD_PATTERN:
for r, c in zip(coo.row+1, coo.col+1):
data = "%i %i\n" % (r, c)
stream.write(data.encode('latin1'))
elif field in (self.FIELD_INTEGER, self.FIELD_REAL,
self.FIELD_UNSIGNED):
for r, c, d in zip(coo.row+1, coo.col+1, coo.data):
data = ("%i %i " % (r, c)) + (template % d)
stream.write(data.encode('latin1'))
elif field == self.FIELD_COMPLEX:
for r, c, d in zip(coo.row+1, coo.col+1, coo.data):
data = ("%i %i " % (r, c)) + (template % (d.real, d.imag))
stream.write(data.encode('latin1'))
else:
raise TypeError('Unknown field type %s' % field)
def _is_fromfile_compatible(stream):
    """
    Check whether `stream` is compatible with numpy.fromfile.
    Passing a gzipped file object to ``fromfile/fromstring`` doesn't work with
    Python 3.
    """
    incompatible = []
    for mod_name, cls_name in (('gzip', 'GzipFile'), ('bz2', 'BZ2File')):
        try:
            module = __import__(mod_name)
        except ImportError:
            continue
        incompatible.append(getattr(module, cls_name))
    return not isinstance(stream, tuple(incompatible))
| 31,900
| 32.161123
| 81
|
py
|
scipy
|
scipy-main/scipy/io/setup.py
|
def configuration(parent_package='',top_path=None):
    # Build configuration for scipy.io using the legacy numpy.distutils flow.
    from numpy.distutils.misc_util import Configuration
    config = Configuration('io', parent_package, top_path)
    # Fortran helper extension used by the scipy.io test suite.
    config.add_extension('_test_fortran',
                         sources=['_test_fortran.pyf', '_test_fortran.f'])
    config.add_data_dir('tests')
    config.add_subpackage('matlab')
    config.add_subpackage('arff')
    config.add_subpackage('_harwell_boeing')
    return config
# Allow building this subpackage standalone.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 573
| 30.888889
| 74
|
py
|
scipy
|
scipy-main/scipy/io/netcdf.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io` namespace for importing the functions
# included below.
import warnings
from . import _netcdf
__all__ = [ # noqa: F822
'netcdf_file', 'netcdf_variable',
'array', 'LITTLE_ENDIAN', 'IS_PYPY', 'ABSENT', 'ZERO',
'NC_BYTE', 'NC_CHAR', 'NC_SHORT', 'NC_INT', 'NC_FLOAT',
'NC_DOUBLE', 'NC_DIMENSION', 'NC_VARIABLE', 'NC_ATTRIBUTE',
'FILL_BYTE', 'FILL_CHAR', 'FILL_SHORT', 'FILL_INT', 'FILL_FLOAT',
'FILL_DOUBLE', 'TYPEMAP', 'FILLMAP', 'REVERSE', 'NetCDFFile',
'NetCDFVariable'
]
def __dir__():
    # Advertise only the deprecated re-exported names (PEP 562).
    return __all__
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): lazily forward attribute access to
    # scipy.io._netcdf while emitting a DeprecationWarning.
    if name not in __all__:
        raise AttributeError(
            "scipy.io.netcdf is deprecated and has no attribute "
            f"{name}. Try looking in scipy.io instead.")
    warnings.warn(f"Please use `{name}` from the `scipy.io` namespace, "
                  "the `scipy.io.netcdf` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_netcdf, name)
| 1,080
| 30.794118
| 76
|
py
|
scipy
|
scipy-main/scipy/io/harwell_boeing.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io` namespace for importing the functions
# included below.
import warnings
from . import _harwell_boeing
__all__ = [ # noqa: F822
'MalformedHeader', 'hb_read', 'hb_write', 'HBInfo',
'HBFile', 'HBMatrixType', 'FortranFormatParser', 'IntFormat',
'ExpFormat', 'BadFortranFormat', 'hb'
]
def __dir__():
    # Advertise only the deprecated re-exported names (PEP 562).
    return __all__
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): lazily forward attribute access to
    # scipy.io._harwell_boeing while emitting a DeprecationWarning.
    if name not in __all__:
        raise AttributeError(
            "scipy.io.harwell_boeing is deprecated and has no attribute "
            f"{name}. Try looking in scipy.io instead.")
    warnings.warn(f"Please use `{name}` from the `scipy.io` namespace, "
                  "the `scipy.io.harwell_boeing` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_harwell_boeing, name)
| 898
| 28.966667
| 76
|
py
|
scipy
|
scipy-main/scipy/io/_fortran.py
|
"""
Module to read / write Fortran unformatted sequential files.
This is in the spirit of code written by Neil Martinsen-Burrell and Joe Zuntz.
"""
import warnings
import numpy as np
__all__ = ['FortranFile', 'FortranEOFError', 'FortranFormattingError']
class FortranEOFError(TypeError, OSError):
    """Indicates that the file ended properly.
    This error descends from TypeError because the code used to raise
    TypeError (and this was the only way to know that the file had
    ended) so users might have ``except TypeError:``.
    """
    # No extra behavior needed; the type itself carries the meaning.
    pass
class FortranFormattingError(TypeError, OSError):
    """Indicates that the file ended mid-record.
    Descends from TypeError for backward compatibility.
    """
    # No extra behavior needed; the type itself carries the meaning.
    pass
class FortranFile:
"""
A file object for unformatted sequential files from Fortran code.
Parameters
----------
filename : file or str
Open file object or filename.
mode : {'r', 'w'}, optional
Read-write mode, default is 'r'.
header_dtype : dtype, optional
Data type of the header. Size and endiness must match the input/output file.
Notes
-----
These files are broken up into records of unspecified types. The size of
each record is given at the start (although the size of this header is not
standard) and the data is written onto disk without any formatting. Fortran
compilers supporting the BACKSPACE statement will write a second copy of
the size to facilitate backwards seeking.
This class only supports files written with both sizes for the record.
It also does not support the subrecords used in Intel and gfortran compilers
for records which are greater than 2GB with a 4-byte header.
An example of an unformatted sequential file in Fortran would be written as::
OPEN(1, FILE=myfilename, FORM='unformatted')
WRITE(1) myvariable
Since this is a non-standard file format, whose contents depend on the
compiler and the endianness of the machine, caution is advised. Files from
gfortran 4.8.0 and gfortran 4.1.2 on x86_64 are known to work.
Consider using Fortran direct-access files or files from the newer Stream
I/O, which can be easily read by `numpy.fromfile`.
Examples
--------
To create an unformatted sequential Fortran file:
>>> from scipy.io import FortranFile
>>> import numpy as np
>>> f = FortranFile('test.unf', 'w')
>>> f.write_record(np.array([1,2,3,4,5], dtype=np.int32))
>>> f.write_record(np.linspace(0,1,20).reshape((5,4)).T)
>>> f.close()
To read this file:
>>> f = FortranFile('test.unf', 'r')
>>> print(f.read_ints(np.int32))
[1 2 3 4 5]
>>> print(f.read_reals(float).reshape((5,4), order="F"))
[[0. 0.05263158 0.10526316 0.15789474]
[0.21052632 0.26315789 0.31578947 0.36842105]
[0.42105263 0.47368421 0.52631579 0.57894737]
[0.63157895 0.68421053 0.73684211 0.78947368]
[0.84210526 0.89473684 0.94736842 1. ]]
>>> f.close()
Or, in Fortran::
integer :: a(5), i
double precision :: b(5,4)
open(1, file='test.unf', form='unformatted')
read(1) a
read(1) b
close(1)
write(*,*) a
do i = 1, 5
write(*,*) b(i,:)
end do
"""
    def __init__(self, filename, mode='r', header_dtype=np.uint32):
        """Open *filename* (path or open file object) for Fortran record I/O."""
        if header_dtype is None:
            raise ValueError('Must specify dtype')
        header_dtype = np.dtype(header_dtype)
        if header_dtype.kind != 'u':
            # Record markers are byte counts; a signed dtype is suspicious.
            warnings.warn("Given a dtype which is not unsigned.")
        if mode not in 'rw' or len(mode) != 1:
            raise ValueError('mode must be either r or w')
        if hasattr(filename, 'seek'):
            # Already a file-like object; use it as-is.
            self._fp = filename
        else:
            self._fp = open(filename, '%sb' % mode)
        self._header_dtype = header_dtype
    def _read_size(self, eof_ok=False):
        """Read one record-size marker, raising on clean or mid-record EOF."""
        n = self._header_dtype.itemsize
        b = self._fp.read(n)
        if (not b) and eof_ok:
            # Hitting EOF exactly between records is a normal end of file.
            raise FortranEOFError("End of file occurred at end of record")
        elif len(b) < n:
            raise FortranFormattingError(
                "End of file in the middle of the record size")
        return int(np.frombuffer(b, dtype=self._header_dtype, count=1)[0])
def write_record(self, *items):
"""
Write a record (including sizes) to the file.
Parameters
----------
*items : array_like
The data arrays to write.
Notes
-----
Writes data items to a file::
write_record(a.T, b.T, c.T, ...)
write(1) a, b, c, ...
Note that data in multidimensional arrays is written in
row-major order --- to make them read correctly by Fortran
programs, you need to transpose the arrays yourself when
writing them.
"""
items = tuple(np.asarray(item) for item in items)
total_size = sum(item.nbytes for item in items)
nb = np.array([total_size], dtype=self._header_dtype)
nb.tofile(self._fp)
for item in items:
item.tofile(self._fp)
nb.tofile(self._fp)
def read_record(self, *dtypes, **kwargs):
    """
    Reads a record of a given type from the file.

    Parameters
    ----------
    *dtypes : dtypes, optional
        Data type(s) specifying the size and endianness of the data.

    Returns
    -------
    data : ndarray
        A 1-D array object.

    Raises
    ------
    FortranEOFError
        To signal that no further records are available
    FortranFormattingError
        To signal that the end of the file was encountered
        part-way through a record

    Notes
    -----
    If the record contains a multidimensional array, you can specify
    the size in the dtype. For example::

        INTEGER var(5,4)

    can be read with::

        read_record('(4,5)i4').T

    Note that this function does **not** assume the file data is in Fortran
    column major order, so you need to (i) swap the order of dimensions
    when reading and (ii) transpose the resulting array.

    Alternatively, you can read the data as a 1-D array and handle the
    ordering yourself. For example::

        read_record('i4').reshape(5, 4, order='F')

    For records that contain several variables or mixed types (as opposed
    to single scalar or array types), give them as separate arguments::

        double precision :: a
        integer :: b
        write(1) a, b

        record = f.read_record('<f4', '<i4')
        a = record[0]  # first number
        b = record[1]  # second number

    and if any of the variables are arrays, the shape can be specified as
    the third item in the relevant dtype::

        double precision :: a
        integer :: b(3,4)
        write(1) a, b

        record = f.read_record('<f4', np.dtype(('<i4', (4, 3))))
        a = record[0]
        b = record[1].T

    NumPy also supports a short syntax for this kind of type::

        record = f.read_record('<f4', '(3,3)<i4')

    See Also
    --------
    read_reals
    read_ints
    """
    # Accept a single dtype either positionally or as dtype= keyword.
    dtype = kwargs.pop('dtype', None)
    if kwargs:
        raise ValueError(f"Unknown keyword arguments {tuple(kwargs.keys())}")

    if dtype is not None:
        dtypes = dtypes + (dtype,)
    elif not dtypes:
        raise ValueError('Must specify at least one dtype')

    # Leading record marker; EOF here means "no more records", which is OK.
    first_size = self._read_size(eof_ok=True)

    dtypes = tuple(np.dtype(dtype) for dtype in dtypes)
    block_size = sum(dtype.itemsize for dtype in dtypes)

    num_blocks, remainder = divmod(first_size, block_size)
    if remainder != 0:
        raise ValueError('Size obtained ({}) is not a multiple of the '
                         'dtypes given ({}).'.format(first_size, block_size))

    if len(dtypes) != 1 and first_size != block_size:
        # Fortran does not write mixed type array items in interleaved order,
        # and it's not possible to guess the sizes of the arrays that were written.
        # The user must specify the exact sizes of each of the arrays.
        raise ValueError('Size obtained ({}) does not match with the expected '
                         'size ({}) of multi-item record'.format(first_size, block_size))

    data = []
    for dtype in dtypes:
        r = np.fromfile(self._fp, dtype=dtype, count=num_blocks)
        if len(r) != num_blocks:
            raise FortranFormattingError(
                "End of file in the middle of a record")
        if dtype.shape != ():
            # Squeeze outermost block dimension for array items
            if num_blocks == 1:
                assert r.shape == (1,) + dtype.shape
                r = r[0]

        data.append(r)

    # Trailing record marker must equal the leading one.
    second_size = self._read_size()
    if first_size != second_size:
        raise ValueError('Sizes do not agree in the header and footer for '
                         'this record - check header dtype')

    # Unpack result
    if len(dtypes) == 1:
        return data[0]
    else:
        return tuple(data)
def read_ints(self, dtype='i4'):
    """Read the next record as integers (``INTEGER*4`` by default).

    Parameters
    ----------
    dtype : dtype, optional
        Data type specifying the size and endianness of the data.

    Returns
    -------
    data : ndarray
        A 1-D array object.

    See Also
    --------
    read_reals
    read_record
    """
    # Thin convenience wrapper around the generic record reader.
    return self.read_record(dtype)
def read_reals(self, dtype='f8'):
    """Read the next record as floating point numbers (``real*8`` by default).

    Parameters
    ----------
    dtype : dtype, optional
        Data type specifying the size and endianness of the data.

    Returns
    -------
    data : ndarray
        A 1-D array object.

    See Also
    --------
    read_ints
    read_record
    """
    # Thin convenience wrapper around the generic record reader.
    return self.read_record(dtype)
def close(self):
    """Close the underlying file.

    Calling any other method after closing is unsupported. The class also
    supports the ``with`` statement, which closes the file automatically.
    """
    self._fp.close()
def __enter__(self):
    # Context-manager entry: the file is already open, hand back self.
    return self
def __exit__(self, type, value, tb):
    # Context-manager exit: always close the underlying file,
    # regardless of whether an exception is propagating.
    self.close()
| 10,891
| 29.68169
| 93
|
py
|
scipy
|
scipy-main/scipy/io/wavfile.py
|
"""
Module to read / write wav files using NumPy arrays
Functions
---------
`read`: Return the sample rate (in samples/sec) and data from a WAV file.
`write`: Write a NumPy array as a WAV file.
"""
import io
import sys
import numpy
import struct
import warnings
from enum import IntEnum
__all__ = [
'WavFileWarning',
'read',
'write'
]
class WavFileWarning(UserWarning):
    """Warning category emitted by `read` for recoverable WAV-file problems
    (premature EOF, broken or unrecognized chunks)."""
    pass
class WAVE_FORMAT(IntEnum):
    """
    WAVE form wFormatTag IDs

    Complete list is in mmreg.h in Windows 10 SDK. ALAC and OPUS are the
    newest additions, in v10.0.14393 2016-07
    """
    # Only PCM and IEEE_FLOAT are decodable by this module (see
    # KNOWN_WAVE_FORMATS below); the rest exist so unsupported files can be
    # reported by name rather than by a bare hex tag.
    UNKNOWN = 0x0000
    PCM = 0x0001
    ADPCM = 0x0002
    IEEE_FLOAT = 0x0003
    VSELP = 0x0004
    IBM_CVSD = 0x0005
    ALAW = 0x0006
    MULAW = 0x0007
    DTS = 0x0008
    DRM = 0x0009
    WMAVOICE9 = 0x000A
    WMAVOICE10 = 0x000B
    OKI_ADPCM = 0x0010
    DVI_ADPCM = 0x0011
    IMA_ADPCM = 0x0011  # Duplicate
    MEDIASPACE_ADPCM = 0x0012
    SIERRA_ADPCM = 0x0013
    G723_ADPCM = 0x0014
    DIGISTD = 0x0015
    DIGIFIX = 0x0016
    DIALOGIC_OKI_ADPCM = 0x0017
    MEDIAVISION_ADPCM = 0x0018
    CU_CODEC = 0x0019
    HP_DYN_VOICE = 0x001A
    YAMAHA_ADPCM = 0x0020
    SONARC = 0x0021
    DSPGROUP_TRUESPEECH = 0x0022
    ECHOSC1 = 0x0023
    AUDIOFILE_AF36 = 0x0024
    APTX = 0x0025
    AUDIOFILE_AF10 = 0x0026
    PROSODY_1612 = 0x0027
    LRC = 0x0028
    DOLBY_AC2 = 0x0030
    GSM610 = 0x0031
    MSNAUDIO = 0x0032
    ANTEX_ADPCME = 0x0033
    CONTROL_RES_VQLPC = 0x0034
    DIGIREAL = 0x0035
    DIGIADPCM = 0x0036
    CONTROL_RES_CR10 = 0x0037
    NMS_VBXADPCM = 0x0038
    CS_IMAADPCM = 0x0039
    ECHOSC3 = 0x003A
    ROCKWELL_ADPCM = 0x003B
    ROCKWELL_DIGITALK = 0x003C
    XEBEC = 0x003D
    G721_ADPCM = 0x0040
    G728_CELP = 0x0041
    MSG723 = 0x0042
    INTEL_G723_1 = 0x0043
    INTEL_G729 = 0x0044
    SHARP_G726 = 0x0045
    MPEG = 0x0050
    RT24 = 0x0052
    PAC = 0x0053
    MPEGLAYER3 = 0x0055
    LUCENT_G723 = 0x0059
    CIRRUS = 0x0060
    ESPCM = 0x0061
    VOXWARE = 0x0062
    CANOPUS_ATRAC = 0x0063
    G726_ADPCM = 0x0064
    G722_ADPCM = 0x0065
    DSAT = 0x0066
    DSAT_DISPLAY = 0x0067
    VOXWARE_BYTE_ALIGNED = 0x0069
    VOXWARE_AC8 = 0x0070
    VOXWARE_AC10 = 0x0071
    VOXWARE_AC16 = 0x0072
    VOXWARE_AC20 = 0x0073
    VOXWARE_RT24 = 0x0074
    VOXWARE_RT29 = 0x0075
    VOXWARE_RT29HW = 0x0076
    VOXWARE_VR12 = 0x0077
    VOXWARE_VR18 = 0x0078
    VOXWARE_TQ40 = 0x0079
    VOXWARE_SC3 = 0x007A
    VOXWARE_SC3_1 = 0x007B
    SOFTSOUND = 0x0080
    VOXWARE_TQ60 = 0x0081
    MSRT24 = 0x0082
    G729A = 0x0083
    MVI_MVI2 = 0x0084
    DF_G726 = 0x0085
    DF_GSM610 = 0x0086
    ISIAUDIO = 0x0088
    ONLIVE = 0x0089
    MULTITUDE_FT_SX20 = 0x008A
    INFOCOM_ITS_G721_ADPCM = 0x008B
    CONVEDIA_G729 = 0x008C
    CONGRUENCY = 0x008D
    SBC24 = 0x0091
    DOLBY_AC3_SPDIF = 0x0092
    MEDIASONIC_G723 = 0x0093
    PROSODY_8KBPS = 0x0094
    ZYXEL_ADPCM = 0x0097
    PHILIPS_LPCBB = 0x0098
    PACKED = 0x0099
    MALDEN_PHONYTALK = 0x00A0
    RACAL_RECORDER_GSM = 0x00A1
    RACAL_RECORDER_G720_A = 0x00A2
    RACAL_RECORDER_G723_1 = 0x00A3
    RACAL_RECORDER_TETRA_ACELP = 0x00A4
    NEC_AAC = 0x00B0
    RAW_AAC1 = 0x00FF
    RHETOREX_ADPCM = 0x0100
    IRAT = 0x0101
    VIVO_G723 = 0x0111
    VIVO_SIREN = 0x0112
    PHILIPS_CELP = 0x0120
    PHILIPS_GRUNDIG = 0x0121
    DIGITAL_G723 = 0x0123
    SANYO_LD_ADPCM = 0x0125
    SIPROLAB_ACEPLNET = 0x0130
    SIPROLAB_ACELP4800 = 0x0131
    SIPROLAB_ACELP8V3 = 0x0132
    SIPROLAB_G729 = 0x0133
    SIPROLAB_G729A = 0x0134
    SIPROLAB_KELVIN = 0x0135
    VOICEAGE_AMR = 0x0136
    G726ADPCM = 0x0140
    DICTAPHONE_CELP68 = 0x0141
    DICTAPHONE_CELP54 = 0x0142
    QUALCOMM_PUREVOICE = 0x0150
    QUALCOMM_HALFRATE = 0x0151
    TUBGSM = 0x0155
    MSAUDIO1 = 0x0160
    WMAUDIO2 = 0x0161
    WMAUDIO3 = 0x0162
    WMAUDIO_LOSSLESS = 0x0163
    WMASPDIF = 0x0164
    UNISYS_NAP_ADPCM = 0x0170
    UNISYS_NAP_ULAW = 0x0171
    UNISYS_NAP_ALAW = 0x0172
    UNISYS_NAP_16K = 0x0173
    SYCOM_ACM_SYC008 = 0x0174
    SYCOM_ACM_SYC701_G726L = 0x0175
    SYCOM_ACM_SYC701_CELP54 = 0x0176
    SYCOM_ACM_SYC701_CELP68 = 0x0177
    KNOWLEDGE_ADVENTURE_ADPCM = 0x0178
    FRAUNHOFER_IIS_MPEG2_AAC = 0x0180
    DTS_DS = 0x0190
    CREATIVE_ADPCM = 0x0200
    CREATIVE_FASTSPEECH8 = 0x0202
    CREATIVE_FASTSPEECH10 = 0x0203
    UHER_ADPCM = 0x0210
    ULEAD_DV_AUDIO = 0x0215
    ULEAD_DV_AUDIO_1 = 0x0216
    QUARTERDECK = 0x0220
    ILINK_VC = 0x0230
    RAW_SPORT = 0x0240
    ESST_AC3 = 0x0241
    GENERIC_PASSTHRU = 0x0249
    IPI_HSX = 0x0250
    IPI_RPELP = 0x0251
    CS2 = 0x0260
    SONY_SCX = 0x0270
    SONY_SCY = 0x0271
    SONY_ATRAC3 = 0x0272
    SONY_SPC = 0x0273
    TELUM_AUDIO = 0x0280
    TELUM_IA_AUDIO = 0x0281
    NORCOM_VOICE_SYSTEMS_ADPCM = 0x0285
    FM_TOWNS_SND = 0x0300
    MICRONAS = 0x0350
    MICRONAS_CELP833 = 0x0351
    BTV_DIGITAL = 0x0400
    INTEL_MUSIC_CODER = 0x0401
    INDEO_AUDIO = 0x0402
    QDESIGN_MUSIC = 0x0450
    ON2_VP7_AUDIO = 0x0500
    ON2_VP6_AUDIO = 0x0501
    VME_VMPCM = 0x0680
    TPC = 0x0681
    LIGHTWAVE_LOSSLESS = 0x08AE
    OLIGSM = 0x1000
    OLIADPCM = 0x1001
    OLICELP = 0x1002
    OLISBC = 0x1003
    OLIOPR = 0x1004
    LH_CODEC = 0x1100
    LH_CODEC_CELP = 0x1101
    LH_CODEC_SBC8 = 0x1102
    LH_CODEC_SBC12 = 0x1103
    LH_CODEC_SBC16 = 0x1104
    NORRIS = 0x1400
    ISIAUDIO_2 = 0x1401
    SOUNDSPACE_MUSICOMPRESS = 0x1500
    MPEG_ADTS_AAC = 0x1600
    MPEG_RAW_AAC = 0x1601
    MPEG_LOAS = 0x1602
    NOKIA_MPEG_ADTS_AAC = 0x1608
    NOKIA_MPEG_RAW_AAC = 0x1609
    VODAFONE_MPEG_ADTS_AAC = 0x160A
    VODAFONE_MPEG_RAW_AAC = 0x160B
    MPEG_HEAAC = 0x1610
    VOXWARE_RT24_SPEECH = 0x181C
    SONICFOUNDRY_LOSSLESS = 0x1971
    INNINGS_TELECOM_ADPCM = 0x1979
    LUCENT_SX8300P = 0x1C07
    LUCENT_SX5363S = 0x1C0C
    CUSEEME = 0x1F03
    NTCSOFT_ALF2CM_ACM = 0x1FC4
    DVM = 0x2000
    DTS2 = 0x2001
    MAKEAVIS = 0x3313
    DIVIO_MPEG4_AAC = 0x4143
    NOKIA_ADAPTIVE_MULTIRATE = 0x4201
    DIVIO_G726 = 0x4243
    LEAD_SPEECH = 0x434C
    LEAD_VORBIS = 0x564C
    WAVPACK_AUDIO = 0x5756
    OGG_VORBIS_MODE_1 = 0x674F
    OGG_VORBIS_MODE_2 = 0x6750
    OGG_VORBIS_MODE_3 = 0x6751
    OGG_VORBIS_MODE_1_PLUS = 0x676F
    OGG_VORBIS_MODE_2_PLUS = 0x6770
    OGG_VORBIS_MODE_3_PLUS = 0x6771
    ALAC = 0x6C61
    _3COM_NBX = 0x7000  # Can't have leading digit
    OPUS = 0x704F
    FAAD_AAC = 0x706D
    AMR_NB = 0x7361
    AMR_WB = 0x7362
    AMR_WP = 0x7363
    GSM_AMR_CBR = 0x7A21
    GSM_AMR_VBR_SID = 0x7A22
    COMVERSE_INFOSYS_G723_1 = 0xA100
    COMVERSE_INFOSYS_AVQSBC = 0xA101
    COMVERSE_INFOSYS_SBC = 0xA102
    SYMBOL_G729_A = 0xA103
    VOICEAGE_AMR_WB = 0xA104
    INGENIENT_G726 = 0xA105
    MPEG4_AAC = 0xA106
    ENCORE_G726 = 0xA107
    ZOLL_ASAO = 0xA108
    SPEEX_VOICE = 0xA109
    VIANIX_MASC = 0xA10A
    WM9_SPECTRUM_ANALYZER = 0xA10B
    WMF_SPECTRUM_ANAYZER = 0xA10C
    GSM_610 = 0xA10D
    GSM_620 = 0xA10E
    GSM_660 = 0xA10F
    GSM_690 = 0xA110
    GSM_ADAPTIVE_MULTIRATE_WB = 0xA111
    POLYCOM_G722 = 0xA112
    POLYCOM_G728 = 0xA113
    POLYCOM_G729_A = 0xA114
    POLYCOM_SIREN = 0xA115
    GLOBAL_IP_ILBC = 0xA116
    RADIOTIME_TIME_SHIFT_RADIO = 0xA117
    NICE_ACA = 0xA118
    NICE_ADPCM = 0xA119
    VOCORD_G721 = 0xA11A
    VOCORD_G726 = 0xA11B
    VOCORD_G722_1 = 0xA11C
    VOCORD_G728 = 0xA11D
    VOCORD_G729 = 0xA11E
    VOCORD_G729_A = 0xA11F
    VOCORD_G723_1 = 0xA120
    VOCORD_LBC = 0xA121
    NICE_G728 = 0xA122
    FRACE_TELECOM_G729 = 0xA123
    CODIAN = 0xA124
    FLAC = 0xF1AC
    EXTENSIBLE = 0xFFFE
    DEVELOPMENT = 0xFFFF
# Only uncompressed integer PCM and IEEE-float payloads can be decoded here;
# any other tag is rejected via _raise_bad_format.
KNOWN_WAVE_FORMATS = {WAVE_FORMAT.PCM, WAVE_FORMAT.IEEE_FLOAT}
def _raise_bad_format(format_tag):
    """Raise a ValueError naming the unsupported wave format tag."""
    try:
        format_name = WAVE_FORMAT(format_tag).name
    except ValueError:
        # Tag not in the enum at all; report the raw hex value instead.
        format_name = f'{format_tag:#06x}'
    supported = ', '.join(x.name for x in KNOWN_WAVE_FORMATS)
    raise ValueError(f"Unknown wave file format: {format_name}. Supported "
                     "formats: " + supported)
def _read_fmt_chunk(fid, is_big_endian):
    """
    Returns
    -------
    size : int
        size of format subchunk in bytes (minus 8 for "fmt " and itself)
    format_tag : int
        PCM, float, or compressed format
    channels : int
        number of channels
    fs : int
        sampling frequency in samples per second
    bytes_per_second : int
        overall byte rate for the file
    block_align : int
        bytes per sample, including all channels
    bit_depth : int
        bits per sample

    Notes
    -----
    Assumes file pointer is immediately after the 'fmt ' id
    """
    if is_big_endian:
        fmt = '>'
    else:
        fmt = '<'

    size = struct.unpack(fmt+'I', fid.read(4))[0]

    # The mandatory WAVEFORMAT fields occupy 16 bytes; anything smaller
    # cannot be a valid fmt chunk.
    if size < 16:
        raise ValueError("Binary structure of wave file is not compliant")

    res = struct.unpack(fmt+'HHIIHH', fid.read(16))
    bytes_read = 16

    format_tag, channels, fs, bytes_per_second, block_align, bit_depth = res

    # WAVE_FORMAT_EXTENSIBLE: the real format tag is buried in a GUID
    # inside the extension block.
    if format_tag == WAVE_FORMAT.EXTENSIBLE and size >= (16+2):
        ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0]
        bytes_read += 2
        if ext_chunk_size >= 22:
            extensible_chunk_data = fid.read(22)
            bytes_read += 22
            raw_guid = extensible_chunk_data[2+4:2+4+16]
            # GUID template {XXXXXXXX-0000-0010-8000-00AA00389B71} (RFC-2361)
            # MS GUID byte order: first three groups are native byte order,
            # rest is Big Endian
            if is_big_endian:
                tail = b'\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71'
            else:
                tail = b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71'
            if raw_guid.endswith(tail):
                format_tag = struct.unpack(fmt+'I', raw_guid[:4])[0]
        else:
            raise ValueError("Binary structure of wave file is not compliant")

    if format_tag not in KNOWN_WAVE_FORMATS:
        _raise_bad_format(format_tag)

    # move file pointer to next chunk
    if size > bytes_read:
        fid.read(size - bytes_read)

    # fmt should always be 16, 18 or 40, but handle it just in case
    _handle_pad_byte(fid, size)

    # Cross-check the redundant byte-rate field for plain PCM.
    if format_tag == WAVE_FORMAT.PCM:
        if bytes_per_second != fs * block_align:
            raise ValueError("WAV header is invalid: nAvgBytesPerSec must"
                             " equal product of nSamplesPerSec and"
                             " nBlockAlign, but file has nSamplesPerSec ="
                             f" {fs}, nBlockAlign = {block_align}, and"
                             f" nAvgBytesPerSec = {bytes_per_second}")

    return (size, format_tag, channels, fs, bytes_per_second, block_align,
            bit_depth)
def _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian,
                     block_align, mmap=False):
    """
    Notes
    -----
    Assumes file pointer is immediately after the 'data' id

    It's possible to not use all available bits in a container, or to store
    samples in a container bigger than necessary, so bytes_per_sample uses
    the actual reported container size (nBlockAlign / nChannels). Real-world
    examples:

    Adobe Audition's "24-bit packed int (type 1, 20-bit)"
        nChannels = 2, nBlockAlign = 6, wBitsPerSample = 20

    http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Samples/AFsp/M1F1-int12-AFsp.wav
    is:
        nChannels = 2, nBlockAlign = 4, wBitsPerSample = 12

    http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Docs/multichaudP.pdf
    gives an example of:
        nChannels = 2, nBlockAlign = 8, wBitsPerSample = 20
    """
    if is_big_endian:
        fmt = '>'
    else:
        fmt = '<'

    # Size of the data subchunk in bytes
    size = struct.unpack(fmt+'I', fid.read(4))[0]

    # Number of bytes per sample (sample container size)
    bytes_per_sample = block_align // channels
    n_samples = size // bytes_per_sample

    if format_tag == WAVE_FORMAT.PCM:
        if 1 <= bit_depth <= 8:
            dtype = 'u1'  # WAV of 8-bit integer or less are unsigned
        elif bytes_per_sample in {3, 5, 6, 7}:
            # No compatible dtype. Load as raw bytes for reshaping later.
            dtype = 'V1'
        elif bit_depth <= 64:
            # Remaining bit depths can map directly to signed numpy dtypes
            dtype = f'{fmt}i{bytes_per_sample}'
        else:
            raise ValueError("Unsupported bit depth: the WAV file "
                             f"has {bit_depth}-bit integer data.")
    elif format_tag == WAVE_FORMAT.IEEE_FLOAT:
        if bit_depth in {32, 64}:
            dtype = f'{fmt}f{bytes_per_sample}'
        else:
            raise ValueError("Unsupported bit depth: the WAV file "
                             f"has {bit_depth}-bit floating-point data.")
    else:
        _raise_bad_format(format_tag)

    start = fid.tell()
    if not mmap:
        try:
            # For raw-byte containers, count is in bytes, not samples.
            count = size if dtype == 'V1' else n_samples
            data = numpy.fromfile(fid, dtype=dtype, count=count)
        except io.UnsupportedOperation:  # not a C-like file
            fid.seek(start, 0)  # just in case it seeked, though it shouldn't
            data = numpy.frombuffer(fid.read(size), dtype=dtype)

        if dtype == 'V1':
            # Rearrange raw bytes into smallest compatible numpy dtype
            dt = f'{fmt}i4' if bytes_per_sample == 3 else f'{fmt}i8'
            a = numpy.zeros((len(data) // bytes_per_sample, numpy.dtype(dt).itemsize),
                            dtype='V1')
            # Left-justify the sample bytes inside the wider container so the
            # sign/magnitude lands in the most significant bits.
            if is_big_endian:
                a[:, :bytes_per_sample] = data.reshape((-1, bytes_per_sample))
            else:
                a[:, -bytes_per_sample:] = data.reshape((-1, bytes_per_sample))
            data = a.view(dt).reshape(a.shape[:-1])
    else:
        if bytes_per_sample in {1, 2, 4, 8}:
            start = fid.tell()
            data = numpy.memmap(fid, dtype=dtype, mode='c', offset=start,
                                shape=(n_samples,))
            fid.seek(start + size)
        else:
            raise ValueError("mmap=True not compatible with "
                             f"{bytes_per_sample}-byte container size.")

    _handle_pad_byte(fid, size)

    if channels > 1:
        data = data.reshape(-1, channels)
    return data
def _skip_unknown_chunk(fid, is_big_endian):
    """Read a chunk's size field and seek past its payload and pad byte."""
    fmt = '>I' if is_big_endian else '<I'

    raw = fid.read(4)
    # An empty read means we hit EOF; unpacking would raise, and in the
    # zero-size case there is nothing to seek over anyway.
    if raw:
        size = struct.unpack(fmt, raw)[0]
        fid.seek(size, 1)
        _handle_pad_byte(fid, size)
def _read_riff_chunk(fid):
str1 = fid.read(4) # File signature
if str1 == b'RIFF':
is_big_endian = False
fmt = '<I'
elif str1 == b'RIFX':
is_big_endian = True
fmt = '>I'
else:
# There are also .wav files with "FFIR" or "XFIR" signatures?
raise ValueError(f"File format {repr(str1)} not understood. Only "
"'RIFF' and 'RIFX' supported.")
# Size of entire file
file_size = struct.unpack(fmt, fid.read(4))[0] + 8
str2 = fid.read(4)
if str2 != b'WAVE':
raise ValueError(f"Not a WAV file. RIFF form type is {repr(str2)}.")
return file_size, is_big_endian
def _handle_pad_byte(fid, size):
# "If the chunk size is an odd number of bytes, a pad byte with value zero
# is written after ckData." So we need to seek past this after each chunk.
if size % 2:
fid.seek(1, 1)
def read(filename, mmap=False):
    """
    Open a WAV file.

    Return the sample rate (in samples/sec) and data from an LPCM WAV file.

    Parameters
    ----------
    filename : string or open file handle
        Input WAV file.
    mmap : bool, optional
        Whether to read data as memory-mapped (default: False). Not compatible
        with some bit depths; see Notes. Only to be used on real files.

        .. versionadded:: 0.12.0

    Returns
    -------
    rate : int
        Sample rate of WAV file.
    data : numpy array
        Data read from WAV file. Data-type is determined from the file;
        see Notes. Data is 1-D for 1-channel WAV, or 2-D of shape
        (Nsamples, Nchannels) otherwise. If a file-like input without a
        C-like file descriptor (e.g., :class:`python:io.BytesIO`) is
        passed, this will not be writeable.

    Notes
    -----
    Common data types: [1]_

    ===================== =========== =========== =============
         WAV format            Min          Max       NumPy dtype
    ===================== =========== =========== =============
    32-bit floating-point -1.0         +1.0        float32
    32-bit integer PCM    -2147483648  +2147483647 int32
    24-bit integer PCM    -2147483648  +2147483392 int32
    16-bit integer PCM    -32768       +32767      int16
    8-bit integer PCM     0            255         uint8
    ===================== =========== =========== =============

    WAV files can specify arbitrary bit depth, and this function supports
    reading any integer PCM depth from 1 to 64 bits. Data is returned in the
    smallest compatible numpy int type, in left-justified format. 8-bit and
    lower is unsigned, while 9-bit and higher is signed.

    For example, 24-bit data will be stored as int32, with the MSB of the
    24-bit data stored at the MSB of the int32, and typically the least
    significant byte is 0x00. (However, if a file actually contains data past
    its specified bit depth, those bits will be read and output, too. [2]_)

    This bit justification and sign matches WAV's native internal format, which
    allows memory mapping of WAV files that use 1, 2, 4, or 8 bytes per sample
    (so 24-bit files cannot be memory-mapped, but 32-bit can).

    IEEE float PCM in 32- or 64-bit format is supported, with or without mmap.
    Values exceeding [-1, +1] are not clipped.

    Non-linear PCM (mu-law, A-law) is not supported.

    References
    ----------
    .. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
       Interface and Data Specifications 1.0", section "Data Format of the
       Samples", August 1991
       http://www.tactilemedia.com/info/MCI_Control_Info.html
    .. [2] Adobe Systems Incorporated, "Adobe Audition 3 User Guide", section
       "Audio file formats: 24-bit Packed Int (type 1, 20-bit)", 2007

    Examples
    --------
    >>> from os.path import dirname, join as pjoin
    >>> from scipy.io import wavfile
    >>> import scipy.io

    Get the filename for an example .wav file from the tests/data directory.

    >>> data_dir = pjoin(dirname(scipy.io.__file__), 'tests', 'data')
    >>> wav_fname = pjoin(data_dir, 'test-44100Hz-2ch-32bit-float-be.wav')

    Load the .wav file contents.

    >>> samplerate, data = wavfile.read(wav_fname)
    >>> print(f"number of channels = {data.shape[1]}")
    number of channels = 2
    >>> length = data.shape[0] / samplerate
    >>> print(f"length = {length}s")
    length = 0.01s

    Plot the waveform.

    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> time = np.linspace(0., length, data.shape[0])
    >>> plt.plot(time, data[:, 0], label="Left channel")
    >>> plt.plot(time, data[:, 1], label="Right channel")
    >>> plt.legend()
    >>> plt.xlabel("Time [s]")
    >>> plt.ylabel("Amplitude")
    >>> plt.show()
    """
    if hasattr(filename, 'read'):
        fid = filename
        # mmap requires a real file descriptor; disable it for file-likes.
        mmap = False
    else:
        fid = open(filename, 'rb')

    try:
        file_size, is_big_endian = _read_riff_chunk(fid)
        fmt_chunk_received = False
        data_chunk_received = False
        while fid.tell() < file_size:
            # read the next chunk
            chunk_id = fid.read(4)

            if not chunk_id:
                if data_chunk_received:
                    # End of file but data successfully read
                    warnings.warn(
                        "Reached EOF prematurely; finished at {:d} bytes, "
                        "expected {:d} bytes from header."
                        .format(fid.tell(), file_size),
                        WavFileWarning, stacklevel=2)
                    break
                else:
                    raise ValueError("Unexpected end of file.")
            elif len(chunk_id) < 4:
                msg = f"Incomplete chunk ID: {repr(chunk_id)}"
                # If we have the data, ignore the broken chunk
                if fmt_chunk_received and data_chunk_received:
                    warnings.warn(msg + ", ignoring it.", WavFileWarning,
                                  stacklevel=2)
                    # NOTE(review): control falls through to the dispatch
                    # below with the short id; it will be treated as an
                    # unknown chunk and skipped.
                else:
                    raise ValueError(msg)

            if chunk_id == b'fmt ':
                fmt_chunk_received = True
                fmt_chunk = _read_fmt_chunk(fid, is_big_endian)
                format_tag, channels, fs = fmt_chunk[1:4]
                bit_depth = fmt_chunk[6]
                block_align = fmt_chunk[5]
            elif chunk_id == b'fact':
                _skip_unknown_chunk(fid, is_big_endian)
            elif chunk_id == b'data':
                data_chunk_received = True
                if not fmt_chunk_received:
                    raise ValueError("No fmt chunk before data")
                data = _read_data_chunk(fid, format_tag, channels, bit_depth,
                                        is_big_endian, block_align, mmap)
            elif chunk_id == b'LIST':
                # Someday this could be handled properly but for now skip it
                _skip_unknown_chunk(fid, is_big_endian)
            elif chunk_id in {b'JUNK', b'Fake'}:
                # Skip alignment chunks without warning
                _skip_unknown_chunk(fid, is_big_endian)
            else:
                warnings.warn("Chunk (non-data) not understood, skipping it.",
                              WavFileWarning, stacklevel=2)
                _skip_unknown_chunk(fid, is_big_endian)
    finally:
        if not hasattr(filename, 'read'):
            fid.close()
        else:
            # Caller-supplied handle: rewind instead of closing.
            fid.seek(0)

    return fs, data
def write(filename, rate, data):
    """
    Write a NumPy array as a WAV file.

    Parameters
    ----------
    filename : string or open file handle
        Output wav file.
    rate : int
        The sample rate (in samples/sec).
    data : ndarray
        A 1-D or 2-D NumPy array of either integer or float data-type.

    Notes
    -----
    * Writes a simple uncompressed WAV file.
    * To write multiple-channels, use a 2-D array of shape
      (Nsamples, Nchannels).
    * The bits-per-sample and PCM/float will be determined by the data-type.

    Common data types: [1]_

    ===================== =========== =========== =============
         WAV format            Min          Max       NumPy dtype
    ===================== =========== =========== =============
    32-bit floating-point -1.0         +1.0        float32
    32-bit PCM            -2147483648  +2147483647 int32
    16-bit PCM            -32768       +32767      int16
    8-bit PCM             0            255         uint8
    ===================== =========== =========== =============

    Note that 8-bit PCM is unsigned.

    References
    ----------
    .. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
       Interface and Data Specifications 1.0", section "Data Format of the
       Samples", August 1991
       http://www.tactilemedia.com/info/MCI_Control_Info.html

    Examples
    --------
    Create a 100Hz sine wave, sampled at 44100Hz.
    Write to 16-bit PCM, Mono.

    >>> from scipy.io.wavfile import write
    >>> import numpy as np
    >>> samplerate = 44100; fs = 100
    >>> t = np.linspace(0., 1., samplerate)
    >>> amplitude = np.iinfo(np.int16).max
    >>> data = amplitude * np.sin(2. * np.pi * fs * t)
    >>> write("example.wav", samplerate, data.astype(np.int16))

    """
    if hasattr(filename, 'write'):
        fid = filename
    else:
        fid = open(filename, 'wb')

    fs = rate

    try:
        dkind = data.dtype.kind
        # Only signed ints, floats, and 8-bit unsigned ints map to WAV types.
        if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and
                                                 data.dtype.itemsize == 1)):
            raise ValueError("Unsupported data type '%s'" % data.dtype)

        header_data = b''

        header_data += b'RIFF'
        # Placeholder for total size; patched at the end once it is known.
        header_data += b'\x00\x00\x00\x00'
        header_data += b'WAVE'

        # fmt chunk
        header_data += b'fmt '
        if dkind == 'f':
            format_tag = WAVE_FORMAT.IEEE_FLOAT
        else:
            format_tag = WAVE_FORMAT.PCM
        if data.ndim == 1:
            channels = 1
        else:
            channels = data.shape[1]
        bit_depth = data.dtype.itemsize * 8
        bytes_per_second = fs*(bit_depth // 8)*channels
        block_align = channels * (bit_depth // 8)

        fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs,
                                     bytes_per_second, block_align, bit_depth)
        if not (dkind == 'i' or dkind == 'u'):
            # add cbSize field for non-PCM files
            fmt_chunk_data += b'\x00\x00'

        header_data += struct.pack('<I', len(fmt_chunk_data))
        header_data += fmt_chunk_data

        # fact chunk (non-PCM files)
        if not (dkind == 'i' or dkind == 'u'):
            header_data += b'fact'
            header_data += struct.pack('<II', 4, data.shape[0])

        # check data size (needs to be immediately before the data chunk)
        if ((len(header_data)-4-4) + (4+4+data.nbytes)) > 0xFFFFFFFF:
            raise ValueError("Data exceeds wave file size limit")

        fid.write(header_data)

        # data chunk
        fid.write(b'data')
        fid.write(struct.pack('<I', data.nbytes))
        # WAV payloads are little-endian; swap if the array is big-endian.
        if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and
                                           sys.byteorder == 'big'):
            data = data.byteswap()
        _array_tofile(fid, data)

        # Determine file size and place it in correct
        # position at start of the file.
        size = fid.tell()
        fid.seek(4)
        fid.write(struct.pack('<I', size-8))

    finally:
        if not hasattr(filename, 'write'):
            fid.close()
        else:
            fid.seek(0)
def _array_tofile(fid, data):
# ravel gives a c-contiguous buffer
fid.write(data.ravel().view('b').data)
| 26,642
| 30.680143
| 94
|
py
|
scipy
|
scipy-main/scipy/io/idl.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io` namespace for importing the functions
# included below.
import warnings
from . import _idl
__all__ = [ # noqa: F822
'readsav', 'DTYPE_DICT', 'RECTYPE_DICT', 'STRUCT_DICT',
'Pointer', 'ObjectPointer', 'AttrDict'
]
def __dir__():
    # Module-level __dir__ (PEP 562): limit tab completion to the
    # deprecated public names re-exported from scipy.io.
    return __all__
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): forward access to deprecated
    # names to the private _idl module, warning the caller once per call.
    if name not in __all__:
        raise AttributeError(
            "scipy.io.idl is deprecated and has no attribute "
            f"{name}. Try looking in scipy.io instead.")

    warnings.warn(f"Please use `{name}` from the `scipy.io` namespace, "
                  "the `scipy.io.idl` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)

    return getattr(_idl, name)
| 794
| 25.5
| 76
|
py
|
scipy
|
scipy-main/scipy/io/__init__.py
|
"""
==================================
Input and output (:mod:`scipy.io`)
==================================
.. currentmodule:: scipy.io
SciPy has many modules, classes, and functions available to read data
from and write data to a variety of file formats.
.. seealso:: `NumPy IO routines <https://www.numpy.org/devdocs/reference/routines.io.html>`__
MATLAB® files
=============
.. autosummary::
:toctree: generated/
loadmat - Read a MATLAB style mat file (version 4 through 7.1)
savemat - Write a MATLAB style mat file (version 4 through 7.1)
whosmat - List contents of a MATLAB style mat file (version 4 through 7.1)
For low-level MATLAB reading and writing utilities, see `scipy.io.matlab`.
IDL® files
==========
.. autosummary::
:toctree: generated/
readsav - Read an IDL 'save' file
Matrix Market files
===================
.. autosummary::
:toctree: generated/
mminfo - Query matrix info from Matrix Market formatted file
mmread - Read matrix from Matrix Market formatted file
mmwrite - Write matrix to Matrix Market formatted file
Unformatted Fortran files
===============================
.. autosummary::
:toctree: generated/
FortranFile - A file object for unformatted sequential Fortran files
FortranEOFError - Exception indicating the end of a well-formed file
FortranFormattingError - Exception indicating an inappropriate end
Netcdf
======
.. autosummary::
:toctree: generated/
netcdf_file - A file object for NetCDF data
netcdf_variable - A data object for the netcdf module
Harwell-Boeing files
====================
.. autosummary::
:toctree: generated/
hb_read -- read H-B file
hb_write -- write H-B file
Wav sound files (:mod:`scipy.io.wavfile`)
=========================================
.. module:: scipy.io.wavfile
.. autosummary::
:toctree: generated/
read
write
WavFileWarning
Arff files (:mod:`scipy.io.arff`)
=================================
.. module:: scipy.io.arff
.. autosummary::
:toctree: generated/
loadarff
MetaData
ArffError
ParseArffError
"""
# matfile read and write
from .matlab import loadmat, savemat, whosmat
# netCDF file support
from ._netcdf import netcdf_file, netcdf_variable
# Fortran file support
from ._fortran import FortranFile, FortranEOFError, FortranFormattingError
from ._mmio import mminfo, mmread, mmwrite
from ._idl import readsav
from ._harwell_boeing import hb_read, hb_write
# Deprecated namespaces, to be removed in v2.0.0
from . import arff, harwell_boeing, idl, mmio, netcdf, wavfile
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 2,719
| 22.247863
| 93
|
py
|
scipy
|
scipy-main/scipy/io/_idl.py
|
# IDLSave - a python module to read IDL 'save' files
# Copyright (c) 2010 Thomas P. Robitaille
# Many thanks to Craig Markwardt for publishing the Unofficial Format
# Specification for IDL .sav files, without which this Python module would not
# exist (http://cow.physics.wisc.edu/~craigm/idl/savefmt).
# This code was developed by with permission from ITT Visual Information
# Systems. IDL(r) is a registered trademark of ITT Visual Information Systems,
# Inc. for their Interactive Data Language software.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
__all__ = ['readsav']
import struct
import numpy as np
import tempfile
import zlib
import warnings
# Define the different data types that can be found in an IDL save file.
# Codes follow Markwardt's unofficial .sav format specification (see module
# header); all numeric types are stored big-endian. '|O' entries (string,
# structure, pointer, object reference) are parsed separately, not via dtype.
DTYPE_DICT = {1: '>u1',
              2: '>i2',
              3: '>i4',
              4: '>f4',
              5: '>f8',
              6: '>c8',
              7: '|O',
              8: '|O',
              9: '>c16',
              10: '|O',
              11: '|O',
              12: '>u2',
              13: '>u4',
              14: '>i8',
              15: '>u8'}

# Define the different record types that can be found in an IDL save file
RECTYPE_DICT = {0: "START_MARKER",
                1: "COMMON_VARIABLE",
                2: "VARIABLE",
                3: "SYSTEM_VARIABLE",
                6: "END_MARKER",
                10: "TIMESTAMP",
                12: "COMPILED",
                13: "IDENTIFICATION",
                14: "VERSION",
                15: "HEAP_HEADER",
                16: "HEAP_DATA",
                17: "PROMOTE64",
                19: "NOTICE",
                20: "DESCRIPTION"}

# Define a dictionary to contain structure definitions
STRUCT_DICT = {}
def _align_32(f):
    '''Advance the stream to the next 32-bit (4-byte) boundary.'''
    offset = f.tell() % 4
    if offset:
        # Relative seek forward to the next multiple of 4.
        f.seek(4 - offset, 1)
def _skip_bytes(f, n):
    '''Consume and discard the next `n` bytes of the stream.'''
    f.read(n)
def _read_bytes(f, n):
    '''Read and return the next `n` bytes from the stream.'''
    return f.read(n)
def _read_byte(f):
    '''Read one unsigned byte; IDL pads scalar bytes out to 4 bytes.'''
    padded = f.read(4)
    (value,) = struct.unpack('>B', padded[:1])
    return np.uint8(value)
def _read_long(f):
    '''Read a big-endian signed 32-bit integer.'''
    (value,) = struct.unpack('>l', f.read(4))
    return np.int32(value)
def _read_int16(f):
    '''Read a big-endian signed 16-bit integer (stored in a 4-byte slot).'''
    padded = f.read(4)
    # The payload occupies the last two bytes of the 32-bit slot.
    (value,) = struct.unpack('>h', padded[2:4])
    return np.int16(value)
def _read_int32(f):
    '''Read a big-endian signed 32-bit integer.'''
    (value,) = struct.unpack('>i', f.read(4))
    return np.int32(value)
def _read_int64(f):
    '''Read a big-endian signed 64-bit integer.'''
    (value,) = struct.unpack('>q', f.read(8))
    return np.int64(value)
def _read_uint16(f):
    '''Read a big-endian unsigned 16-bit integer (stored in a 4-byte slot).'''
    padded = f.read(4)
    (value,) = struct.unpack('>H', padded[2:4])
    return np.uint16(value)
def _read_uint32(f):
    '''Read a big-endian unsigned 32-bit integer.'''
    (value,) = struct.unpack('>I', f.read(4))
    return np.uint32(value)
def _read_uint64(f):
    '''Read a big-endian unsigned 64-bit integer.'''
    (value,) = struct.unpack('>Q', f.read(8))
    return np.uint64(value)
def _read_float32(f):
    '''Read a big-endian IEEE-754 32-bit float.'''
    (value,) = struct.unpack('>f', f.read(4))
    return np.float32(value)
def _read_float64(f):
    '''Read a big-endian IEEE-754 64-bit float.'''
    (value,) = struct.unpack('>d', f.read(8))
    return np.float64(value)
class Pointer:
    '''Placeholder recording an IDL heap-pointer index.

    Resolved against the heap table later by ``_replace_heap``.
    '''
    def __init__(self, index):
        # Index into the save file's heap (0 is IDL's null pointer).
        self.index = index
class ObjectPointer(Pointer):
    '''Class used to define object pointers (IDL object references).

    Identical to `Pointer` except for its type, which lets readers
    distinguish object references from plain heap pointers.
    '''
    pass
def _read_string(f):
    '''Read a length-prefixed latin-1 string, then realign to 32 bits.'''
    length = _read_long(f)
    if length <= 0:
        return ''
    chars = _read_bytes(f, length).decode('latin1')
    _align_32(f)
    return chars
def _read_string_data(f):
    '''Read a data string; the length is stored twice in the stream.

    Returns raw bytes for non-empty strings, '' for empty ones.
    '''
    length = _read_long(f)
    if length <= 0:
        return ''
    # The true byte count immediately follows the first length field.
    length = _read_long(f)
    string_data = _read_bytes(f, length)
    _align_32(f)
    return string_data
def _read_data(f, dtype):
    '''Read a single scalar value of IDL type code `dtype` from `f`.

    Numeric types come back as big-endian NumPy scalars, strings as the
    value of `_read_string_data`, and pointer/object-reference types as
    `Pointer`/`ObjectPointer` placeholders to be resolved against the
    heap later.
    '''
    if dtype == 1:
        # Scalar bytes are preceded by an int32 count that must be 1.
        if _read_int32(f) != 1:
            raise Exception("Error occurred while reading byte variable")
        return _read_byte(f)
    elif dtype == 2:
        return _read_int16(f)
    elif dtype == 3:
        return _read_int32(f)
    elif dtype == 4:
        return _read_float32(f)
    elif dtype == 5:
        return _read_float64(f)
    elif dtype == 6:
        # Complex values are stored as (real, imaginary) pairs.
        real = _read_float32(f)
        imag = _read_float32(f)
        return np.complex64(real + imag * 1j)
    elif dtype == 7:
        return _read_string_data(f)
    elif dtype == 8:
        # Type 8 is a structure, which is handled by _read_structure;
        # reaching this branch indicates a bug in the caller.
        raise Exception("Should not be here - please report this")
    elif dtype == 9:
        real = _read_float64(f)
        imag = _read_float64(f)
        return np.complex128(real + imag * 1j)
    elif dtype == 10:
        return Pointer(_read_int32(f))
    elif dtype == 11:
        return ObjectPointer(_read_int32(f))
    elif dtype == 12:
        return _read_uint16(f)
    elif dtype == 13:
        return _read_uint32(f)
    elif dtype == 14:
        return _read_int64(f)
    elif dtype == 15:
        return _read_uint64(f)
    else:
        raise Exception("Unknown IDL type: %i - please report this" % dtype)
def _read_structure(f, array_desc, struct_desc):
    '''
    Read a structure, with the array and structure descriptors given as
    `array_desc` and `structure_desc` respectively.

    Returns a ``np.recarray`` with ``array_desc['nelements']`` records and
    one field per tag in the structure's tag table; nested arrays and
    structures are stored in object-dtype fields.
    '''
    nrows = array_desc['nelements']
    columns = struct_desc['tagtable']
    # Build the recarray dtype.  The ((title, name), dtype) form makes each
    # field addressable by both the lowercased and the original tag name.
    dtype = []
    for col in columns:
        if col['structure'] or col['array']:
            # Nested values are kept as Python objects.
            dtype.append(((col['name'].lower(), col['name']), np.object_))
        else:
            if col['typecode'] in DTYPE_DICT:
                dtype.append(((col['name'].lower(), col['name']),
                                    DTYPE_DICT[col['typecode']]))
            else:
                raise Exception("Variable type %i not implemented" %
                                                    col['typecode'])
    structure = np.recarray((nrows, ), dtype=dtype)
    # Values are stored record-by-record, tag-by-tag; read sequentially.
    for i in range(nrows):
        for col in columns:
            dtype = col['typecode']
            if col['structure']:
                structure[col['name']][i] = _read_structure(f,
                                      struct_desc['arrtable'][col['name']],
                                      struct_desc['structtable'][col['name']])
            elif col['array']:
                structure[col['name']][i] = _read_array(f, dtype,
                                      struct_desc['arrtable'][col['name']])
            else:
                structure[col['name']][i] = _read_data(f, dtype)
    # Reshape structure if needed
    if array_desc['ndims'] > 1:
        dims = array_desc['dims'][:int(array_desc['ndims'])]
        # Dimensions are reversed (IDL order is the opposite of NumPy's).
        dims.reverse()
        structure = structure.reshape(dims)
    return structure
def _read_array(f, typecode, array_desc):
    '''
    Read an array of type `typecode`, with the array descriptor given as
    `array_desc`.

    Fixed-width numeric types are read as one contiguous buffer; strings,
    complex-object and pointer types are read element by element into an
    object array.  Leaves the stream 32-bit aligned.
    '''
    if typecode in [1, 3, 4, 5, 6, 9, 13, 14, 15]:
        if typecode == 1:
            # Byte arrays carry an explicit byte count before the data.
            nbytes = _read_int32(f)
            if nbytes != array_desc['nbytes']:
                warnings.warn("Not able to verify number of bytes from header")
        # Read bytes as numpy array
        array = np.frombuffer(f.read(array_desc['nbytes']),
                              dtype=DTYPE_DICT[typecode])
    elif typecode in [2, 12]:
        # These are 2 byte types, need to skip every two as they are not
        # packed: each 16-bit value sits in the second half of a 32-bit
        # slot (matching the scalar readers, which use f.read(4)[2:4]).
        array = np.frombuffer(f.read(array_desc['nbytes']*2),
                              dtype=DTYPE_DICT[typecode])[1::2]
    else:
        # Read bytes into list
        array = []
        for i in range(array_desc['nelements']):
            dtype = typecode
            data = _read_data(f, dtype)
            array.append(data)
        array = np.array(array, dtype=np.object_)
    # Reshape array if needed (IDL dimension order is reversed vs NumPy).
    if array_desc['ndims'] > 1:
        dims = array_desc['dims'][:int(array_desc['ndims'])]
        dims.reverse()
        array = array.reshape(dims)
    # Go to next alignment position
    _align_32(f)
    return array
def _read_record(f):
    '''Read a single top-level record from an IDL save file.

    Returns a dict with at least a ``'rectype'`` key (the record type
    name); the remaining keys depend on the record type.  The stream is
    left positioned at the start of the next record.
    '''
    record = {'rectype': _read_long(f)}
    # The offset of the next record is stored as two 32-bit halves.
    nextrec = _read_uint32(f)
    nextrec += _read_uint32(f) * 2**32
    _skip_bytes(f, 4)
    if record['rectype'] not in RECTYPE_DICT:
        raise Exception("Unknown RECTYPE: %i" % record['rectype'])
    record['rectype'] = RECTYPE_DICT[record['rectype']]
    if record['rectype'] in ["VARIABLE", "HEAP_DATA"]:
        if record['rectype'] == "VARIABLE":
            record['varname'] = _read_string(f)
        else:
            record['heap_index'] = _read_long(f)
            _skip_bytes(f, 4)
        rectypedesc = _read_typedesc(f)
        if rectypedesc['typecode'] == 0:
            if nextrec == f.tell():
                record['data'] = None  # Indicates NULL value
            else:
                raise ValueError("Unexpected type code: 0")
        else:
            varstart = _read_long(f)
            if varstart != 7:
                raise Exception("VARSTART is not 7")
            if rectypedesc['structure']:
                record['data'] = _read_structure(f, rectypedesc['array_desc'],
                                                 rectypedesc['struct_desc'])
            elif rectypedesc['array']:
                record['data'] = _read_array(f, rectypedesc['typecode'],
                                             rectypedesc['array_desc'])
            else:
                dtype = rectypedesc['typecode']
                record['data'] = _read_data(f, dtype)
    elif record['rectype'] == "TIMESTAMP":
        _skip_bytes(f, 4*256)
        record['date'] = _read_string(f)
        record['user'] = _read_string(f)
        record['host'] = _read_string(f)
    elif record['rectype'] == "VERSION":
        record['format'] = _read_long(f)
        record['arch'] = _read_string(f)
        record['os'] = _read_string(f)
        record['release'] = _read_string(f)
    elif record['rectype'] == "IDENTIFICATION":
        # BUG FIX: this branch previously compared against the misspelled
        # "IDENTIFICATON"; since RECTYPE_DICT yields "IDENTIFICATION",
        # such records fell through to the final `else` and raised.
        record['author'] = _read_string(f)
        record['title'] = _read_string(f)
        record['idcode'] = _read_string(f)
    elif record['rectype'] == "NOTICE":
        record['notice'] = _read_string(f)
    elif record['rectype'] == "DESCRIPTION":
        record['description'] = _read_string_data(f)
    elif record['rectype'] == "HEAP_HEADER":
        record['nvalues'] = _read_long(f)
        record['indices'] = [_read_long(f) for _ in range(record['nvalues'])]
    elif record['rectype'] == "COMMON_VARIABLE":
        # BUG FIX: RECTYPE_DICT maps type 1 to "COMMON_VARIABLE"; the old
        # comparison against "COMMONBLOCK" could never match.
        record['nvars'] = _read_long(f)
        record['name'] = _read_string(f)
        record['varnames'] = [_read_string(f) for _ in range(record['nvars'])]
    elif record['rectype'] == "END_MARKER":
        record['end'] = True
    elif record['rectype'] == "UNKNOWN":
        # NOTE(review): "UNKNOWN" never appears in RECTYPE_DICT, so this
        # branch looks unreachable -- kept for safety.
        warnings.warn("Skipping UNKNOWN record")
    elif record['rectype'] == "SYSTEM_VARIABLE":
        warnings.warn("Skipping SYSTEM_VARIABLE record")
    else:
        raise Exception("record['rectype']=%s not implemented" %
                        record['rectype'])
    # Jump to the next record regardless of how much of this one was read.
    f.seek(nextrec)
    return record
def _read_typedesc(f):
    '''Read a type descriptor for a variable or heap value.'''
    typecode = _read_long(f)
    varflags = _read_long(f)
    # Bit 1 marks a system variable, which this reader does not support.
    if varflags & 2 == 2:
        raise Exception("System variables not implemented")
    typedesc = {'typecode': typecode,
                'varflags': varflags,
                'array': varflags & 4 == 4,
                'structure': varflags & 32 == 32}
    if typedesc['structure']:
        typedesc['array_desc'] = _read_arraydesc(f)
        typedesc['struct_desc'] = _read_structdesc(f)
    elif typedesc['array']:
        typedesc['array_desc'] = _read_arraydesc(f)
    return typedesc
def _read_arraydesc(f):
    '''Function to read in an array descriptor.

    Two on-disk layouts exist: ARRSTART == 8 (32-bit fields) and
    ARRSTART == 18 (64-bit fields, experimental).
    '''
    arraydesc = {'arrstart': _read_long(f)}
    if arraydesc['arrstart'] == 8:
        _skip_bytes(f, 4)
        arraydesc['nbytes'] = _read_long(f)
        arraydesc['nelements'] = _read_long(f)
        arraydesc['ndims'] = _read_long(f)
        _skip_bytes(f, 8)
        arraydesc['nmax'] = _read_long(f)
        # Dimension sizes; only the first `ndims` entries are meaningful.
        arraydesc['dims'] = [_read_long(f) for _ in range(arraydesc['nmax'])]
    elif arraydesc['arrstart'] == 18:
        warnings.warn("Using experimental 64-bit array read")
        _skip_bytes(f, 8)
        arraydesc['nbytes'] = _read_uint64(f)
        arraydesc['nelements'] = _read_uint64(f)
        arraydesc['ndims'] = _read_long(f)
        _skip_bytes(f, 8)
        arraydesc['nmax'] = 8
        arraydesc['dims'] = []
        for d in range(arraydesc['nmax']):
            # Each dimension is a (zero, size) pair of 32-bit values.
            v = _read_long(f)
            if v != 0:
                raise Exception("Expected a zero in ARRAY_DESC")
            arraydesc['dims'].append(_read_long(f))
    else:
        raise Exception("Unknown ARRSTART: %i" % arraydesc['arrstart'])
    return arraydesc
def _read_structdesc(f):
    '''Function to read in a structure descriptor.

    Full definitions are cached in the module-level STRUCT_DICT; when the
    PREDEF flag is set, only the name is stored on disk and the cached
    definition is returned instead.
    '''
    structdesc = {}
    structstart = _read_long(f)
    if structstart != 9:
        raise Exception("STRUCTSTART should be 9")
    structdesc['name'] = _read_string(f)
    predef = _read_long(f)
    structdesc['ntags'] = _read_long(f)
    structdesc['nbytes'] = _read_long(f)
    # `predef` is a bitfield: 1 = predefined, 2 = inherits, 4 = superclass.
    structdesc['predef'] = predef & 1
    structdesc['inherits'] = predef & 2
    structdesc['is_super'] = predef & 4
    if not structdesc['predef']:
        # Tag descriptors come first, then all tag names.
        structdesc['tagtable'] = [_read_tagdesc(f)
                                  for _ in range(structdesc['ntags'])]
        for tag in structdesc['tagtable']:
            tag['name'] = _read_string(f)
        structdesc['arrtable'] = {tag['name']: _read_arraydesc(f)
                                  for tag in structdesc['tagtable']
                                  if tag['array']}
        structdesc['structtable'] = {tag['name']: _read_structdesc(f)
                                     for tag in structdesc['tagtable']
                                     if tag['structure']}
        if structdesc['inherits'] or structdesc['is_super']:
            structdesc['classname'] = _read_string(f)
            structdesc['nsupclasses'] = _read_long(f)
            structdesc['supclassnames'] = [
                _read_string(f) for _ in range(structdesc['nsupclasses'])]
            structdesc['supclasstable'] = [
                _read_structdesc(f) for _ in range(structdesc['nsupclasses'])]
        # Cache the full definition for later PREDEF references.
        STRUCT_DICT[structdesc['name']] = structdesc
    else:
        if structdesc['name'] not in STRUCT_DICT:
            raise Exception("PREDEF=1 but can't find definition")
        structdesc = STRUCT_DICT[structdesc['name']]
    return structdesc
def _read_tagdesc(f):
    '''Read one tag descriptor of a structure definition.'''
    offset = _read_long(f)
    if offset == -1:
        # An offset of -1 flags a 64-bit offset stored immediately after.
        offset = _read_uint64(f)
    typecode = _read_long(f)
    tagflags = _read_long(f)
    # Assume '10'x is scalar
    return {'offset': offset,
            'typecode': typecode,
            'array': tagflags & 4 == 4,
            'structure': tagflags & 32 == 32,
            'scalar': typecode in DTYPE_DICT}
def _replace_heap(variable, heap):
    '''Recursively resolve `Pointer` placeholders in `variable` against `heap`.

    Returns ``(replace, new)``; `replace` tells the caller that it must
    substitute `new` for the value it passed in (needed for immutable
    scalars such as pointers themselves).  Containers are fixed in place.
    '''
    if isinstance(variable, Pointer):
        # Follow pointer chains until a non-pointer value is reached.
        while isinstance(variable, Pointer):
            if variable.index == 0:
                # A zero index is IDL's null pointer.
                variable = None
            else:
                if variable.index in heap:
                    variable = heap[variable.index]
                else:
                    warnings.warn("Variable referenced by pointer not found "
                                  "in heap: variable will be set to None")
                    variable = None
        # The heap value may itself contain pointers.
        replace, new = _replace_heap(variable, heap)
        if replace:
            variable = new
        return True, variable
    elif isinstance(variable, np.recarray):
        # Loop over records
        for ir, record in enumerate(variable):
            replace, new = _replace_heap(record, heap)
            if replace:
                variable[ir] = new
        return False, variable
    elif isinstance(variable, np.record):
        # Loop over values
        for iv, value in enumerate(variable):
            replace, new = _replace_heap(value, heap)
            if replace:
                variable[iv] = new
        return False, variable
    elif isinstance(variable, np.ndarray):
        # Loop over values if type is np.object_
        if variable.dtype.type is np.object_:
            for iv in range(variable.size):
                replace, new = _replace_heap(variable.item(iv), heap)
                if replace:
                    # BUG FIX: ndarray.itemset() was removed in NumPy 2.0.
                    # Assign through a multi-index instead; single-element
                    # assignment into an object array stores the object
                    # itself without broadcasting.
                    variable[np.unravel_index(iv, variable.shape)] = new
        return False, variable
    else:
        return False, variable
class AttrDict(dict):
    '''
    A case-insensitive dictionary with access via item, attribute, and call
    notations:
    >>> d = AttrDict()
    >>> d['Variable'] = 123
    >>> d['Variable']
    123
    >>> d.Variable
    123
    >>> d.variable
    123
    >>> d('VARIABLE')
    123
    >>> d['missing']
    Traceback (most recent call last):
    ...
    KeyError: 'missing'
    >>> d.missing
    Traceback (most recent call last):
    ...
    AttributeError: 'AttrDict' object has no attribute 'missing'
    '''
    # BUG FIXES relative to the previous version:
    # * the doctest tracebacks said "most recent error last", which is not
    #   the header doctest requires ("most recent call last");
    # * __init__ used a mutable default (init={}) and passed `init`
    #   straight to dict.__init__, bypassing the key lowercasing done by
    #   __setitem__, so mixed-case initial keys became unreachable;
    # * the AttributeError message interpolated f"{type(self)}", which
    #   renders as "<class '...AttrDict'>", contradicting the docstring.

    def __init__(self, init=None):
        super().__init__()
        if init is not None:
            # Route initial items through __setitem__ so keys are
            # normalized to lowercase; dict() accepts both mappings and
            # iterables of key/value pairs, as dict.__init__ did.
            for key, value in dict(init).items():
                self[key] = value

    def __getitem__(self, name):
        return super().__getitem__(name.lower())

    def __setitem__(self, key, value):
        return super().__setitem__(key.lower(), value)

    def __getattr__(self, name):
        try:
            return self.__getitem__(name)
        except KeyError:
            raise AttributeError(
                f"'{type(self).__name__}' object has no attribute "
                f"'{name}'") from None

    __setattr__ = __setitem__
    __call__ = __getitem__
def readsav(file_name, idict=None, python_dict=False,
            uncompressed_file_name=None, verbose=False):
    """
    Read an IDL .sav file.
    Parameters
    ----------
    file_name : str
        Name of the IDL save file.
    idict : dict, optional
        Dictionary in which to insert .sav file variables.
    python_dict : bool, optional
        By default, the object return is not a Python dictionary, but a
        case-insensitive dictionary with item, attribute, and call access
        to variables. To get a standard Python dictionary, set this option
        to True.
    uncompressed_file_name : str, optional
        This option only has an effect for .sav files written with the
        /compress option. If a file name is specified, compressed .sav
        files are uncompressed to this file. Otherwise, readsav will use
        the `tempfile` module to determine a temporary filename
        automatically, and will remove the temporary file upon successfully
        reading it in.
    verbose : bool, optional
        Whether to print out information about the save file, including
        the records read, and available variables.
    Returns
    -------
    idl_dict : AttrDict or dict
        If `python_dict` is set to False (default), this function returns a
        case-insensitive dictionary with item, attribute, and call access
        to variables. If `python_dict` is set to True, this function
        returns a Python dictionary with all variable names in lowercase.
        If `idict` was specified, then variables are written to the
        dictionary specified, and the updated dictionary is returned.
    Examples
    --------
    >>> from os.path import dirname, join as pjoin
    >>> import scipy.io as sio
    >>> from scipy.io import readsav
    Get the filename for an example .sav file from the tests/data directory.
    >>> data_dir = pjoin(dirname(sio.__file__), 'tests', 'data')
    >>> sav_fname = pjoin(data_dir, 'array_float32_1d.sav')
    Load the .sav file contents.
    >>> sav_data = readsav(sav_fname)
    Get keys of the .sav file contents.
    >>> print(sav_data.keys())
    dict_keys(['array1d'])
    Access a content with a key.
    >>> print(sav_data['array1d'])
    [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
     0. 0. 0.]
    """
    # Initialize record and variable holders
    records = []
    if python_dict or idict:
        variables = {}
    else:
        variables = AttrDict()
    # Open the IDL file
    f = open(file_name, 'rb')
    # Read the signature, which should be 'SR'
    signature = _read_bytes(f, 2)
    if signature != b'SR':
        raise Exception("Invalid SIGNATURE: %s" % signature)
    # Next, the record format, which is '\x00\x04' for normal .sav
    # files, and '\x00\x06' for compressed .sav files.
    recfmt = _read_bytes(f, 2)
    if recfmt == b'\x00\x04':
        pass
    elif recfmt == b'\x00\x06':
        if verbose:
            print("IDL Save file is compressed")
        if uncompressed_file_name:
            fout = open(uncompressed_file_name, 'w+b')
        else:
            fout = tempfile.NamedTemporaryFile(suffix='.sav')
        if verbose:
            print(" -> expanding to %s" % fout.name)
        # Write header
        fout.write(b'SR\x00\x04')
        # Cycle through records, inflating each one into `fout` so the
        # main reading loop below can treat the file as uncompressed.
        while True:
            # Read record type
            rectype = _read_long(f)
            fout.write(struct.pack('>l', int(rectype)))
            # Read position of next record and return as int
            nextrec = _read_uint32(f)
            nextrec += _read_uint32(f) * 2**32
            # Read the unknown 4 bytes
            unknown = f.read(4)
            # Check if the end of the file has been reached
            if RECTYPE_DICT[rectype] == 'END_MARKER':
                modval = np.int64(2**32)
                fout.write(struct.pack('>I', int(nextrec) % modval))
                fout.write(struct.pack('>I', int((nextrec - (nextrec % modval)) / modval)))
                fout.write(unknown)
                break
            # Find current position
            pos = f.tell()
            # Decompress record
            rec_string = zlib.decompress(f.read(nextrec-pos))
            # Find new position of next record
            nextrec = fout.tell() + len(rec_string) + 12
            # Write out record
            fout.write(struct.pack('>I', int(nextrec % 2**32)))
            fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32)))
            fout.write(unknown)
            fout.write(rec_string)
        # Close the original compressed file
        f.close()
        # Set f to be the decompressed file, and skip the first four bytes
        f = fout
        f.seek(4)
    else:
        raise Exception("Invalid RECFMT: %s" % recfmt)
    # Loop through records, and add them to the list
    while True:
        r = _read_record(f)
        records.append(r)
        if 'end' in r:
            if r['end']:
                break
    # Close the file
    f.close()
    # Find heap data variables
    heap = {}
    for r in records:
        if r['rectype'] == "HEAP_DATA":
            heap[r['heap_index']] = r['data']
    # Find all variables, resolving heap pointers as we go
    for r in records:
        if r['rectype'] == "VARIABLE":
            replace, new = _replace_heap(r['data'], heap)
            if replace:
                r['data'] = new
            variables[r['varname'].lower()] = r['data']
    if verbose:
        # Print out timestamp info about the file
        for record in records:
            if record['rectype'] == "TIMESTAMP":
                print("-"*50)
                print("Date: %s" % record['date'])
                print("User: %s" % record['user'])
                print("Host: %s" % record['host'])
                break
        # Print out version info about the file
        for record in records:
            if record['rectype'] == "VERSION":
                print("-"*50)
                print("Format: %s" % record['format'])
                print("Architecture: %s" % record['arch'])
                print("Operating System: %s" % record['os'])
                print("IDL Version: %s" % record['release'])
                break
        # Print out identification info about the file
        for record in records:
            # BUG FIX: previously compared against the misspelled
            # "IDENTIFICATON", so this info was never printed.
            if record['rectype'] == "IDENTIFICATION":
                print("-"*50)
                print("Author: %s" % record['author'])
                print("Title: %s" % record['title'])
                print("ID Code: %s" % record['idcode'])
                break
        # Print out descriptions saved with the file
        for record in records:
            if record['rectype'] == "DESCRIPTION":
                print("-"*50)
                print("Description: %s" % record['description'])
                break
        print("-"*50)
        print("Successfully read %i records of which:" %
              (len(records)))
        # Create convenience list of record types
        rectypes = [r['rectype'] for r in records]
        for rt in set(rectypes):
            if rt != 'END_MARKER':
                print(" - %i are of type %s" % (rectypes.count(rt), rt))
        print("-"*50)
        if 'VARIABLE' in rectypes:
            print("Available variables:")
            for var in variables:
                print(f" - {var} [{type(variables[var])}]")
            print("-"*50)
    if idict:
        for var in variables:
            idict[var] = variables[var]
        return idict
    else:
        return variables
| 26,898
| 28.397814
| 91
|
py
|
scipy
|
scipy-main/scipy/io/matlab/mio5_utils.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io.matlab` namespace for importing the functions
# included below.
import warnings
from . import _mio5_utils
__all__ = [ # noqa: F822
'VarHeader5', 'VarReader5', 'byteswap_u4', 'chars_to_strings',
'csc_matrix', 'mio5p', 'miob', 'pycopy', 'swapped_code', 'squeeze_element'
]
def __dir__():
    # Limit introspection to the deprecated public names listed above.
    return __all__
def __getattr__(name):
    # Deprecation shim: forward known names to the private implementation
    # module with a warning; anything else is a genuine attribute error.
    if name not in __all__:
        raise AttributeError(
            "scipy.io.matlab.mio5_utils is deprecated and has no attribute "
            f"{name}. Try looking in scipy.io.matlab instead.")
    warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
                  "the `scipy.io.matlab.mio5_utils` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_mio5_utils, name)
| 899
| 30.034483
| 79
|
py
|
scipy
|
scipy-main/scipy/io/matlab/streams.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io.matlab` namespace for importing the functions
# included below.
import warnings
from . import _streams
__all__ = [ # noqa: F822
'BLOCK_SIZE', 'GenericStream', 'ZlibInputStream', 'make_stream'
]
def __dir__():
    # Limit introspection to the deprecated public names listed above.
    return __all__
def __getattr__(name):
    # Deprecation shim: forward known names to the private implementation
    # module with a warning; anything else is a genuine attribute error.
    if name not in __all__:
        raise AttributeError(
            "scipy.io.matlab.streams is deprecated and has no attribute "
            f"{name}. Try looking in scipy.io.matlab instead.")
    warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
                  "the `scipy.io.matlab.streams` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_streams, name)
| 809
| 27.928571
| 79
|
py
|
scipy
|
scipy-main/scipy/io/matlab/_miobase.py
|
# Authors: Travis Oliphant, Matthew Brett
"""
Base classes for MATLAB file stream reading.
MATLAB is a registered trademark of the Mathworks inc.
"""
import numpy as np
from scipy._lib import doccer
from . import _byteordercodes as boc
__all__ = [
'MatFileReader', 'MatReadError', 'MatReadWarning',
'MatVarReader', 'MatWriteError', 'arr_dtype_number',
'arr_to_chars', 'convert_dtypes', 'doc_dict',
'docfiller', 'get_matfile_version',
'matdims', 'read_dtype'
]
class MatReadError(Exception):
    """Exception raised when a MAT file cannot be read (e.g. empty file)."""
# Write-side counterpart of MatReadError.
class MatWriteError(Exception):
    """Exception indicating a write issue."""
# UserWarning subclass used for non-fatal read diagnostics.
class MatReadWarning(UserWarning):
    """Warning class for read issues."""
doc_dict = \
{'file_arg':
'''file_name : str
Name of the mat file (do not need .mat extension if
appendmat==True) Can also pass open file-like object.''',
'append_arg':
'''appendmat : bool, optional
True to append the .mat extension to the end of the given
filename, if not already present. Default is True.''',
'load_args':
'''byte_order : str or None, optional
None by default, implying byte order guessed from mat
file. Otherwise can be one of ('native', '=', 'little', '<',
'BIG', '>').
mat_dtype : bool, optional
If True, return arrays in same dtype as would be loaded into
MATLAB (instead of the dtype with which they are saved).
squeeze_me : bool, optional
Whether to squeeze unit matrix dimensions or not.
chars_as_strings : bool, optional
Whether to convert char arrays to string arrays.
matlab_compatible : bool, optional
Returns matrices as would be loaded by MATLAB (implies
squeeze_me=False, chars_as_strings=False, mat_dtype=True,
struct_as_record=True).''',
'struct_arg':
'''struct_as_record : bool, optional
Whether to load MATLAB structs as NumPy record arrays, or as
old-style NumPy arrays with dtype=object. Setting this flag to
False replicates the behavior of SciPy version 0.7.x (returning
numpy object arrays). The default setting is True, because it
allows easier round-trip load and save of MATLAB files.''',
'matstream_arg':
'''mat_stream : file-like
Object with file API, open for reading.''',
'long_fields':
'''long_field_names : bool, optional
* False - maximum field name length in a structure is 31 characters
which is the documented maximum length. This is the default.
* True - maximum field name length in a structure is 63 characters
which works for MATLAB 7.6''',
'do_compression':
'''do_compression : bool, optional
Whether to compress matrices on write. Default is False.''',
'oned_as':
'''oned_as : {'row', 'column'}, optional
If 'column', write 1-D NumPy arrays as column vectors.
If 'row', write 1D NumPy arrays as row vectors.''',
'unicode_strings':
'''unicode_strings : bool, optional
If True, write strings as Unicode, else MATLAB usual encoding.'''}
docfiller = doccer.filldoc(doc_dict)
'''
Note on architecture
======================
There are three sets of parameters relevant for reading files. The
first are *file read parameters* - containing options that are common
for reading the whole file, and therefore every variable within that
file. At the moment these are:
* mat_stream
* dtypes (derived from byte code)
* byte_order
* chars_as_strings
* squeeze_me
* struct_as_record (MATLAB 5 files)
* class_dtypes (derived from order code, MATLAB 5 files)
* codecs (MATLAB 5 files)
* uint16_codec (MATLAB 5 files)
Another set of parameters are those that apply only to the current
variable being read - the *header*:
* header related variables (different for v4 and v5 mat files)
* is_complex
* mclass
* var_stream
With the header, we need ``next_position`` to tell us where the next
variable in the stream is.
Then, for each element in a matrix, there can be *element read
parameters*. An element is, for example, one element in a MATLAB cell
array. At the moment, these are:
* mat_dtype
The file-reading object contains the *file read parameters*. The
*header* is passed around as a data object, or may be read and discarded
in a single function. The *element read parameters* - the mat_dtype in
this instance, is passed into a general post-processing function - see
``mio_utils`` for details.
'''
def convert_dtypes(dtype_template, order_code):
    ''' Convert dtypes in mapping to given order

    Parameters
    ----------
    dtype_template : mapping
        mapping with values returning numpy dtype from ``np.dtype(val)``
    order_code : str
        an order code suitable for using in ``dtype.newbyteorder()``

    Returns
    -------
    dtypes : mapping
        mapping where values have been replaced by
        ``np.dtype(val).newbyteorder(order_code)``
    '''
    # Work on a copy so the template itself is never mutated.
    converted = dtype_template.copy()
    for key in converted:
        converted[key] = np.dtype(converted[key]).newbyteorder(order_code)
    return converted
def read_dtype(mat_stream, a_dtype):
    """
    Generic get of byte stream data of known type

    Parameters
    ----------
    mat_stream : file_like object
        MATLAB (tm) mat file stream
    a_dtype : dtype
        dtype of array to read. `a_dtype` is assumed to be correct
        endianness.

    Returns
    -------
    arr : ndarray
        Scalar (0-d) array of dtype `a_dtype` read from stream.
    """
    raw = mat_stream.read(a_dtype.itemsize)
    # Build a 0-d array directly over the bytes just read.
    return np.ndarray(shape=(), dtype=a_dtype, buffer=raw, order='F')
def matfile_version(file_name, *, appendmat=True):
    """
    Return major, minor tuple depending on apparent mat file type
    Where:
    #. 0,x -> version 4 format mat files
    #. 1,x -> version 5 format mat files
    #. 2,x -> version 7.3 format mat files (HDF format)
    Parameters
    ----------
    file_name : str
        Name of the mat file (do not need .mat extension if
        appendmat==True). Can also pass open file-like object.
    appendmat : bool, optional
        True to append the .mat extension to the end of the given
        filename, if not already present. Default is True.
    Returns
    -------
    major_version : {0, 1, 2}
        major MATLAB File format version
    minor_version : int
        minor MATLAB file format version
    Raises
    ------
    MatReadError
        If the file is empty.
    ValueError
        The matfile version is unknown.
    Notes
    -----
    Has the side effect of setting the file read pointer to 0
    """
    # Imported inside the function -- presumably to avoid an import cycle
    # with ._mio, which uses this module.
    from ._mio import _open_file_context
    with _open_file_context(file_name, appendmat=appendmat) as fileobj:
        return _get_matfile_version(fileobj)
get_matfile_version = matfile_version
def _get_matfile_version(fileobj):
    '''Infer the MAT-file format version from an open binary stream.

    Returns ``(major, minor)``; always leaves the stream position at 0.
    '''
    fileobj.seek(0)
    head = fileobj.read(4)
    if not head:
        raise MatReadError("Mat file appears to be empty")
    # Mat4 files have a zero somewhere in first 4 bytes
    if 0 in np.frombuffer(head, dtype=np.uint8):
        fileobj.seek(0)
        return (0, 0)
    # For 5 format or 7.3 format we need to read an integer in the
    # header. Bytes 124 through 128 contain a version integer and an
    # endian test string ('IM' or 'MI').
    fileobj.seek(124)
    probe = fileobj.read(4)
    fileobj.seek(0)
    order_ix = int(probe[2] == b'I'[0])
    major = int(probe[order_ix])
    minor = int(probe[1 - order_ix])
    if major in (1, 2):
        return (major, minor)
    raise ValueError('Unknown mat file type, version %s, %s' % (major, minor))
def matdims(arr, oned_as='column'):
    """
    Determine equivalent MATLAB dimensions for given array
    Parameters
    ----------
    arr : ndarray
        Input array
    oned_as : {'column', 'row'}, optional
        Whether 1-D arrays are returned as MATLAB row or column matrices.
        Default is 'column'.
    Returns
    -------
    dims : tuple
        Shape tuple, in the form MATLAB expects it.
    Notes
    -----
    We had to decide what shape a 1 dimensional array would be by
    default. ``np.atleast_2d`` thinks it is a row vector. The
    default for a vector in MATLAB (e.g., ``>> 1:12``) is a row vector.
    Versions of scipy up to and including 0.11 resulted (accidentally)
    in 1-D arrays being read as column vectors. For the moment, we
    maintain the same tradition here.
    Only 1-D empties collapse to ``(0, 0)``; shapes with two or more
    dimensions pass through unchanged, even when they contain no
    elements.
    Examples
    --------
    >>> matdims(np.array(1)) # NumPy scalar
    (1, 1)
    >>> matdims(np.array([1])) # 1-D array, 1 element
    (1, 1)
    >>> matdims(np.array([1,2])) # 1-D array, 2 elements
    (2, 1)
    >>> matdims(np.array([[2],[3]])) # 2-D array, column vector
    (2, 1)
    >>> matdims(np.array([[2,3]])) # 2-D array, row vector
    (1, 2)
    >>> matdims(np.array([[[2,3]]])) # 3-D array, rowish vector
    (1, 1, 2)
    >>> matdims(np.array([])) # empty 1-D array
    (0, 0)
    >>> matdims(np.array([[]])) # empty 2-D array
    (1, 0)
    >>> matdims(np.array([[[]]])) # empty 3-D array
    (1, 1, 0)
    Optional argument flips 1-D shape behavior.
    >>> matdims(np.array([1,2]), 'row') # 1-D array, 2 elements
    (1, 2)
    The argument has to make sense though
    >>> matdims(np.array([1,2]), 'bizarre')
    Traceback (most recent call last):
    ...
    ValueError: 1-D option "bizarre" is strange
    """
    # BUG FIX (docs only): the doctests previously claimed (0, 0) and
    # (0, 0, 0) for empty 2-D / 3-D arrays, but the code has always
    # returned the shape unchanged for ndim > 1 -- i.e. (1, 0) and
    # (1, 1, 0).  The examples now match the actual behavior.
    shape = arr.shape
    if shape == ():  # scalar
        return (1, 1)
    if len(shape) == 1:  # 1D
        if shape[0] == 0:
            return (0, 0)
        elif oned_as == 'column':
            return shape + (1,)
        elif oned_as == 'row':
            return (1,) + shape
        else:
            raise ValueError('1-D option "%s" is strange'
                             % oned_as)
    return shape
class MatVarReader:
    ''' Abstract class defining required interface for var readers'''
    def __init__(self, file_reader):
        # file_reader: the owning file-reader object (unused in the base
        # class; concrete subclasses decide what to keep from it).
        pass
    def read_header(self):
        ''' Returns header '''
        pass
    def array_from_header(self, header):
        ''' Reads array given header '''
        pass
class MatFileReader:
    """ Base object for reading mat files
    To make this class functional, you will need to override the
    following methods:
    matrix_getter_factory - gives object to fetch next matrix from stream
    guess_byte_order - guesses file byte order from file
    """
    @docfiller
    def __init__(self, mat_stream,
                 byte_order=None,
                 mat_dtype=False,
                 squeeze_me=False,
                 chars_as_strings=True,
                 matlab_compatible=False,
                 struct_as_record=True,
                 verify_compressed_data_integrity=True,
                 simplify_cells=False):
        '''
        Initializer for mat file reader
        mat_stream : file-like
            object with file API, open for reading
        %(load_args)s
        '''
        # Initialize stream
        self.mat_stream = mat_stream
        self.dtypes = {}
        if not byte_order:
            byte_order = self.guess_byte_order()
        else:
            byte_order = boc.to_numpy_code(byte_order)
        self.byte_order = byte_order
        self.struct_as_record = struct_as_record
        if matlab_compatible:
            # matlab_compatible overrides the individual flags below.
            self.set_matlab_compatible()
        else:
            self.squeeze_me = squeeze_me
            self.chars_as_strings = chars_as_strings
            self.mat_dtype = mat_dtype
        self.verify_compressed_data_integrity = verify_compressed_data_integrity
        self.simplify_cells = simplify_cells
        if simplify_cells:
            # Simplified cells imply squeezing and dict-style structs.
            self.squeeze_me = True
            self.struct_as_record = False
    def set_matlab_compatible(self):
        ''' Sets options to return arrays as MATLAB loads them '''
        self.mat_dtype = True
        self.squeeze_me = False
        self.chars_as_strings = False
    def guess_byte_order(self):
        ''' As we do not know what file type we have, assume native '''
        return boc.native_code
    def end_of_stream(self):
        # Peek one byte to detect EOF, then step back so the position is
        # unchanged for the caller.
        # NOTE(review): when already at EOF the read returns b'' and the
        # seek still rewinds one byte -- confirm callers only use the
        # returned flag in that case.
        b = self.mat_stream.read(1)
        curpos = self.mat_stream.tell()
        self.mat_stream.seek(curpos-1)
        return len(b) == 0
def arr_dtype_number(arr, num):
    ''' Return dtype for given number of items per element'''
    # Keep the byte order and kind codes, swap in the new item count.
    kind_code = arr.dtype.str[:2]
    return np.dtype(kind_code + str(num))
def arr_to_chars(arr):
    ''' Convert string array to char array

    Adds a trailing axis of length equal to the string width and
    reinterprets the buffer as single characters; empty characters are
    replaced by spaces (copying first, so the input is untouched).
    '''
    dims = list(arr.shape) or [1]
    # arr.dtype.str is e.g. '<U5'; the digits give the string width.
    dims.append(int(arr.dtype.str[2:]))
    chars = np.ndarray(shape=dims,
                       dtype=arr_dtype_number(arr, 1),
                       buffer=arr)
    empty_mask = chars == np.array('', dtype=chars.dtype)
    if not np.any(empty_mask):
        return chars
    chars = chars.copy()
    chars[empty_mask] = ' '
    return chars
| 12,875
| 29.084112
| 80
|
py
|
scipy
|
scipy-main/scipy/io/matlab/byteordercodes.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io.matlab` namespace for importing the functions
# included below.
import warnings
from . import _byteordercodes
__all__ = [ # noqa: F822
'aliases', 'native_code', 'swapped_code',
'sys_is_le', 'to_numpy_code'
]
def __dir__():
    # Advertise only the deprecated public names.
    return __all__


def __getattr__(name):
    # Unknown names fail immediately; known names emit a deprecation
    # warning and are forwarded to the private implementation module.
    if name not in __all__:
        raise AttributeError(
            "scipy.io.matlab.byteordercodes is deprecated and has no attribute "
            f"{name}. Try looking in scipy.io.matlab instead.")
    warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
                  "the `scipy.io.matlab.byteordercodes` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_byteordercodes, name)
| 849
| 27.333333
| 82
|
py
|
scipy
|
scipy-main/scipy/io/matlab/mio5.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io.matlab` namespace for importing the functions
# included below.
import warnings
from . import _mio5
__all__ = [ # noqa: F822
'mclass_info', 'mxCHAR_CLASS', 'mxSPARSE_CLASS',
'BytesIO', 'native_code',
'swapped_code', 'MatFileReader', 'docfiller', 'matdims',
'read_dtype', 'arr_to_chars', 'arr_dtype_number', 'MatWriteError',
'MatReadError', 'MatReadWarning', 'VarReader5', 'MatlabObject',
'MatlabFunction', 'MDTYPES', 'NP_TO_MTYPES', 'NP_TO_MXTYPES',
'miCOMPRESSED', 'miMATRIX', 'miINT8', 'miUTF8', 'miUINT32',
'mxCELL_CLASS', 'mxSTRUCT_CLASS', 'mxOBJECT_CLASS', 'mxDOUBLE_CLASS',
'mat_struct', 'ZlibInputStream', 'MatFile5Reader', 'varmats_from_mat',
'EmptyStructMarker', 'to_writeable', 'NDT_FILE_HDR', 'NDT_TAG_FULL',
'NDT_TAG_SMALL', 'NDT_ARRAY_FLAGS', 'VarWriter5', 'MatFile5Writer'
]
def __dir__():
    # Advertise only the deprecated public names.
    return __all__


def __getattr__(name):
    # Unknown names fail immediately; known names emit a deprecation
    # warning and are forwarded to the private implementation module.
    if name not in __all__:
        raise AttributeError(
            "scipy.io.matlab.mio5 is deprecated and has no attribute "
            f"{name}. Try looking in scipy.io.matlab instead.")
    warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
                  "the `scipy.io.matlab.mio5` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_mio5, name)
| 1,435
| 36.789474
| 79
|
py
|
scipy
|
scipy-main/scipy/io/matlab/setup.py
|
def configuration(parent_package='io',top_path=None):
    # Legacy numpy.distutils build configuration for scipy.io.matlab:
    # registers the three C extension modules and the tests data dir.
    from numpy.distutils.misc_util import Configuration
    config = Configuration('matlab', parent_package, top_path)
    config.add_extension('_streams', sources=['_streams.c'])
    config.add_extension('_mio_utils', sources=['_mio_utils.c'])
    config.add_extension('_mio5_utils', sources=['_mio5_utils.c'])
    config.add_data_dir('tests')
    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 538
| 37.5
| 66
|
py
|
scipy
|
scipy-main/scipy/io/matlab/_mio4.py
|
''' Classes for read / write of matlab (TM) 4 files
'''
import sys
import warnings
import numpy as np
import scipy.sparse
from ._miobase import (MatFileReader, docfiller, matdims, read_dtype,
convert_dtypes, arr_to_chars, arr_dtype_number)
from ._mio_utils import squeeze_element, chars_to_strings
from functools import reduce
__all__ = [
    'MatFile4Reader', 'MatFile4Writer', 'SYS_LITTLE_ENDIAN',
    'VarHeader4', 'VarReader4', 'VarWriter4', 'arr_to_2d', 'mclass_info',
    'mdtypes_template', 'miDOUBLE', 'miINT16', 'miINT32', 'miSINGLE',
    'miUINT16', 'miUINT8', 'mxCHAR_CLASS', 'mxFULL_CLASS', 'mxSPARSE_CLASS',
    'np_to_mtypes', 'order_codes'
]

# True when the host machine is little-endian.
SYS_LITTLE_ENDIAN = sys.byteorder == 'little'

# MAT-4 data type codes (the P digit of the MOPT header integer).
miDOUBLE = 0
miSINGLE = 1
miINT32 = 2
miINT16 = 3
miUINT16 = 4
miUINT8 = 5

# MAT-4 type code -> numpy dtype string; 'header' describes the
# per-variable header record read by VarReader4.read_header.
mdtypes_template = {
    miDOUBLE: 'f8',
    miSINGLE: 'f4',
    miINT32: 'i4',
    miINT16: 'i2',
    miUINT16: 'u2',
    miUINT8: 'u1',
    'header': [('mopt', 'i4'),
               ('mrows', 'i4'),
               ('ncols', 'i4'),
               ('imagf', 'i4'),
               ('namlen', 'i4')],
    'U1': 'U1',
    }

# numpy dtype string (without byte-order char) -> MAT-4 type code.
np_to_mtypes = {
    'f8': miDOUBLE,
    'c32': miDOUBLE,
    'c24': miDOUBLE,
    'c16': miDOUBLE,
    'f4': miSINGLE,
    'c8': miSINGLE,
    'i4': miINT32,
    'i2': miINT16,
    'u2': miUINT16,
    'u1': miUINT8,
    'S1': miUINT8,
    }

# matrix classes (the T digit of the MOPT header integer)
mxFULL_CLASS = 0
mxCHAR_CLASS = 1
mxSPARSE_CLASS = 2

# M digit of MOPT -> byte order / platform of the writing machine.
order_codes = {
    0: '<',
    1: '>',
    2: 'VAX D-float',  # !
    3: 'VAX G-float',
    4: 'Cray',  # !!
    }

# matrix class code -> human-readable name (used by list_variables).
mclass_info = {
    mxFULL_CLASS: 'double',
    mxCHAR_CLASS: 'char',
    mxSPARSE_CLASS: 'sparse',
    }
class VarHeader4:
    """Header record for one variable in a MAT-4 file."""
    # MAT-4 variables can never be logical or global
    is_logical = False
    is_global = False

    def __init__(self, name, dtype, mclass, dims, is_complex):
        """Store the fields parsed from a MAT-4 variable header.

        Parameters
        ----------
        name : bytes
            variable name as read from the file (NUL bytes stripped).
        dtype : numpy dtype
            dtype of the stored data.
        mclass : int
            matrix class code, e.g. ``mxFULL_CLASS``.
        dims : sequence
            (mrows, ncols) dimensions from the header.
        is_complex : bool
            True when an imaginary part follows the real part.
        """
        self.name = name
        self.dtype = dtype
        self.mclass = mclass
        self.dims = dims
        self.is_complex = is_complex
class VarReader4:
    ''' Class to read matlab 4 variables '''

    def __init__(self, file_reader):
        # Share the stream and read options of the owning MatFile4Reader.
        self.file_reader = file_reader
        self.mat_stream = file_reader.mat_stream
        self.dtypes = file_reader.dtypes
        self.chars_as_strings = file_reader.chars_as_strings
        self.squeeze_me = file_reader.squeeze_me

    def read_header(self):
        ''' Read and return header for variable '''
        data = read_dtype(self.mat_stream, self.dtypes['header'])
        name = self.mat_stream.read(int(data['namlen'])).strip(b'\x00')
        if data['mopt'] < 0 or data['mopt'] > 5000:
            raise ValueError('Mat 4 mopt wrong format, byteswapping problem?')
        # MOPT is a 4-digit decimal integer MOPT: M byte order, O (always
        # 0), P data type code, T matrix class code.
        M, rest = divmod(data['mopt'], 1000)  # order code
        if M not in (0, 1):
            warnings.warn("We do not support byte ordering '%s'; returned "
                          "data may be corrupt" % order_codes[M],
                          UserWarning)
        O, rest = divmod(rest, 100)  # unused, should be 0
        if O != 0:
            raise ValueError('O in MOPT integer should be 0, wrong format?')
        P, rest = divmod(rest, 10)  # data type code e.g miDOUBLE (see above)
        T = rest  # matrix type code e.g., mxFULL_CLASS (see above)
        dims = (data['mrows'], data['ncols'])
        is_complex = data['imagf'] == 1
        dtype = self.dtypes[P]
        return VarHeader4(
            name,
            dtype,
            T,
            dims,
            is_complex)

    def array_from_header(self, hdr, process=True):
        # Dispatch on matrix class; `process` enables char->string
        # conversion and singleton-dimension squeezing where configured.
        mclass = hdr.mclass
        if mclass == mxFULL_CLASS:
            arr = self.read_full_array(hdr)
        elif mclass == mxCHAR_CLASS:
            arr = self.read_char_array(hdr)
            if process and self.chars_as_strings:
                arr = chars_to_strings(arr)
        elif mclass == mxSPARSE_CLASS:
            # no current processing (below) makes sense for sparse
            return self.read_sparse_array(hdr)
        else:
            raise TypeError('No reader for class code %s' % mclass)
        if process and self.squeeze_me:
            return squeeze_element(arr)
        return arr

    def read_sub_array(self, hdr, copy=True):
        ''' Mat4 read using header `hdr` dtype and dims

        Parameters
        ----------
        hdr : object
           object with attributes ``dtype``, ``dims``. dtype is assumed to be
           the correct endianness
        copy : bool, optional
           copies array before return if True (default True)
           (buffer is usually read only)

        Returns
        -------
        arr : ndarray
            of dtype given by `hdr` ``dtype`` and shape given by `hdr` ``dims``
        '''
        dt = hdr.dtype
        dims = hdr.dims
        num_bytes = dt.itemsize
        for d in dims:
            num_bytes *= d
        buffer = self.mat_stream.read(int(num_bytes))
        if len(buffer) != num_bytes:
            raise ValueError("Not enough bytes to read matrix '%s'; is this "
                             "a badly-formed file? Consider listing matrices "
                             "with `whosmat` and loading named matrices with "
                             "`variable_names` kwarg to `loadmat`" % hdr.name)
        # MAT files store arrays in Fortran (column-major) order.
        arr = np.ndarray(shape=dims,
                         dtype=dt,
                         buffer=buffer,
                         order='F')
        if copy:
            arr = arr.copy()
        return arr

    def read_full_array(self, hdr):
        ''' Full (rather than sparse) matrix getter

        Read matrix (array) can be real or complex

        Parameters
        ----------
        hdr : ``VarHeader4`` instance

        Returns
        -------
        arr : ndarray
            complex array if ``hdr.is_complex`` is True, otherwise a real
            numeric array
        '''
        if hdr.is_complex:
            # avoid array copy to save memory
            res = self.read_sub_array(hdr, copy=False)
            res_j = self.read_sub_array(hdr, copy=False)
            return res + (res_j * 1j)
        return self.read_sub_array(hdr)

    def read_char_array(self, hdr):
        ''' latin-1 text matrix (char matrix) reader

        Parameters
        ----------
        hdr : ``VarHeader4`` instance

        Returns
        -------
        arr : ndarray
            with dtype 'U1', shape given by `hdr` ``dims``
        '''
        arr = self.read_sub_array(hdr).astype(np.uint8)
        S = arr.tobytes().decode('latin-1')
        return np.ndarray(shape=hdr.dims,
                          dtype=np.dtype('U1'),
                          buffer=np.array(S)).copy()

    def read_sparse_array(self, hdr):
        ''' Read and return sparse matrix type

        Parameters
        ----------
        hdr : ``VarHeader4`` instance

        Returns
        -------
        arr : ``scipy.sparse.coo_matrix``
            with dtype ``float`` and shape read from the sparse matrix data

        Notes
        -----
        MATLAB 4 real sparse arrays are saved in a N+1 by 3 array format, where
        N is the number of non-zero values. Column 1 values [0:N] are the
        (1-based) row indices of the each non-zero value, column 2 [0:N] are the
        column indices, column 3 [0:N] are the (real) values. The last values
        [-1,0:2] of the rows, column indices are shape[0] and shape[1]
        respectively of the output matrix. The last value for the values column
        is a padding 0. mrows and ncols values from the header give the shape of
        the stored matrix, here [N+1, 3]. Complex data are saved as a 4 column
        matrix, where the fourth column contains the imaginary component; the
        last value is again 0. Complex sparse data do *not* have the header
        ``imagf`` field set to True; the fact that the data are complex is only
        detectable because there are 4 storage columns.
        '''
        res = self.read_sub_array(hdr)
        tmp = res[:-1,:]
        # All numbers are float64 in Matlab, but SciPy sparse expects int shape
        dims = (int(res[-1,0]), int(res[-1,1]))
        I = np.ascontiguousarray(tmp[:,0],dtype='intc')  # fixes byte order also
        J = np.ascontiguousarray(tmp[:,1],dtype='intc')
        I -= 1  # for 1-based indexing
        J -= 1
        if res.shape[1] == 3:
            V = np.ascontiguousarray(tmp[:,2],dtype='float')
        else:
            # 4 storage columns signal complex data (see Notes above).
            V = np.ascontiguousarray(tmp[:,2],dtype='complex')
            V.imag = tmp[:,3]
        return scipy.sparse.coo_matrix((V,(I,J)), dims)

    def shape_from_header(self, hdr):
        '''Read the shape of the array described by the header.
        The file position after this call is unspecified.
        '''
        mclass = hdr.mclass
        if mclass == mxFULL_CLASS:
            shape = tuple(map(int, hdr.dims))
        elif mclass == mxCHAR_CLASS:
            shape = tuple(map(int, hdr.dims))
            if self.chars_as_strings:
                # the last dimension holds the characters of each string
                shape = shape[:-1]
        elif mclass == mxSPARSE_CLASS:
            dt = hdr.dtype
            dims = hdr.dims
            if not (len(dims) == 2 and dims[0] >= 1 and dims[1] >= 1):
                return ()
            # Read only the row and column counts
            self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1)
            rows = np.ndarray(shape=(), dtype=dt,
                              buffer=self.mat_stream.read(dt.itemsize))
            self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1)
            cols = np.ndarray(shape=(), dtype=dt,
                              buffer=self.mat_stream.read(dt.itemsize))
            shape = (int(rows), int(cols))
        else:
            raise TypeError('No reader for class code %s' % mclass)
        if self.squeeze_me:
            shape = tuple([x for x in shape if x != 1])
        return shape
class MatFile4Reader(MatFileReader):
    ''' Reader for Mat4 files '''

    @docfiller
    def __init__(self, mat_stream, *args, **kwargs):
        ''' Initialize matlab 4 file reader

        %(matstream_arg)s
        %(load_args)s
        '''
        super().__init__(mat_stream, *args, **kwargs)
        self._matrix_reader = None

    def guess_byte_order(self):
        # The first 4 bytes hold the MOPT integer; a value outside
        # [0, 5000] can only arise from the opposite byte order.
        self.mat_stream.seek(0)
        mopt = read_dtype(self.mat_stream, np.dtype('i4'))
        self.mat_stream.seek(0)
        if mopt == 0:
            return '<'
        if mopt < 0 or mopt > 5000:
            # Number must have been byteswapped
            return SYS_LITTLE_ENDIAN and '>' or '<'
        # Not byteswapped
        return SYS_LITTLE_ENDIAN and '<' or '>'

    def initialize_read(self):
        ''' Run when beginning read of variables

        Sets up readers from parameters in `self`
        '''
        self.dtypes = convert_dtypes(mdtypes_template, self.byte_order)
        self._matrix_reader = VarReader4(self)

    def read_var_header(self):
        ''' Read and return header, next position

        Parameters
        ----------
        None

        Returns
        -------
        header : object
           object that can be passed to self.read_var_array, and that
           has attributes ``name`` and ``is_global``
        next_position : int
           position in stream of next variable
        '''
        hdr = self._matrix_reader.read_header()
        n = reduce(lambda x, y: x*y, hdr.dims, 1)  # fast product
        remaining_bytes = hdr.dtype.itemsize * n
        if hdr.is_complex and not hdr.mclass == mxSPARSE_CLASS:
            # real and imaginary parts are stored as two separate arrays
            remaining_bytes *= 2
        next_position = self.mat_stream.tell() + remaining_bytes
        return hdr, next_position

    def read_var_array(self, header, process=True):
        ''' Read array, given `header`

        Parameters
        ----------
        header : header object
           object with fields defining variable header
        process : {True, False}, optional
           If True, apply recursive post-processing during loading of array.

        Returns
        -------
        arr : array
           array with post-processing applied or not according to
           `process`.
        '''
        return self._matrix_reader.array_from_header(header, process)

    def get_variables(self, variable_names=None):
        ''' get variables from stream as dictionary

        Parameters
        ----------
        variable_names : None or str or sequence of str, optional
            variable name, or sequence of variable names to get from Mat file /
            file stream. If None, then get all variables in file.
        '''
        if isinstance(variable_names, str):
            variable_names = [variable_names]
        elif variable_names is not None:
            variable_names = list(variable_names)
        self.mat_stream.seek(0)
        # set up variable reader
        self.initialize_read()
        mdict = {}
        while not self.end_of_stream():
            hdr, next_position = self.read_var_header()
            name = 'None' if hdr.name is None else hdr.name.decode('latin1')
            if variable_names is not None and name not in variable_names:
                # skip this variable without reading its data
                self.mat_stream.seek(next_position)
                continue
            mdict[name] = self.read_var_array(hdr)
            self.mat_stream.seek(next_position)
            if variable_names is not None:
                variable_names.remove(name)
                if len(variable_names) == 0:
                    break
        return mdict

    def list_variables(self):
        ''' list variables from stream '''
        self.mat_stream.seek(0)
        # set up variable reader
        self.initialize_read()
        vars = []
        while not self.end_of_stream():
            hdr, next_position = self.read_var_header()
            name = 'None' if hdr.name is None else hdr.name.decode('latin1')
            shape = self._matrix_reader.shape_from_header(hdr)
            info = mclass_info.get(hdr.mclass, 'unknown')
            vars.append((name, shape, info))
            self.mat_stream.seek(next_position)
        return vars
def arr_to_2d(arr, oned_as='row'):
    """Reshape ``arr`` to exactly two dimensions.

    Parameters
    ----------
    arr : array
    oned_as : {'row', 'column'}, optional
        Whether to reshape 1-D vectors as row vectors or column vectors.
        See documentation for ``matdims`` for more detail

    Returns
    -------
    arr2d : array
       2-D version of the array

    Raises
    ------
    ValueError
        If `arr` has more than 2 dimensions.
    """
    target_shape = matdims(arr, oned_as)
    if len(target_shape) > 2:
        raise ValueError('Matlab 4 files cannot save arrays with more than '
                         '2 dimensions')
    return arr.reshape(target_shape)
class VarWriter4:
    """Writer for single variables in MAT-4 format.

    Parameters
    ----------
    file_writer : object
        object with ``file_stream`` and ``oned_as`` attributes (normally a
        ``MatFile4Writer``).
    """

    def __init__(self, file_writer):
        self.file_stream = file_writer.file_stream
        self.oned_as = file_writer.oned_as

    def write_bytes(self, arr):
        # MAT files store arrays in Fortran (column-major) order.
        self.file_stream.write(arr.tobytes(order='F'))

    def write_string(self, s):
        self.file_stream.write(s)

    def write_header(self, name, shape, P=miDOUBLE, T=mxFULL_CLASS, imagf=0):
        ''' Write header for given data options

        Parameters
        ----------
        name : str
            name of variable
        shape : sequence
           Shape of array as it will be read in matlab
        P : int, optional
            code for mat4 data type, one of ``miDOUBLE, miSINGLE, miINT32,
            miINT16, miUINT16, miUINT8``
        T : int, optional
            code for mat4 matrix class, one of ``mxFULL_CLASS, mxCHAR_CLASS,
            mxSPARSE_CLASS``
        imagf : int, optional
            flag indicating complex
        '''
        header = np.empty((), mdtypes_template['header'])
        M = not SYS_LITTLE_ENDIAN
        O = 0
        # MOPT is a packed 4-digit decimal: M byte order, O (always 0),
        # P data type code, T matrix class code.
        header['mopt'] = (M * 1000 +
                          O * 100 +
                          P * 10 +
                          T)
        header['mrows'] = shape[0]
        header['ncols'] = shape[1]
        header['imagf'] = imagf
        header['namlen'] = len(name) + 1
        self.write_bytes(header)
        # Name is written NUL-terminated, latin-1 encoded.
        data = name + '\0'
        self.write_string(data.encode('latin1'))

    def write(self, arr, name):
        ''' Write matrix `arr`, with name `name`

        Parameters
        ----------
        arr : array_like
           array to write
        name : str
           name in matlab workspace
        '''
        # we need to catch sparse first, because np.asarray returns an
        # object array for scipy.sparse
        if scipy.sparse.issparse(arr):
            self.write_sparse(arr, name)
            return
        arr = np.asarray(arr)
        dt = arr.dtype
        if not dt.isnative:
            arr = arr.astype(dt.newbyteorder('='))
        dtt = dt.type
        if dtt is np.object_:
            raise TypeError('Cannot save object arrays in Mat4')
        elif dtt is np.void:
            raise TypeError('Cannot save void type arrays')
        elif dtt in (np.str_, np.bytes_):
            # np.str_/np.bytes_ are the canonical spellings of the
            # np.unicode_/np.string_ aliases removed in NumPy 2.0.
            self.write_char(arr, name)
            return
        self.write_numeric(arr, name)

    def write_numeric(self, arr, name):
        # Force to 2-D; fall back to double (the MATLAB default) for
        # dtypes with no direct MAT-4 equivalent.
        arr = arr_to_2d(arr, self.oned_as)
        imagf = arr.dtype.kind == 'c'
        try:
            P = np_to_mtypes[arr.dtype.str[1:]]
        except KeyError:
            if imagf:
                arr = arr.astype('c128')
            else:
                arr = arr.astype('f8')
            P = miDOUBLE
        self.write_header(name,
                          arr.shape,
                          P=P,
                          T=mxFULL_CLASS,
                          imagf=imagf)
        if imagf:
            # real part first, then imaginary part, as two full arrays
            self.write_bytes(arr.real)
            self.write_bytes(arr.imag)
        else:
            self.write_bytes(arr)

    def write_char(self, arr, name):
        # Expand strings to single chars, then store as uint8 latin-1.
        arr = arr_to_chars(arr)
        arr = arr_to_2d(arr, self.oned_as)
        dims = arr.shape
        self.write_header(
            name,
            dims,
            P=miUINT8,
            T=mxCHAR_CLASS)
        if arr.dtype.kind == 'U':
            # Recode unicode to latin1
            n_chars = np.prod(dims)
            st_arr = np.ndarray(shape=(),
                                dtype=arr_dtype_number(arr, n_chars),
                                buffer=arr)
            st = st_arr.item().encode('latin-1')
            arr = np.ndarray(shape=dims, dtype='S1', buffer=st)
        self.write_bytes(arr)

    def write_sparse(self, arr, name):
        ''' Sparse matrices are 2-D

        See docstring for VarReader4.read_sparse_array
        '''
        A = arr.tocoo()  # convert to sparse COO format (ijv)
        imagf = A.dtype.kind == 'c'
        # N+1 rows: last row holds the matrix shape (and a padding 0).
        ijv = np.zeros((A.nnz + 1, 3+imagf), dtype='f8')
        ijv[:-1,0] = A.row
        ijv[:-1,1] = A.col
        ijv[:-1,0:2] += 1  # 1 based indexing
        if imagf:
            ijv[:-1,2] = A.data.real
            ijv[:-1,3] = A.data.imag
        else:
            ijv[:-1,2] = A.data
        ijv[-1,0:2] = A.shape
        self.write_header(
            name,
            ijv.shape,
            P=miDOUBLE,
            T=mxSPARSE_CLASS)
        self.write_bytes(ijv)
class MatFile4Writer:
    """Writer for MATLAB 4 format mat files."""

    def __init__(self, file_stream, oned_as=None):
        """Create writer over `file_stream`; 1-D arrays default to rows."""
        self.file_stream = file_stream
        self.oned_as = 'row' if oned_as is None else oned_as
        self._matrix_writer = None

    def put_variables(self, mdict, write_header=None):
        """Write variables in `mdict` to stream.

        Parameters
        ----------
        mdict : mapping
           mapping with method ``items`` returning name, contents pairs where
           ``name`` will appear in the matlab workspace on file load, and
           ``contents`` is something writeable to a matlab file, such as a
           NumPy array.
        write_header : {None, True, False}
           Ignored: a matlab 4 mat file has no file header, so this argument
           exists only for interface compatibility with the matlab 5 writer.
        """
        writer = VarWriter4(self)
        self._matrix_writer = writer
        for name, var in mdict.items():
            writer.write(var, name)
| 20,612
| 32.033654
| 80
|
py
|
scipy
|
scipy-main/scipy/io/matlab/mio5_params.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io.matlab` namespace for importing the functions
# included below.
import warnings
from . import _mio5_params
__all__ = [ # noqa: F822
'MDTYPES', 'MatlabFunction', 'MatlabObject', 'MatlabOpaque',
'NP_TO_MTYPES', 'NP_TO_MXTYPES', 'OPAQUE_DTYPE', 'codecs_template',
'mat_struct', 'mclass_dtypes_template', 'mclass_info', 'mdtypes_template',
'miCOMPRESSED', 'miDOUBLE', 'miINT16', 'miINT32', 'miINT64', 'miINT8',
'miMATRIX', 'miSINGLE', 'miUINT16', 'miUINT32', 'miUINT64', 'miUINT8',
'miUTF16', 'miUTF32', 'miUTF8', 'mxCELL_CLASS', 'mxCHAR_CLASS',
'mxDOUBLE_CLASS', 'mxFUNCTION_CLASS', 'mxINT16_CLASS', 'mxINT32_CLASS',
'mxINT64_CLASS', 'mxINT8_CLASS', 'mxOBJECT_CLASS',
'mxOBJECT_CLASS_FROM_MATRIX_H', 'mxOPAQUE_CLASS', 'mxSINGLE_CLASS',
'mxSPARSE_CLASS', 'mxSTRUCT_CLASS', 'mxUINT16_CLASS', 'mxUINT32_CLASS',
'mxUINT64_CLASS', 'mxUINT8_CLASS', 'convert_dtypes'
]
def __dir__():
    # Advertise only the deprecated public names.
    return __all__


def __getattr__(name):
    # Unknown names fail immediately; known names emit a deprecation
    # warning and are forwarded to the private implementation module.
    if name not in __all__:
        raise AttributeError(
            "scipy.io.matlab.mio5_params is deprecated and has no attribute "
            f"{name}. Try looking in scipy.io.matlab instead.")
    warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
                  "the `scipy.io.matlab.mio5_params` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_mio5_params, name)
| 1,526
| 39.184211
| 79
|
py
|
scipy
|
scipy-main/scipy/io/matlab/_mio5.py
|
''' Classes for read / write of matlab (TM) 5 files
The matfile specification last found here:
https://www.mathworks.com/access/helpdesk/help/pdf_doc/matlab/matfile_format.pdf
(as of December 5 2008)
=================================
Note on functions and mat files
=================================
The document above does not give any hints as to the storage of matlab
function handles, or anonymous function handles. I had, therefore, to
guess the format of matlab arrays of ``mxFUNCTION_CLASS`` and
``mxOPAQUE_CLASS`` by looking at example mat files.
``mxFUNCTION_CLASS`` stores all types of matlab functions. It seems to
contain a struct matrix with a set pattern of fields. For anonymous
functions, a sub-field of one of these fields seems to contain the
well-named ``mxOPAQUE_CLASS``. This seems to contain:
* array flags as for any matlab matrix
* 3 int8 strings
* a matrix
It seems that whenever the mat file contains a ``mxOPAQUE_CLASS``
instance, there is also an un-named matrix (name == '') at the end of
the mat file. I'll call this the ``__function_workspace__`` matrix.
When I saved two anonymous functions in a mat file, or appended another
anonymous function to the mat file, there was still only one
``__function_workspace__`` un-named matrix at the end, but larger than
that for a mat file with a single anonymous function, suggesting that
the workspaces for the two functions had been merged.
The ``__function_workspace__`` matrix appears to be of double class
(``mxCLASS_DOUBLE``), but stored as uint8, the memory for which is in
the format of a mini .mat file, without the first 124 bytes of the file
header (the description and the subsystem_offset), but with the version
U2 bytes, and the S2 endian test bytes. There follow 4 zero bytes,
presumably for 8 byte padding, and then a series of ``miMATRIX``
entries, as in a standard mat file. The ``miMATRIX`` entries appear to
be series of un-named (name == '') matrices, and may also contain arrays
of this same mini-mat format.
I guess that:
* saving an anonymous function back to a mat file will need the
associated ``__function_workspace__`` matrix saved as well for the
anonymous function to work correctly.
* appending to a mat file that has a ``__function_workspace__`` would
involve first pulling off this workspace, appending, checking whether
there were any more anonymous functions appended, and then somehow
merging the relevant workspaces, and saving at the end of the mat
file.
The mat files I was playing with are in ``tests/data``:
* sqr.mat
* parabola.mat
* some_functions.mat
See ``tests/test_mio.py:test_mio_funcs.py`` for the debugging
script I was working with.
Small fragments of current code adapted from matfile.py by Heiko
Henkelmann; parts of the code for simplify_cells=True adapted from
http://blog.nephics.com/2019/08/28/better-loadmat-for-scipy/.
'''
import os
import time
import sys
import zlib
from io import BytesIO
import warnings
import numpy as np
import scipy.sparse
from ._byteordercodes import native_code, swapped_code
from ._miobase import (MatFileReader, docfiller, matdims, read_dtype,
arr_to_chars, arr_dtype_number, MatWriteError,
MatReadError, MatReadWarning)
# Reader object for matlab 5 format variables
from ._mio5_utils import VarReader5
# Constants and helper objects
from ._mio5_params import (MatlabObject, MatlabFunction, MDTYPES, NP_TO_MTYPES,
NP_TO_MXTYPES, miCOMPRESSED, miMATRIX, miINT8,
miUTF8, miUINT32, mxCELL_CLASS, mxSTRUCT_CLASS,
mxOBJECT_CLASS, mxCHAR_CLASS, mxSPARSE_CLASS,
mxDOUBLE_CLASS, mclass_info, mat_struct)
from ._streams import ZlibInputStream
def _has_struct(elem):
    """Return True if `elem` is a non-empty ndarray whose first item is a
    mat_struct."""
    if not isinstance(elem, np.ndarray):
        return False
    if elem.size == 0 or elem.ndim == 0:
        return False
    return isinstance(elem[0], mat_struct)
def _inspect_cell_array(ndarray):
    """Construct lists from cell arrays (loaded as numpy ndarrays), recursing
    into items if they contain mat_struct objects."""
    def _convert(item):
        # mat_structs become dicts; nested cell arrays recurse; anything
        # else passes through unchanged.
        if isinstance(item, mat_struct):
            return _matstruct_to_dict(item)
        if _has_struct(item):
            return _inspect_cell_array(item)
        return item

    return [_convert(sub_elem) for sub_elem in ndarray]
def _matstruct_to_dict(matobj):
    """Construct nested dicts from mat_struct objects."""
    def _field_value(value):
        # Recurse into nested structs and cell arrays of structs.
        if isinstance(value, mat_struct):
            return _matstruct_to_dict(value)
        if _has_struct(value):
            return _inspect_cell_array(value)
        return value

    return {name: _field_value(matobj.__dict__[name])
            for name in matobj._fieldnames}
def _simplify_cells(d):
    """Convert mat objects in dict `d` to nested dicts, in place."""
    # Snapshot the items so reassignment during iteration is safe.
    for key, value in list(d.items()):
        if isinstance(value, mat_struct):
            d[key] = _matstruct_to_dict(value)
        elif _has_struct(value):
            d[key] = _inspect_cell_array(value)
    return d
class MatFile5Reader(MatFileReader):
    ''' Reader for Mat 5 mat files

    Adds the following attribute to base class

    uint16_codec - char codec to use for uint16 char arrays
        (defaults to system default codec)

    Uses variable reader that has the following standard interface (see
    abstract class in ``miobase``::

       __init__(self, file_reader)
       read_header(self)
       array_from_header(self)

    and added interface::

       set_stream(self, stream)
       read_full_tag(self)
    '''

    @docfiller
    def __init__(self,
                 mat_stream,
                 byte_order=None,
                 mat_dtype=False,
                 squeeze_me=False,
                 chars_as_strings=True,
                 matlab_compatible=False,
                 struct_as_record=True,
                 verify_compressed_data_integrity=True,
                 uint16_codec=None,
                 simplify_cells=False):
        '''Initializer for matlab 5 file format reader

    %(matstream_arg)s
    %(load_args)s
    %(struct_arg)s
    uint16_codec : {None, string}
        Set codec to use for uint16 char arrays (e.g., 'utf-8').
        Use system default codec if None
        '''
        super().__init__(
            mat_stream,
            byte_order,
            mat_dtype,
            squeeze_me,
            chars_as_strings,
            matlab_compatible,
            struct_as_record,
            verify_compressed_data_integrity,
            simplify_cells)
        # Set uint16 codec
        if not uint16_codec:
            uint16_codec = sys.getdefaultencoding()
        self.uint16_codec = uint16_codec
        # placeholders for readers - see initialize_read method
        self._file_reader = None
        self._matrix_reader = None

    def guess_byte_order(self):
        ''' Guess byte order.
        Sets stream pointer to 0'''
        # Bytes 126-127 of the file header are 'IM' on little-endian
        # writers, 'MI' on big-endian ones.
        self.mat_stream.seek(126)
        mi = self.mat_stream.read(2)
        self.mat_stream.seek(0)
        return mi == b'IM' and '<' or '>'

    def read_file_header(self):
        ''' Read in mat 5 file header '''
        hdict = {}
        hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header']
        hdr = read_dtype(self.mat_stream, hdr_dtype)
        hdict['__header__'] = hdr['description'].item().strip(b' \t\n\000')
        # version is stored as major in the high byte, minor in the low byte
        v_major = hdr['version'] >> 8
        v_minor = hdr['version'] & 0xFF
        hdict['__version__'] = '%d.%d' % (v_major, v_minor)
        return hdict

    def initialize_read(self):
        ''' Run when beginning read of variables

        Sets up readers from parameters in `self`
        '''
        # reader for top level stream. We need this extra top-level
        # reader because we use the matrix_reader object to contain
        # compressed matrices (so they have their own stream)
        self._file_reader = VarReader5(self)
        # reader for matrix streams
        self._matrix_reader = VarReader5(self)

    def read_var_header(self):
        ''' Read header, return header, next position

        Header has to define at least .name and .is_global

        Parameters
        ----------
        None

        Returns
        -------
        header : object
           object that can be passed to self.read_var_array, and that
           has attributes .name and .is_global
        next_position : int
           position in stream of next variable
        '''
        mdtype, byte_count = self._file_reader.read_full_tag()
        if not byte_count > 0:
            raise ValueError("Did not read any bytes")
        next_pos = self.mat_stream.tell() + byte_count
        if mdtype == miCOMPRESSED:
            # Make new stream from compressed data
            stream = ZlibInputStream(self.mat_stream, byte_count)
            self._matrix_reader.set_stream(stream)
            check_stream_limit = self.verify_compressed_data_integrity
            mdtype, byte_count = self._matrix_reader.read_full_tag()
        else:
            check_stream_limit = False
            self._matrix_reader.set_stream(self.mat_stream)
        if not mdtype == miMATRIX:
            raise TypeError('Expecting miMATRIX type here, got %d' % mdtype)
        header = self._matrix_reader.read_header(check_stream_limit)
        return header, next_pos

    def read_var_array(self, header, process=True):
        ''' Read array, given `header`

        Parameters
        ----------
        header : header object
           object with fields defining variable header
        process : {True, False} bool, optional
           If True, apply recursive post-processing during loading of
           array.

        Returns
        -------
        arr : array
           array with post-processing applied or not according to
           `process`.
        '''
        return self._matrix_reader.array_from_header(header, process)

    def get_variables(self, variable_names=None):
        ''' get variables from stream as dictionary

        variable_names   - optional list of variable names to get

        If variable_names is None, then get all variables in file
        '''
        if isinstance(variable_names, str):
            variable_names = [variable_names]
        elif variable_names is not None:
            variable_names = list(variable_names)
        self.mat_stream.seek(0)
        # Here we pass all the parameters in self to the reading objects
        self.initialize_read()
        mdict = self.read_file_header()
        mdict['__globals__'] = []
        while not self.end_of_stream():
            hdr, next_position = self.read_var_header()
            name = 'None' if hdr.name is None else hdr.name.decode('latin1')
            if name in mdict:
                warnings.warn('Duplicate variable name "%s" in stream'
                              ' - replacing previous with new\n'
                              'Consider mio5.varmats_from_mat to split '
                              'file into single variable files' % name,
                              MatReadWarning, stacklevel=2)
            if name == '':
                # can only be a matlab 7 function workspace
                name = '__function_workspace__'
                # We want to keep this raw because mat_dtype processing
                # will break the format (uint8 as mxDOUBLE_CLASS)
                process = False
            else:
                process = True
            if variable_names is not None and name not in variable_names:
                self.mat_stream.seek(next_position)
                continue
            try:
                res = self.read_var_array(hdr, process)
            except MatReadError as err:
                # An unreadable variable becomes a string placeholder
                # rather than aborting the whole load.
                warnings.warn(
                    'Unreadable variable "%s", because "%s"' %
                    (name, err),
                    Warning, stacklevel=2)
                res = "Read error: %s" % err
            self.mat_stream.seek(next_position)
            mdict[name] = res
            if hdr.is_global:
                mdict['__globals__'].append(name)
            if variable_names is not None:
                variable_names.remove(name)
                if len(variable_names) == 0:
                    break
        if self.simplify_cells:
            return _simplify_cells(mdict)
        else:
            return mdict

    def list_variables(self):
        ''' list variables from stream '''
        self.mat_stream.seek(0)
        # Here we pass all the parameters in self to the reading objects
        self.initialize_read()
        self.read_file_header()
        vars = []
        while not self.end_of_stream():
            hdr, next_position = self.read_var_header()
            name = 'None' if hdr.name is None else hdr.name.decode('latin1')
            if name == '':
                # can only be a matlab 7 function workspace
                name = '__function_workspace__'
            shape = self._matrix_reader.shape_from_header(hdr)
            if hdr.is_logical:
                info = 'logical'
            else:
                info = mclass_info.get(hdr.mclass, 'unknown')
            vars.append((name, shape, info))
            self.mat_stream.seek(next_position)
        return vars
def varmats_from_mat(file_obj):
    """ Pull variables out of mat 5 file as a sequence of mat file objects

    This can be useful with a difficult mat file, containing unreadable
    variables. This routine pulls the variables out in raw form and puts them,
    unread, back into a file stream for saving or reading. Another use is the
    pathological case where there is more than one variable of the same name in
    the file; this routine returns the duplicates, whereas the standard reader
    will overwrite duplicates in the returned dictionary.

    The file pointer in `file_obj` will be undefined. File pointers for the
    returned file-like objects are set at 0.

    Parameters
    ----------
    file_obj : file-like
        file object containing mat file

    Returns
    -------
    named_mats : list
        list contains tuples of (name, BytesIO) where BytesIO is a file-like
        object containing mat file contents as for a single variable. The
        BytesIO contains a string with the original header and a single var. If
        ``var_file_obj`` is an individual BytesIO instance, then save as a mat
        file with something like ``open('test.mat',
        'wb').write(var_file_obj.read())``

    Examples
    --------
    >>> import numpy as np
    >>> from io import BytesIO
    >>> import scipy.io

    >>> mat_fileobj = BytesIO()
    >>> scipy.io.savemat(mat_fileobj, {'b': np.arange(10), 'a': 'a string'})
    >>> varmats = varmats_from_mat(mat_fileobj)
    >>> sorted([name for name, str_obj in varmats])
    ['a', 'b']
    """
    rdr = MatFile5Reader(file_obj)
    file_obj.seek(0)
    # Raw read of top-level file header
    hdr_len = MDTYPES[native_code]['dtypes']['file_header'].itemsize
    raw_hdr = file_obj.read(hdr_len)
    # Initialize variable reading
    file_obj.seek(0)
    rdr.initialize_read()
    rdr.read_file_header()
    next_position = file_obj.tell()
    named_mats = []
    while not rdr.end_of_stream():
        start_position = next_position
        hdr, next_position = rdr.read_var_header()
        name = 'None' if hdr.name is None else hdr.name.decode('latin1')
        # Read raw variable string: the untouched bytes between this
        # variable's start and the next variable's start.
        file_obj.seek(start_position)
        byte_count = next_position - start_position
        var_str = file_obj.read(byte_count)
        # write to stringio object: original file header + this one variable
        out_obj = BytesIO()
        out_obj.write(raw_hdr)
        out_obj.write(var_str)
        out_obj.seek(0)
        named_mats.append((name, out_obj))
    return named_mats
class EmptyStructMarker:
    """ Class to indicate presence of empty matlab struct on output """


def to_writeable(source):
    """Convert input object ``source`` to something we can write.

    Parameters
    ----------
    source : object
        Value the caller would like to store in a matfile.

    Returns
    -------
    arr : None or ndarray or EmptyStructMarker
        If `source` cannot be converted to something we can write to a matfile,
        return None. If `source` is equivalent to an empty dictionary, return
        ``EmptyStructMarker``. Otherwise return `source` converted to an
        ndarray with contents for writing to matfile.
    """
    if isinstance(source, np.ndarray):
        return source
    if source is None:
        return None
    if hasattr(source, "__array__"):
        return np.asarray(source)
    # Mappings, and objects exposing a __dict__, become matlab structs.
    mapping = None
    if (hasattr(source, 'keys') and hasattr(source, 'values')
            and hasattr(source, 'items')):
        mapping = source
    elif not isinstance(source, np.generic) and hasattr(source, '__dict__'):
        # np.generic is excluded: NumPy scalars are never mappings
        # (PyPy issue workaround).
        mapping = {key: value for key, value in source.__dict__.items()
                   if not key.startswith('_')}
    if mapping is not None:
        field_dtypes = []
        field_values = []
        for field, value in mapping.items():
            # Keep only fields that are legal matlab identifiers.
            if (isinstance(field, str) and
                    field[0] not in '_0123456789'):
                field_dtypes.append((str(field), object))
                field_values.append(value)
        if not field_dtypes:
            return EmptyStructMarker
        return np.array([tuple(field_values)], field_dtypes)
    # Fall back to letting NumPy coerce the value.
    try:
        cast = np.asanyarray(source)
    except ValueError:
        cast = np.asanyarray(source, dtype=object)
    if (cast.dtype.type in (object, np.object_) and
            cast.shape == () and cast == source):
        # No interesting conversion possible
        return None
    return cast
# Native byte ordered dtypes for convenience for writers
NDT_FILE_HDR = MDTYPES[native_code]['dtypes']['file_header']  # 128-byte file header
NDT_TAG_FULL = MDTYPES[native_code]['dtypes']['tag_full']  # 8-byte data element tag
NDT_TAG_SMALL = MDTYPES[native_code]['dtypes']['tag_smalldata']  # tag with embedded data (<= 4 bytes)
NDT_ARRAY_FLAGS = MDTYPES[native_code]['dtypes']['array_flags']  # miMATRIX array-flags sub-element
class VarWriter5:
    ''' Generic matlab matrix writing class '''
    # Template tag for a miMATRIX element; ``byte_count`` is patched in
    # later by ``update_matrix_tag`` once the element's length is known.
    mat_tag = np.zeros((), NDT_TAG_FULL)
    mat_tag['mdtype'] = miMATRIX

    def __init__(self, file_writer):
        # Output stream and formatting options come from the owning
        # MatFile5Writer instance.
        self.file_stream = file_writer.file_stream
        self.unicode_strings = file_writer.unicode_strings
        self.long_field_names = file_writer.long_field_names
        self.oned_as = file_writer.oned_as
        # These are used for top level writes, and unset after
        self._var_name = None
        self._var_is_global = False

    def write_bytes(self, arr):
        # MATLAB stores array data in Fortran (column-major) order.
        self.file_stream.write(arr.tobytes(order='F'))

    def write_string(self, s):
        self.file_stream.write(s)

    def write_element(self, arr, mdtype=None):
        ''' write tag and data '''
        if mdtype is None:
            mdtype = NP_TO_MTYPES[arr.dtype.str[1:]]
        # Array needs to be in native byte order
        if arr.dtype.byteorder == swapped_code:
            # ``ndarray.newbyteorder`` was removed in NumPy 2.0; swap the
            # bytes, then re-label the buffer with a native-order dtype.
            arr = arr.byteswap().view(arr.dtype.newbyteorder())
        byte_count = arr.size*arr.itemsize
        if byte_count <= 4:
            # "Small data element" format: tag and data share 8 bytes.
            self.write_smalldata_element(arr, mdtype, byte_count)
        else:
            self.write_regular_element(arr, mdtype, byte_count)

    def write_smalldata_element(self, arr, mdtype, byte_count):
        # write tag with embedded data
        tag = np.zeros((), NDT_TAG_SMALL)
        tag['byte_count_mdtype'] = (byte_count << 16) + mdtype
        # if arr.tobytes is < 4, the element will be zero-padded as needed.
        tag['data'] = arr.tobytes(order='F')
        self.write_bytes(tag)

    def write_regular_element(self, arr, mdtype, byte_count):
        # write tag, data
        tag = np.zeros((), NDT_TAG_FULL)
        tag['mdtype'] = mdtype
        tag['byte_count'] = byte_count
        self.write_bytes(tag)
        self.write_bytes(arr)
        # pad to next 64-bit boundary
        bc_mod_8 = byte_count % 8
        if bc_mod_8:
            self.file_stream.write(b'\x00' * (8-bc_mod_8))

    def write_header(self,
                     shape,
                     mclass,
                     is_complex=False,
                     is_logical=False,
                     nzmax=0):
        ''' Write header for given data options

        Parameters
        ----------
        shape : sequence
           array shape
        mclass : int
           mat5 matrix class
        is_complex : bool, optional
           True if matrix is complex
        is_logical : bool, optional
           True if matrix is logical
        nzmax : int, optional
           max non zero elements for sparse arrays

        We get the name and the global flag from the object, and reset
        them to defaults after we've used them
        '''
        # get name and is_global from one-shot object store
        name = self._var_name
        is_global = self._var_is_global
        # initialize the top-level matrix tag, store position
        self._mat_tag_pos = self.file_stream.tell()
        self.write_bytes(self.mat_tag)
        # write array flags (complex, global, logical, class, nzmax)
        af = np.zeros((), NDT_ARRAY_FLAGS)
        af['data_type'] = miUINT32
        af['byte_count'] = 8
        flags = is_complex << 3 | is_global << 2 | is_logical << 1
        af['flags_class'] = mclass | flags << 8
        af['nzmax'] = nzmax
        self.write_bytes(af)
        # shape
        self.write_element(np.array(shape, dtype='i4'))
        # write name
        name = np.asarray(name)
        if name == '':  # empty string zero-terminated
            self.write_smalldata_element(name, miINT8, 0)
        else:
            self.write_element(name, miINT8)
        # reset the one-shot store to defaults
        self._var_name = ''
        self._var_is_global = False

    def update_matrix_tag(self, start_pos):
        # Seek back and patch the miMATRIX tag with the now-known length
        # of everything written after it.
        curr_pos = self.file_stream.tell()
        self.file_stream.seek(start_pos)
        byte_count = curr_pos - start_pos - 8
        if byte_count >= 2**32:
            # byte_count is a uint32 field in the format.
            raise MatWriteError("Matrix too large to save with Matlab "
                                "5 format")
        self.mat_tag['byte_count'] = byte_count
        self.write_bytes(self.mat_tag)
        self.file_stream.seek(curr_pos)

    def write_top(self, arr, name, is_global):
        """ Write variable at top level of mat file

        Parameters
        ----------
        arr : array_like
            array-like object to create writer for
        name : str, optional
            name as it will appear in matlab workspace
            default is empty string
        is_global : {False, True}, optional
            whether variable will be global on load into matlab
        """
        # these are set before the top-level header write, and unset at
        # the end of the same write, because they do not apply for lower levels
        self._var_is_global = is_global
        self._var_name = name
        # write the header and data
        self.write(arr)

    def write(self, arr):
        ''' Write `arr` to stream at top and sub levels

        Parameters
        ----------
        arr : array_like
            array-like object to create writer for
        '''
        # store position, so we can update the matrix tag
        mat_tag_pos = self.file_stream.tell()
        # First check if these are sparse
        if scipy.sparse.issparse(arr):
            self.write_sparse(arr)
            self.update_matrix_tag(mat_tag_pos)
            return
        # Try to convert things that aren't arrays
        narr = to_writeable(arr)
        if narr is None:
            raise TypeError('Could not convert %s (type %s) to array'
                            % (arr, type(arr)))
        if isinstance(narr, MatlabObject):
            self.write_object(narr)
        elif isinstance(narr, MatlabFunction):
            raise MatWriteError('Cannot write matlab functions')
        elif narr is EmptyStructMarker:  # empty struct array
            self.write_empty_struct()
        elif narr.dtype.fields:  # struct array
            self.write_struct(narr)
        elif narr.dtype.hasobject:  # cell array
            self.write_cells(narr)
        elif narr.dtype.kind in ('U', 'S'):
            if self.unicode_strings:
                codec = 'UTF8'
            else:
                codec = 'ascii'
            self.write_char(narr, codec)
        else:
            self.write_numeric(narr)
        self.update_matrix_tag(mat_tag_pos)

    def write_numeric(self, arr):
        imagf = arr.dtype.kind == 'c'
        logif = arr.dtype.kind == 'b'
        try:
            mclass = NP_TO_MXTYPES[arr.dtype.str[1:]]
        except KeyError:
            # No matching matlab type, probably complex256 / float128 / float96
            # Cast data to complex128 / float64.
            if imagf:
                arr = arr.astype('c128')
            elif logif:
                arr = arr.astype('i1')  # Should only contain 0/1
            else:
                arr = arr.astype('f8')
            mclass = mxDOUBLE_CLASS
        self.write_header(matdims(arr, self.oned_as),
                          mclass,
                          is_complex=imagf,
                          is_logical=logif)
        if imagf:
            # Complex data is stored as separate real and imaginary parts.
            self.write_element(arr.real)
            self.write_element(arr.imag)
        else:
            self.write_element(arr)

    def write_char(self, arr, codec='ascii'):
        ''' Write string array `arr` with given `codec`
        '''
        if arr.size == 0 or np.all(arr == ''):
            # This an empty string array or a string array containing
            # only empty strings. Matlab cannot distinguish between a
            # string array that is empty, and a string array containing
            # only empty strings, because it stores strings as arrays of
            # char. There is no way of having an array of char that is
            # not empty, but contains an empty string. We have to
            # special-case the array-with-empty-strings because even
            # empty strings have zero padding, which would otherwise
            # appear in matlab as a string with a space.
            shape = (0,) * np.max([arr.ndim, 2])
            self.write_header(shape, mxCHAR_CLASS)
            self.write_smalldata_element(arr, miUTF8, 0)
            return
        # non-empty string.
        #
        # Convert to char array
        arr = arr_to_chars(arr)
        # We have to write the shape directly, because we are going
        # recode the characters, and the resulting stream of chars
        # may have a different length
        shape = arr.shape
        self.write_header(shape, mxCHAR_CLASS)
        if arr.dtype.kind == 'U' and arr.size:
            # Make one long string from all the characters. We need to
            # transpose here, because we're flattening the array, before
            # we write the bytes. The bytes have to be written in
            # Fortran order.
            n_chars = np.prod(shape)
            st_arr = np.ndarray(shape=(),
                                dtype=arr_dtype_number(arr, n_chars),
                                buffer=arr.T.copy())  # Fortran order
            # Recode with codec to give byte string
            st = st_arr.item().encode(codec)
            # Reconstruct as 1-D byte array
            arr = np.ndarray(shape=(len(st),),
                             dtype='S1',
                             buffer=st)
        self.write_element(arr, mdtype=miUTF8)

    def write_sparse(self, arr):
        ''' Sparse matrices are 2D
        '''
        A = arr.tocsc()  # convert to sparse CSC format
        A.sort_indices()  # MATLAB expects sorted row indices
        is_complex = (A.dtype.kind == 'c')
        is_logical = (A.dtype.kind == 'b')
        nz = A.nnz
        self.write_header(matdims(arr, self.oned_as),
                          mxSPARSE_CLASS,
                          is_complex=is_complex,
                          is_logical=is_logical,
                          # matlab won't load file with 0 nzmax
                          nzmax=1 if nz == 0 else nz)
        self.write_element(A.indices.astype('i4'))
        self.write_element(A.indptr.astype('i4'))
        self.write_element(A.data.real)
        if is_complex:
            self.write_element(A.data.imag)

    def write_cells(self, arr):
        self.write_header(matdims(arr, self.oned_as),
                          mxCELL_CLASS)
        # loop over data, column major
        A = np.atleast_2d(arr).flatten('F')
        for el in A:
            self.write(el)

    def write_empty_struct(self):
        self.write_header((1, 1), mxSTRUCT_CLASS)
        # max field name length set to 1 in an example matlab struct
        self.write_element(np.array(1, dtype=np.int32))
        # Field names element is empty
        self.write_element(np.array([], dtype=np.int8))

    def write_struct(self, arr):
        self.write_header(matdims(arr, self.oned_as),
                          mxSTRUCT_CLASS)
        self._write_items(arr)

    def _write_items(self, arr):
        # write fieldnames
        fieldnames = [f[0] for f in arr.dtype.descr]
        length = max([len(fieldname) for fieldname in fieldnames])+1
        max_length = (self.long_field_names and 64) or 32
        if length > max_length:
            raise ValueError("Field names are restricted to %d characters" %
                             (max_length-1))
        self.write_element(np.array([length], dtype='i4'))
        self.write_element(
            np.array(fieldnames, dtype='S%d' % (length)),
            mdtype=miINT8)
        # Field values follow field names, column major.
        A = np.atleast_2d(arr).flatten('F')
        for el in A:
            for f in fieldnames:
                self.write(el[f])

    def write_object(self, arr):
        '''Same as writing structs, except different mx class, and extra
        classname element after header
        '''
        self.write_header(matdims(arr, self.oned_as),
                          mxOBJECT_CLASS)
        self.write_element(np.array(arr.classname, dtype='S'),
                           mdtype=miINT8)
        self._write_items(arr)
class MatFile5Writer:
    ''' Class for writing mat5 files '''
    @docfiller
    def __init__(self, file_stream,
                 do_compression=False,
                 unicode_strings=False,
                 global_vars=None,
                 long_field_names=False,
                 oned_as='row'):
        ''' Initialize writer for matlab 5 format files

        Parameters
        ----------
        %(do_compression)s
        %(unicode_strings)s
        global_vars : None or sequence of strings, optional
            Names of variables to be marked as global for matlab
        %(long_fields)s
        %(oned_as)s
        '''
        self.file_stream = file_stream
        self.do_compression = do_compression
        self.unicode_strings = unicode_strings
        if global_vars:
            self.global_vars = global_vars
        else:
            self.global_vars = []
        self.long_field_names = long_field_names
        self.oned_as = oned_as
        # Created fresh on each put_variables call.
        self._matrix_writer = None
    def write_file_header(self):
        # write header
        hdr = np.zeros((), NDT_FILE_HDR)
        hdr['description'] = 'MATLAB 5.0 MAT-file Platform: %s, Created on: %s' \
                             % (os.name,time.asctime())
        hdr['version'] = 0x0100
        # 'MI' written with native byte order; readers inspect these two
        # bytes to detect the file's endianness.
        hdr['endian_test'] = np.ndarray(shape=(),
                                        dtype='S2',
                                        buffer=np.uint16(0x4d49))
        self.file_stream.write(hdr.tobytes())
    def put_variables(self, mdict, write_header=None):
        ''' Write variables in `mdict` to stream

        Parameters
        ----------
        mdict : mapping
          mapping with method ``items`` returns name, contents pairs where
          ``name`` which will appear in the matlab workspace in file load, and
          ``contents`` is something writeable to a matlab file, such as a NumPy
          array.
        write_header : {None, True, False}, optional
          If True, then write the matlab file header before writing the
          variables. If None (the default) then write the file header
          if we are at position 0 in the stream. By setting False
          here, and setting the stream position to the end of the file,
          you can append variables to a matlab file
        '''
        # write header if requested, or None and start of file
        if write_header is None:
            write_header = self.file_stream.tell() == 0
        if write_header:
            self.write_file_header()
        self._matrix_writer = VarWriter5(self)
        for name, var in mdict.items():
            if name[0] == '_':
                # Skip reserved names such as '__header__' that appear in
                # dicts produced by loadmat round-trips.
                continue
            is_global = name in self.global_vars
            if self.do_compression:
                # Write the variable to an in-memory buffer, then emit the
                # zlib-compressed bytes wrapped in a miCOMPRESSED tag.
                stream = BytesIO()
                self._matrix_writer.file_stream = stream
                self._matrix_writer.write_top(var, name.encode('latin1'), is_global)
                out_str = zlib.compress(stream.getvalue())
                tag = np.empty((), NDT_TAG_FULL)
                tag['mdtype'] = miCOMPRESSED
                tag['byte_count'] = len(out_str)
                self.file_stream.write(tag.tobytes())
                self.file_stream.write(out_str)
            else:  # not compressing
                self._matrix_writer.write_top(var, name.encode('latin1'), is_global)
| 33,584
| 36.483259
| 84
|
py
|
scipy
|
scipy-main/scipy/io/matlab/mio.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io.matlab` namespace for importing the functions
# included below.
import warnings
from . import _mio
__all__ = [ # noqa: F822
'mat_reader_factory', 'loadmat', 'savemat', 'whosmat',
'contextmanager', 'docfiller',
'MatFile4Reader', 'MatFile4Writer', 'MatFile5Reader', 'MatFile5Writer'
]
def __dir__():
    # Advertise only the deprecated public names for dir()/tab-completion.
    return __all__
def __getattr__(name):
    """Forward deprecated attribute access to ``scipy.io.matlab._mio``."""
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
                      "the `scipy.io.matlab.mio` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_mio, name)
    raise AttributeError(
        "scipy.io.matlab.mio is deprecated and has no attribute "
        f"{name}. Try looking in scipy.io.matlab instead.")
| 894
| 28.833333
| 79
|
py
|
scipy
|
scipy-main/scipy/io/matlab/mio4.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io.matlab` namespace for importing the functions
# included below.
import warnings
from . import _mio4
__all__ = [ # noqa: F822
'MatFile4Reader', 'MatFile4Writer', 'SYS_LITTLE_ENDIAN',
'VarHeader4', 'VarReader4', 'VarWriter4', 'arr_to_2d', 'mclass_info',
'mdtypes_template', 'miDOUBLE', 'miINT16', 'miINT32', 'miSINGLE',
'miUINT16', 'miUINT8', 'mxCHAR_CLASS', 'mxFULL_CLASS', 'mxSPARSE_CLASS',
'np_to_mtypes', 'order_codes', 'MatFileReader', 'docfiller',
'matdims', 'read_dtype', 'convert_dtypes', 'arr_to_chars',
'arr_dtype_number', 'squeeze_element', 'chars_to_strings'
]
def __dir__():
    # Advertise only the deprecated public names for dir()/tab-completion.
    return __all__
def __getattr__(name):
    """Forward deprecated attribute access to ``scipy.io.matlab._mio4``."""
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
                      "the `scipy.io.matlab.mio4` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_mio4, name)
    raise AttributeError(
        "scipy.io.matlab.mio4 is deprecated and has no attribute "
        f"{name}. Try looking in scipy.io.matlab instead.")
| 1,201
| 34.352941
| 79
|
py
|
scipy
|
scipy-main/scipy/io/matlab/mio_utils.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io.matlab` namespace for importing the functions
# included below.
import warnings
from . import _mio_utils
__all__ = ['squeeze_element', 'chars_to_strings'] # noqa: F822
def __dir__():
    # Advertise only the deprecated public names for dir()/tab-completion.
    return __all__
def __getattr__(name):
    """Forward deprecated attribute access to ``scipy.io.matlab._mio_utils``."""
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
                      "the `scipy.io.matlab.mio_utils` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_mio_utils, name)
    raise AttributeError(
        "scipy.io.matlab.mio_utils is deprecated and has no attribute "
        f"{name}. Try looking in scipy.io.matlab instead.")
| 786
| 28.148148
| 79
|
py
|
scipy
|
scipy-main/scipy/io/matlab/__init__.py
|
"""
MATLAB® file utilies (:mod:`scipy.io.matlab`)
=============================================
.. currentmodule:: scipy.io.matlab
This submodule is meant to provide lower-level file utilies related to reading
and writing MATLAB files.
.. autosummary::
:toctree: generated/
matfile_version - Get the MATLAB file version
MatReadError - Exception indicating a read issue
MatReadWarning - Warning class for read issues
MatWriteError - Exception indicating a write issue
mat_struct - Class used when ``struct_as_record=False``
.. autosummary::
:toctree: generated/
:template: autosummary/ndarray_subclass.rst
:nosignatures:
MatlabObject - Class for a MATLAB object
MatlabOpaque - Class for a MATLAB opaque matrix
MatlabFunction - Class for a MATLAB function object
The following utilities that live in the :mod:`scipy.io`
namespace also exist in this namespace:
.. autosummary::
:toctree: generated/
loadmat - Read a MATLAB style mat file (version 4 through 7.1)
savemat - Write a MATLAB style mat file (version 4 through 7.1)
whosmat - List contents of a MATLAB style mat file (version 4 through 7.1)
Notes
-----
MATLAB(R) is a registered trademark of The MathWorks, Inc., 3 Apple Hill
Drive, Natick, MA 01760-2098, USA.
"""
# Matlab file read and write utilities
from ._mio import loadmat, savemat, whosmat
from ._mio5 import MatlabFunction
from ._mio5_params import MatlabObject, MatlabOpaque, mat_struct
from ._miobase import (matfile_version, MatReadError, MatReadWarning,
MatWriteError)
# Deprecated namespaces, to be removed in v2.0.0
from .import (mio, mio5, mio5_params, mio4, byteordercodes,
miobase, mio_utils, streams, mio5_utils)
__all__ = [
'loadmat', 'savemat', 'whosmat', 'MatlabObject',
'matfile_version', 'MatReadError', 'MatReadWarning',
'MatWriteError', 'mat_struct', 'MatlabOpaque', 'MatlabFunction'
]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 2,021
| 30.59375
| 78
|
py
|
scipy
|
scipy-main/scipy/io/matlab/miobase.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io.matlab` namespace for importing the functions
# included below.
import warnings
from . import _miobase
__all__ = [ # noqa: F822
'MatFileReader', 'MatReadError', 'MatReadWarning',
'MatVarReader', 'MatWriteError', 'arr_dtype_number',
'arr_to_chars', 'convert_dtypes', 'doc_dict',
'docfiller', 'get_matfile_version',
'matdims', 'read_dtype', 'doccer', 'boc'
]
def __dir__():
    # Advertise only the deprecated public names for dir()/tab-completion.
    return __all__
def __getattr__(name):
    """Forward deprecated attribute access to ``scipy.io.matlab._miobase``."""
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
                      "the `scipy.io.matlab.miobase` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_miobase, name)
    raise AttributeError(
        "scipy.io.matlab.miobase is deprecated and has no attribute "
        f"{name}. Try looking in scipy.io.matlab instead.")
| 988
| 29.90625
| 79
|
py
|
scipy
|
scipy-main/scipy/io/matlab/_mio5_params.py
|
''' Constants and classes for matlab 5 read and write
See also mio5_utils.pyx where these same constants arise as c enums.
If you make changes in this file, don't forget to change mio5_utils.pyx
'''
import numpy as np
from ._miobase import convert_dtypes
__all__ = [
'MDTYPES', 'MatlabFunction', 'MatlabObject', 'MatlabOpaque',
'NP_TO_MTYPES', 'NP_TO_MXTYPES', 'OPAQUE_DTYPE', 'codecs_template',
'mat_struct', 'mclass_dtypes_template', 'mclass_info', 'mdtypes_template',
'miCOMPRESSED', 'miDOUBLE', 'miINT16', 'miINT32', 'miINT64', 'miINT8',
'miMATRIX', 'miSINGLE', 'miUINT16', 'miUINT32', 'miUINT64', 'miUINT8',
'miUTF16', 'miUTF32', 'miUTF8', 'mxCELL_CLASS', 'mxCHAR_CLASS',
'mxDOUBLE_CLASS', 'mxFUNCTION_CLASS', 'mxINT16_CLASS', 'mxINT32_CLASS',
'mxINT64_CLASS', 'mxINT8_CLASS', 'mxOBJECT_CLASS',
'mxOBJECT_CLASS_FROM_MATRIX_H', 'mxOPAQUE_CLASS', 'mxSINGLE_CLASS',
'mxSPARSE_CLASS', 'mxSTRUCT_CLASS', 'mxUINT16_CLASS', 'mxUINT32_CLASS',
'mxUINT64_CLASS', 'mxUINT8_CLASS'
]
miINT8 = 1
miUINT8 = 2
miINT16 = 3
miUINT16 = 4
miINT32 = 5
miUINT32 = 6
miSINGLE = 7
miDOUBLE = 9
miINT64 = 12
miUINT64 = 13
miMATRIX = 14
miCOMPRESSED = 15
miUTF8 = 16
miUTF16 = 17
miUTF32 = 18
mxCELL_CLASS = 1
mxSTRUCT_CLASS = 2
# The March 2008 edition of "Matlab 7 MAT-File Format" says that
# mxOBJECT_CLASS = 3, whereas matrix.h says that mxLOGICAL = 3.
# Matlab 2008a appears to save logicals as type 9, so we assume that
# the document is correct. See type 18, below.
mxOBJECT_CLASS = 3
mxCHAR_CLASS = 4
mxSPARSE_CLASS = 5
mxDOUBLE_CLASS = 6
mxSINGLE_CLASS = 7
mxINT8_CLASS = 8
mxUINT8_CLASS = 9
mxINT16_CLASS = 10
mxUINT16_CLASS = 11
mxINT32_CLASS = 12
mxUINT32_CLASS = 13
# The following are not in the March 2008 edition of "Matlab 7
# MAT-File Format," but were guessed from matrix.h.
mxINT64_CLASS = 14
mxUINT64_CLASS = 15
mxFUNCTION_CLASS = 16
# Not doing anything with these at the moment.
mxOPAQUE_CLASS = 17 # This appears to be a function workspace
# Thread 'saving/loading symbol table of annymous functions', octave-maintainers, April-May 2007
# https://lists.gnu.org/archive/html/octave-maintainers/2007-04/msg00031.html
# https://lists.gnu.org/archive/html/octave-maintainers/2007-05/msg00032.html
# (Was/Deprecated: https://www-old.cae.wisc.edu/pipermail/octave-maintainers/2007-May/002824.html)
mxOBJECT_CLASS_FROM_MATRIX_H = 18
mdtypes_template = {
miINT8: 'i1',
miUINT8: 'u1',
miINT16: 'i2',
miUINT16: 'u2',
miINT32: 'i4',
miUINT32: 'u4',
miSINGLE: 'f4',
miDOUBLE: 'f8',
miINT64: 'i8',
miUINT64: 'u8',
miUTF8: 'u1',
miUTF16: 'u2',
miUTF32: 'u4',
'file_header': [('description', 'S116'),
('subsystem_offset', 'i8'),
('version', 'u2'),
('endian_test', 'S2')],
'tag_full': [('mdtype', 'u4'), ('byte_count', 'u4')],
'tag_smalldata':[('byte_count_mdtype', 'u4'), ('data', 'S4')],
'array_flags': [('data_type', 'u4'),
('byte_count', 'u4'),
('flags_class','u4'),
('nzmax', 'u4')],
'U1': 'U1',
}
mclass_dtypes_template = {
mxINT8_CLASS: 'i1',
mxUINT8_CLASS: 'u1',
mxINT16_CLASS: 'i2',
mxUINT16_CLASS: 'u2',
mxINT32_CLASS: 'i4',
mxUINT32_CLASS: 'u4',
mxINT64_CLASS: 'i8',
mxUINT64_CLASS: 'u8',
mxSINGLE_CLASS: 'f4',
mxDOUBLE_CLASS: 'f8',
}
mclass_info = {
mxINT8_CLASS: 'int8',
mxUINT8_CLASS: 'uint8',
mxINT16_CLASS: 'int16',
mxUINT16_CLASS: 'uint16',
mxINT32_CLASS: 'int32',
mxUINT32_CLASS: 'uint32',
mxINT64_CLASS: 'int64',
mxUINT64_CLASS: 'uint64',
mxSINGLE_CLASS: 'single',
mxDOUBLE_CLASS: 'double',
mxCELL_CLASS: 'cell',
mxSTRUCT_CLASS: 'struct',
mxOBJECT_CLASS: 'object',
mxCHAR_CLASS: 'char',
mxSPARSE_CLASS: 'sparse',
mxFUNCTION_CLASS: 'function',
mxOPAQUE_CLASS: 'opaque',
}
NP_TO_MTYPES = {
'f8': miDOUBLE,
'c32': miDOUBLE,
'c24': miDOUBLE,
'c16': miDOUBLE,
'f4': miSINGLE,
'c8': miSINGLE,
'i8': miINT64,
'i4': miINT32,
'i2': miINT16,
'i1': miINT8,
'u8': miUINT64,
'u4': miUINT32,
'u2': miUINT16,
'u1': miUINT8,
'S1': miUINT8,
'U1': miUTF16,
'b1': miUINT8, # not standard but seems MATLAB uses this (gh-4022)
}
NP_TO_MXTYPES = {
'f8': mxDOUBLE_CLASS,
'c32': mxDOUBLE_CLASS,
'c24': mxDOUBLE_CLASS,
'c16': mxDOUBLE_CLASS,
'f4': mxSINGLE_CLASS,
'c8': mxSINGLE_CLASS,
'i8': mxINT64_CLASS,
'i4': mxINT32_CLASS,
'i2': mxINT16_CLASS,
'i1': mxINT8_CLASS,
'u8': mxUINT64_CLASS,
'u4': mxUINT32_CLASS,
'u2': mxUINT16_CLASS,
'u1': mxUINT8_CLASS,
'S1': mxUINT8_CLASS,
'b1': mxUINT8_CLASS, # not standard but seems MATLAB uses this
}
''' Before release v7.1 (release 14) matlab (TM) used the system
default character encoding scheme padded out to 16-bits. Release 14
and later use Unicode. When saving character data, R14 checks if it
can be encoded in 7-bit ascii, and saves in that format if so.'''
# Python codec root name and bytes-per-code-unit for each MAT unicode
# data type; turned into byte-order-specific codecs by _convert_codecs.
codecs_template = {
    miUTF8: {'codec': 'utf_8', 'width': 1},
    miUTF16: {'codec': 'utf_16', 'width': 2},
    miUTF32: {'codec': 'utf_32','width': 4},
    }
def _convert_codecs(template, byte_order):
''' Convert codec template mapping to byte order
Set codecs not on this system to None
Parameters
----------
template : mapping
key, value are respectively codec name, and root name for codec
(without byte order suffix)
byte_order : {'<', '>'}
code for little or big endian
Returns
-------
codecs : dict
key, value are name, codec (as in .encode(codec))
'''
codecs = {}
postfix = byte_order == '<' and '_le' or '_be'
for k, v in template.items():
codec = v['codec']
try:
" ".encode(codec)
except LookupError:
codecs[k] = None
continue
if v['width'] > 1:
codec += postfix
codecs[k] = codec
return codecs.copy()
# Pre-computed dtype, matrix-class, and codec lookup tables for both byte
# orders, keyed by the NumPy byte-order code ('<' or '>').
MDTYPES = {}
for _bytecode in '<>':
    _def = {'dtypes': convert_dtypes(mdtypes_template, _bytecode),
            'classes': convert_dtypes(mclass_dtypes_template, _bytecode),
            'codecs': _convert_codecs(codecs_template, _bytecode)}
    MDTYPES[_bytecode] = _def
class mat_struct:
    """Placeholder for holding read data from structs.

    We use instances of this class when the user passes False as a value to the
    ``struct_as_record`` parameter of the :func:`scipy.io.loadmat` function.
    """
class MatlabObject(np.ndarray):
    """Subclass of ndarray to signal this is a matlab object.

    This is a simple subclass of :class:`numpy.ndarray` meant to be used
    by :func:`scipy.io.loadmat` and should not be instantiated directly.
    """
    def __new__(cls, input_array, classname=None):
        # View the input data as this class and attach the matlab
        # class name as an instance attribute.
        instance = np.asarray(input_array).view(cls)
        instance.classname = classname
        return instance

    def __array_finalize__(self, obj):
        # Propagate the matlab class name through views, slices and
        # ufunc results derived from an existing instance.
        self.classname = getattr(obj, 'classname', None)
class MatlabFunction(np.ndarray):
    """Subclass for a MATLAB function.

    This is a simple subclass of :class:`numpy.ndarray` meant to be used
    by :func:`scipy.io.loadmat` and should not be directly instantiated.
    """
    def __new__(cls, input_array):
        # Re-view the input data as this marker class.
        return np.asarray(input_array).view(cls)
class MatlabOpaque(np.ndarray):
    """Subclass for a MATLAB opaque matrix.

    This is a simple subclass of :class:`numpy.ndarray` meant to be used
    by :func:`scipy.io.loadmat` and should not be directly instantiated.
    """
    def __new__(cls, input_array):
        # Re-view the input data as this marker class.
        return np.asarray(input_array).view(cls)
# Structured dtype with four object fields used to hold MATLAB opaque
# matrices on read.  NOTE(review): the meanings of s0/s1/s2 are not shown
# here — presumably name/type metadata; confirm against the mio5 reader.
OPAQUE_DTYPE = np.dtype(
    [('s0', 'O'), ('s1', 'O'), ('s2', 'O'), ('arr', 'O')])
| 8,199
| 28.181495
| 98
|
py
|
scipy
|
scipy-main/scipy/io/matlab/_byteordercodes.py
|
''' Byteorder utilities for system - numpy byteorder encoding
Converts a variety of string codes for little endian, big endian,
native byte order and swapped byte order to explicit NumPy endian
codes - one of '<' (little endian) or '>' (big endian)
'''
import sys
__all__ = [
'aliases', 'native_code', 'swapped_code',
'sys_is_le', 'to_numpy_code'
]
sys_is_le = sys.byteorder == 'little'
# NumPy byte-order code for this machine, and for the opposite order.
native_code = '<' if sys_is_le else '>'
swapped_code = '>' if sys_is_le else '<'

# Accepted spellings for each byte-order category.  'S'/'s' are both
# listed so that case-folded lookup in to_numpy_code matches either.
aliases = {'little': ('little', '<', 'l', 'le'),
           'big': ('big', '>', 'b', 'be'),
           'native': ('native', '='),
           'swapped': ('swapped', 'S', 's')}


def to_numpy_code(code):
    """
    Convert various order codings to NumPy format.

    Parameters
    ----------
    code : str or None
        The code to convert. It is converted to lower case before parsing.
        Legal values are:
        'little', 'big', 'l', 'b', 'le', 'be', '<', '>', 'native', '=',
        'swapped', 's'.  None is accepted and means native order.

    Returns
    -------
    out_code : {'<', '>'}
        Here '<' is the numpy dtype code for little endian,
        and '>' is the code for big endian.

    Examples
    --------
    >>> import sys
    >>> sys_is_le == (sys.byteorder == 'little')
    True
    >>> to_numpy_code('big')
    '>'
    >>> to_numpy_code('little')
    '<'
    >>> nc = to_numpy_code('native')
    >>> nc == '<' if sys_is_le else nc == '>'
    True
    >>> sc = to_numpy_code('swapped')
    >>> sc == '>' if sys_is_le else sc == '<'
    True
    """
    # Check for None before lower(): the original order of these two
    # statements made the None branch unreachable (AttributeError first).
    if code is None:
        return native_code
    code = code.lower()
    if code in aliases['little']:
        return '<'
    elif code in aliases['big']:
        return '>'
    elif code in aliases['native']:
        return native_code
    elif code in aliases['swapped']:
        return swapped_code
    else:
        raise ValueError(
            'We cannot handle byte order %s' % code)
| 1,902
| 24.716216
| 74
|
py
|
scipy
|
scipy-main/scipy/io/matlab/_mio.py
|
"""
Module for reading and writing matlab (TM) .mat files
"""
# Authors: Travis Oliphant, Matthew Brett
from contextlib import contextmanager
from ._miobase import _get_matfile_version, docfiller
from ._mio4 import MatFile4Reader, MatFile4Writer
from ._mio5 import MatFile5Reader, MatFile5Writer
__all__ = ['mat_reader_factory', 'loadmat', 'savemat', 'whosmat']
@contextmanager
def _open_file_context(file_like, appendmat, mode='rb'):
    """Context-manager wrapper around ``_open_file``.

    Yields the stream and, on exit, closes it only if this call was the
    one that opened it.
    """
    stream, we_opened = _open_file(file_like, appendmat, mode)
    try:
        yield stream
    finally:
        if we_opened:
            stream.close()
def _open_file(file_like, appendmat, mode='rb'):
"""
Open `file_like` and return as file-like object. First, check if object is
already file-like; if so, return it as-is. Otherwise, try to pass it
to open(). If that fails, and `file_like` is a string, and `appendmat` is true,
append '.mat' and try again.
"""
reqs = {'read'} if set(mode) & set('r+') else set()
if set(mode) & set('wax+'):
reqs.add('write')
if reqs.issubset(dir(file_like)):
return file_like, False
try:
return open(file_like, mode), True
except OSError as e:
# Probably "not found"
if isinstance(file_like, str):
if appendmat and not file_like.endswith('.mat'):
file_like += '.mat'
return open(file_like, mode), True
else:
raise OSError(
'Reader needs file name or open file-like object'
) from e
@docfiller
def mat_reader_factory(file_name, appendmat=True, **kwargs):
    """
    Create reader for matlab .mat format files.

    Parameters
    ----------
    %(file_arg)s
    %(append_arg)s
    %(load_args)s
    %(struct_arg)s

    Returns
    -------
    matreader : MatFileReader object
       Initialized instance of MatFileReader class matching the mat file
       type detected in `filename`.
    file_opened : bool
       Whether the file was opened by this routine.

    """
    byte_stream, file_opened = _open_file(file_name, appendmat)
    major_version, _minor = _get_matfile_version(byte_stream)
    # Dispatch on the detected major format version.
    if major_version == 0:
        return MatFile4Reader(byte_stream, **kwargs), file_opened
    if major_version == 1:
        return MatFile5Reader(byte_stream, **kwargs), file_opened
    if major_version == 2:
        raise NotImplementedError('Please use HDF reader for matlab v7.3 '
                                  'files, e.g. h5py')
    raise TypeError('Did not recognize version %s' % major_version)
@docfiller
def loadmat(file_name, mdict=None, appendmat=True, **kwargs):
    """
    Load MATLAB file.

    Parameters
    ----------
    file_name : str or file-like object
        Name of the mat file (the .mat extension is not needed when
        ``appendmat`` is True). An open file-like object may also be
        given.
    mdict : dict, optional
        Dictionary in which to insert the loaded variables. When omitted,
        a fresh dictionary is created and returned.
    appendmat : bool, optional
        True to append the .mat extension to the end of the given
        filename, if not already present. Default is True.
    **kwargs
        Reader options forwarded to `mat_reader_factory`: ``byte_order``,
        ``mat_dtype``, ``squeeze_me``, ``chars_as_strings``,
        ``matlab_compatible``, ``struct_as_record``,
        ``verify_compressed_data_integrity`` and ``simplify_cells``.
        ``variable_names`` may additionally be a sequence of strings
        naming the MATLAB variables to read; any other variable in the
        file is skipped, possibly saving some read processing.

    Returns
    -------
    mat_dict : dict
        Dictionary with variable names as keys and loaded matrices as
        values. This is ``mdict`` itself when ``mdict`` was supplied.

    Notes
    -----
    v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported. MATLAB 7.3
    format files are HDF5-based; SciPy does not supply an HDF5 library,
    so use e.g. h5py for those.
    """
    # 'variable_names' is consumed here, not by the reader constructor.
    names = kwargs.pop('variable_names', None)
    with _open_file_context(file_name, appendmat) as stream:
        reader, _ = mat_reader_factory(stream, **kwargs)
        loaded = reader.get_variables(names)
    if mdict is None:
        return loaded
    # Caller supplied a target dict: merge into it and hand it back.
    mdict.update(loaded)
    return mdict
@docfiller
def savemat(file_name, mdict,
            appendmat=True,
            format='5',
            long_field_names=False,
            do_compression=False,
            oned_as='row'):
    """
    Save a dictionary of names and arrays into a MATLAB-style .mat file.

    Parameters
    ----------
    file_name : str or file-like object
        Name of the .mat file (the .mat extension is not needed when
        ``appendmat`` is True). An open file-like object may also be
        given.
    mdict : dict
        Dictionary from which to save matfile variables.
    appendmat : bool, optional
        True (the default) to append the .mat extension to the end of the
        given filename, if not already present.
    format : {'5', '4'}, string, optional
        '5' (the default) for MATLAB 5 and up (to 7.2), '4' for
        MATLAB 4 .mat files.
    long_field_names : bool, optional
        False (the default) limits struct field names to the documented
        31-character maximum; True allows 63 characters, which works for
        MATLAB 7.6+. Not available for version 4 files.
    do_compression : bool, optional
        Whether or not to compress matrices on write. Default is False.
    oned_as : {'row', 'column'}, optional
        If 'column', write 1-D NumPy arrays as column vectors.
        If 'row', write 1-D NumPy arrays as row vectors.
    """
    # The output is opened (and truncated) before format validation,
    # matching the established behavior of this routine.
    with _open_file_context(file_name, appendmat, 'wb') as stream:
        if format == '5':
            writer = MatFile5Writer(stream,
                                    do_compression=do_compression,
                                    unicode_strings=True,
                                    long_field_names=long_field_names,
                                    oned_as=oned_as)
        elif format == '4':
            if long_field_names:
                raise ValueError("Long field names are not available for version 4 files")
            writer = MatFile4Writer(stream, oned_as)
        else:
            raise ValueError("Format should be '4' or '5'")
        writer.put_variables(mdict)
@docfiller
def whosmat(file_name, appendmat=True, **kwargs):
    """
    List variables inside a MATLAB file.

    Parameters
    ----------
    %(file_arg)s
    %(append_arg)s
    %(load_args)s
    %(struct_arg)s

    Returns
    -------
    variables : list of tuples
        A list of tuples, where each tuple holds the matrix name (a
        string), its shape (tuple of ints), and its data class (a
        string). Possible data classes are: int8, uint8, int16, uint16,
        int32, uint32, int64, uint64, single, double, cell, struct,
        object, char, sparse, function, opaque, logical, unknown.

    Notes
    -----
    v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported; MATLAB 7.3
    files are HDF5-based and need an HDF5 library such as h5py.

    .. versionadded:: 0.12.0
    """
    with _open_file_context(file_name, appendmat) as stream:
        reader, _ = mat_reader_factory(stream, **kwargs)
        return reader.list_variables()
| 12,799
| 34.654596
| 90
|
py
|
scipy
|
scipy-main/scipy/io/matlab/tests/test_byteordercodes.py
|
''' Tests for byteorder module '''
import sys
from numpy.testing import assert_
from pytest import raises as assert_raises
import scipy.io.matlab._byteordercodes as sibc
def test_native():
    # sys_is_le must agree with the interpreter's reported byte order.
    assert_(sibc.sys_is_le == (sys.byteorder == 'little'))
def test_to_numpy():
    # 'native'/'swapped' resolve according to the host byte order.
    if sys.byteorder == 'little':
        native, swapped = '<', '>'
    else:
        native, swapped = '>', '<'
    assert_(sibc.to_numpy_code('native') == native)
    assert_(sibc.to_numpy_code('swapped') == swapped)
    assert_(sibc.to_numpy_code('native') == sibc.to_numpy_code('='))
    assert_(sibc.to_numpy_code('big') == '>')
    # All spellings of little- and big-endian map to '<' / '>'.
    for alias in ('little', '<', 'l', 'L', 'le'):
        assert_(sibc.to_numpy_code(alias) == '<')
    for alias in ('big', '>', 'b', 'B', 'be'):
        assert_(sibc.to_numpy_code(alias) == '>')
    # Unrecognized codes are rejected.
    assert_raises(ValueError, sibc.to_numpy_code, 'silly string')
| 938
| 30.3
| 68
|
py
|
scipy
|
scipy-main/scipy/io/matlab/tests/test_streams.py
|
""" Testing
"""
import os
import zlib
from io import BytesIO
from tempfile import mkstemp
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_, assert_equal
from pytest import raises as assert_raises
from scipy.io.matlab._streams import (make_stream,
GenericStream, ZlibInputStream,
_read_into, _read_string, BLOCK_SIZE)
@contextmanager
def setup_test_file():
    # Provide the same payload through three stream flavours: a real file
    # object opened for reading plus two independent BytesIO instances.
    payload = b'a\x00string'
    fd, fname = mkstemp()
    with os.fdopen(fd, 'wb') as out:
        out.write(payload)
    with open(fname, 'rb') as fobj:
        yield fobj, BytesIO(payload), BytesIO(payload)
    os.unlink(fname)
def test_make_stream():
    # make_stream must wrap an in-memory stream in a GenericStream.
    with setup_test_file() as (fs, gs, cs):
        wrapped = make_stream(gs)
        assert_(isinstance(wrapped, GenericStream))
def test_tell_seek():
    """seek/tell protocol: seek returns 0 on success and tell tracks it."""
    with setup_test_file() as (fs, gs, cs):
        for s in (fs, gs, cs):
            st = make_stream(s)
            # Absolute seek (whence defaults to 0).
            res = st.seek(0)
            assert_equal(res, 0)
            assert_equal(st.tell(), 0)
            res = st.seek(5)
            assert_equal(res, 0)
            assert_equal(st.tell(), 5)
            # Relative seek (whence=1): 5 + 2 = 7.
            res = st.seek(2, 1)
            assert_equal(res, 0)
            assert_equal(st.tell(), 7)
            # Seek from end (whence=2): len(b'a\x00string') - 2 = 6.
            res = st.seek(-2, 2)
            assert_equal(res, 0)
            assert_equal(st.tell(), 6)
def test_read():
    """Exercise stream.read plus the _read_into/_read_string helpers."""
    with setup_test_file() as (fs, gs, cs):
        for s in (fs, gs, cs):
            st = make_stream(s)
            st.seek(0)
            # read(-1) returns the whole payload.
            res = st.read(-1)
            assert_equal(res, b'a\x00string')
            st.seek(0)
            res = st.read(4)
            assert_equal(res, b'a\x00st')
            # read into
            st.seek(0)
            res = _read_into(st, 4)
            assert_equal(res, b'a\x00st')
            res = _read_into(st, 4)
            assert_equal(res, b'ring')
            # Only 1 byte left in the 9-byte payload: asking for 2 fails.
            assert_raises(OSError, _read_into, st, 2)
            # read alloc
            st.seek(0)
            res = _read_string(st, 4)
            assert_equal(res, b'a\x00st')
            res = _read_string(st, 4)
            assert_equal(res, b'ring')
            assert_raises(OSError, _read_string, st, 2)
class TestZlibInputStream:
    """Tests for ZlibInputStream around block boundaries and checksums."""
    def _get_data(self, size):
        # Return (compressed stream, compressed length, raw bytes) for a
        # random payload of `size` bytes.
        data = np.random.randint(0, 256, size).astype(np.uint8).tobytes()
        compressed_data = zlib.compress(data)
        stream = BytesIO(compressed_data)
        return stream, len(compressed_data), data
    def test_read(self):
        # Payload and read-chunk sizes straddle BLOCK_SIZE to exercise
        # the internal block refill logic.
        SIZES = [0, 1, 10, BLOCK_SIZE//2, BLOCK_SIZE-1,
                 BLOCK_SIZE, BLOCK_SIZE+1, 2*BLOCK_SIZE-1]
        READ_SIZES = [BLOCK_SIZE//2, BLOCK_SIZE-1,
                      BLOCK_SIZE, BLOCK_SIZE+1]
        def check(size, read_size):
            # Reassemble the payload from chunked reads and compare.
            compressed_stream, compressed_data_len, data = self._get_data(size)
            stream = ZlibInputStream(compressed_stream, compressed_data_len)
            data2 = b''
            so_far = 0
            while True:
                block = stream.read(min(read_size,
                                        size - so_far))
                if not block:
                    break
                so_far += len(block)
                data2 += block
            assert_equal(data, data2)
        for size in SIZES:
            for read_size in READ_SIZES:
                check(size, read_size)
    def test_read_max_length(self):
        # Reading past the declared compressed length must fail, even
        # though extra bytes follow in the underlying stream.
        size = 1234
        data = np.random.randint(0, 256, size).astype(np.uint8).tobytes()
        compressed_data = zlib.compress(data)
        compressed_stream = BytesIO(compressed_data + b"abbacaca")
        stream = ZlibInputStream(compressed_stream, len(compressed_data))
        stream.read(len(data))
        assert_equal(compressed_stream.tell(), len(compressed_data))
        assert_raises(OSError, stream.read, 1)
    def test_read_bad_checksum(self):
        data = np.random.randint(0, 256, 10).astype(np.uint8).tobytes()
        compressed_data = zlib.compress(data)
        # break checksum
        compressed_data = compressed_data[:-1] + bytes([(compressed_data[-1] + 1) & 255])
        compressed_stream = BytesIO(compressed_data)
        stream = ZlibInputStream(compressed_stream, len(compressed_data))
        assert_raises(zlib.error, stream.read, len(data))
    def test_seek(self):
        # Absolute, relative and (unsupported) from-end seeks; `p` tracks
        # the expected decompressed position throughout.
        compressed_stream, compressed_data_len, data = self._get_data(1024)
        stream = ZlibInputStream(compressed_stream, compressed_data_len)
        stream.seek(123)
        p = 123
        assert_equal(stream.tell(), p)
        d1 = stream.read(11)
        assert_equal(d1, data[p:p+11])
        stream.seek(321, 1)
        p = 123+11+321
        assert_equal(stream.tell(), p)
        d2 = stream.read(21)
        assert_equal(d2, data[p:p+21])
        stream.seek(641, 0)
        p = 641
        assert_equal(stream.tell(), p)
        d3 = stream.read(11)
        assert_equal(d3, data[p:p+11])
        # whence=2 and backwards seeks are not supported.
        assert_raises(OSError, stream.seek, 10, 2)
        assert_raises(OSError, stream.seek, -1, 1)
        assert_raises(ValueError, stream.seek, 1, 123)
        # Seeking beyond the payload only fails on the subsequent read.
        stream.seek(10000, 1)
        assert_raises(OSError, stream.read, 12)
    def test_seek_bad_checksum(self):
        data = np.random.randint(0, 256, 10).astype(np.uint8).tobytes()
        compressed_data = zlib.compress(data)
        # break checksum
        compressed_data = compressed_data[:-1] + bytes([(compressed_data[-1] + 1) & 255])
        compressed_stream = BytesIO(compressed_data)
        stream = ZlibInputStream(compressed_stream, len(compressed_data))
        assert_raises(zlib.error, stream.seek, len(data))
    def test_all_data_read(self):
        compressed_stream, compressed_data_len, data = self._get_data(1024)
        stream = ZlibInputStream(compressed_stream, compressed_data_len)
        assert_(not stream.all_data_read())
        stream.seek(512)
        assert_(not stream.all_data_read())
        stream.seek(1024)
        assert_(stream.all_data_read())
    def test_all_data_read_overlap(self):
        # Payload sized so the zlib checksum straddles a BLOCK_SIZE
        # boundary in the compressed stream.
        COMPRESSION_LEVEL = 6
        data = np.arange(33707000).astype(np.uint8).tobytes()
        compressed_data = zlib.compress(data, COMPRESSION_LEVEL)
        compressed_data_len = len(compressed_data)
        # check that part of the checksum overlaps
        assert_(compressed_data_len == BLOCK_SIZE + 2)
        compressed_stream = BytesIO(compressed_data)
        stream = ZlibInputStream(compressed_stream, compressed_data_len)
        assert_(not stream.all_data_read())
        stream.seek(len(data))
        assert_(stream.all_data_read())
    def test_all_data_read_bad_checksum(self):
        # Same overlap scenario, but the corrupted checksum must surface
        # as zlib.error when all_data_read() finishes decompression.
        COMPRESSION_LEVEL = 6
        data = np.arange(33707000).astype(np.uint8).tobytes()
        compressed_data = zlib.compress(data, COMPRESSION_LEVEL)
        compressed_data_len = len(compressed_data)
        # check that part of the checksum overlaps
        assert_(compressed_data_len == BLOCK_SIZE + 2)
        # break checksum
        compressed_data = compressed_data[:-1] + bytes([(compressed_data[-1] + 1) & 255])
        compressed_stream = BytesIO(compressed_data)
        stream = ZlibInputStream(compressed_stream, compressed_data_len)
        assert_(not stream.all_data_read())
        stream.seek(len(data))
        assert_raises(zlib.error, stream.all_data_read)
| 7,319
| 30.826087
| 89
|
py
|
scipy
|
scipy-main/scipy/io/matlab/tests/test_miobase.py
|
""" Testing miobase module
"""
import numpy as np
from numpy.testing import assert_equal
from pytest import raises as assert_raises
from scipy.io.matlab._miobase import matdims
def test_matdims():
    # Test matdims dimension finder
    assert_equal(matdims(np.array(1)), (1, 1))  # NumPy scalar
    assert_equal(matdims(np.array([1])), (1, 1))  # 1-D array, 1 element
    assert_equal(matdims(np.array([1,2])), (2, 1))  # 1-D array, 2 elements
    assert_equal(matdims(np.array([[2],[3]])), (2, 1))  # 2-D array, column vector
    assert_equal(matdims(np.array([[2,3]])), (1, 2))  # 2-D array, row vector
    # 3d array, rowish vector
    assert_equal(matdims(np.array([[[2,3]]])), (1, 1, 2))
    assert_equal(matdims(np.array([])), (0, 0))  # empty 1-D array
    assert_equal(matdims(np.array([[]])), (1, 0))  # empty 2-D array
    assert_equal(matdims(np.array([[[]]])), (1, 1, 0))  # empty 3-D array
    assert_equal(matdims(np.empty((1, 0, 1))), (1, 0, 1))  # empty 3-D array
    # Optional argument flips 1-D shape behavior.
    assert_equal(matdims(np.array([1,2]), 'row'), (1, 2))  # 1-D array, 2 elements
    # The argument has to make sense though
    assert_raises(ValueError, matdims, np.array([1,2]), 'bizarre')
    # Check empty sparse matrices get their own shape
    from scipy.sparse import csr_matrix, csc_matrix
    assert_equal(matdims(csr_matrix(np.zeros((3, 3)))), (3, 3))
    assert_equal(matdims(csc_matrix(np.zeros((2, 2)))), (2, 2))
| 1,464
| 43.393939
| 82
|
py
|
scipy
|
scipy-main/scipy/io/matlab/tests/test_mio_funcs.py
|
''' Jottings to work out format for __function_workspace__ matrix at end
of mat file.
'''
import os.path
import io
from scipy.io.matlab._mio5 import MatFile5Reader
test_data_path = os.path.join(os.path.dirname(__file__), 'data')
def read_minimat_vars(rdr):
    """Collect every variable from an already-positioned MatFile5Reader.

    Unnamed variables get synthetic names ``var_0``, ``var_1``, ...;
    globals are additionally listed under ``'__globals__'``.
    """
    rdr.initialize_read()
    out = {'__globals__': []}
    unnamed = 0
    while not rdr.end_of_stream():
        hdr, next_position = rdr.read_var_header()
        if hdr.name is None:
            name = 'None'
        else:
            name = hdr.name.decode('latin1')
        if name == '':
            name = 'var_%d' % unnamed
            unnamed += 1
        res = rdr.read_var_array(hdr, process=False)
        # Skip straight to the next variable regardless of how much of
        # this one was consumed.
        rdr.mat_stream.seek(next_position)
        out[name] = res
        if hdr.is_global:
            out['__globals__'].append(name)
    return out
def read_workspace_vars(fname):
    """Read the variables stored in a mat file's ``__function_workspace__``.

    The workspace matrix is itself a miniature mat-file byte stream;
    re-point the reader at it and pull out its variables.

    Parameters
    ----------
    fname : str
        Path to a mat file containing a ``__function_workspace__`` matrix.

    Returns
    -------
    dict
        Mapping of workspace variable names to their (unprocessed)
        arrays, plus a ``'__globals__'`` list of global variable names.
    """
    # Context manager ensures the file is closed even if reading raises
    # (the previous code leaked the handle on error).
    with open(fname, 'rb') as fp:
        rdr = MatFile5Reader(fp, struct_as_record=True)
        all_vars = rdr.get_variables()
        fws = all_vars['__function_workspace__']
        ws_bs = io.BytesIO(fws.tobytes())
        ws_bs.seek(2)
        rdr.mat_stream = ws_bs
        # Guess byte order from the embedded header's magic bytes.
        mi = rdr.mat_stream.read(2)
        rdr.byte_order = '<' if mi == b'IM' else '>'
        rdr.mat_stream.read(4)  # presumably byte padding
        mdict = read_minimat_vars(rdr)
    return mdict
def test_jottings():
    # Smoke test: parsing the example workspace file must not raise.
    fname = os.path.join(test_data_path, 'parabola.mat')
    read_workspace_vars(fname)
| 1,392
| 25.788462
| 72
|
py
|
scipy
|
scipy-main/scipy/io/matlab/tests/test_mio_utils.py
|
""" Testing
"""
import numpy as np
from numpy.testing import assert_array_equal, assert_
from scipy.io.matlab._mio_utils import squeeze_element, chars_to_strings
def test_squeeze_element():
    # squeeze_element matches np.squeeze for ordinary arrays.
    arr = np.zeros((1, 3))
    assert_array_equal(np.squeeze(arr), squeeze_element(arr))
    # Squeezing down to 0-D yields a bare Python scalar ...
    as_scalar = squeeze_element(np.zeros((1, 1), dtype=float))
    assert_(isinstance(as_scalar, float))
    # ... except for structured arrays, which remain ndarrays.
    as_struct = squeeze_element(np.zeros((1, 1), dtype=[('f1', 'f')]))
    assert_(isinstance(as_struct, np.ndarray))
    # Squeezing an empty array must not lose its dtype.
    as_empty = squeeze_element(np.empty(0, np.uint8))
    assert as_empty.dtype == np.uint8
def test_chars_strings():
    # Build a (4, 6) array of single chars plus its string equivalent.
    strings = ['learn ', 'python', 'fast  ', 'here  ']
    str_arr = np.array(strings, dtype='U6')  # shape (4,)
    char_arr = np.array([list(s) for s in strings], dtype='U1')  # shape (4,6)
    assert_array_equal(chars_to_strings(char_arr), str_arr)
    # Higher-dimensional char arrays collapse their last axis the same way.
    for char_shape, str_shape in (((2, 2, 6), (2, 2)),
                                  ((1, 2, 2, 6), (1, 2, 2))):
        assert_array_equal(chars_to_strings(char_arr.reshape(char_shape)),
                           str_arr.reshape(str_shape))
    # Fortran-ordered input is handled too.
    char_arrf = np.array([list(s) for s in strings], dtype='U1', order='F')
    assert_array_equal(chars_to_strings(char_arrf), str_arr)
    # Empty char arrays become empty strings.
    assert_array_equal(chars_to_strings(np.array([['']], dtype='U1')),
                       np.array([''], dtype='U1'))
| 1,594
| 33.673913
| 72
|
py
|
scipy
|
scipy-main/scipy/io/matlab/tests/test_mio5_utils.py
|
""" Testing mio5_utils Cython module
"""
import sys
from io import BytesIO
import numpy as np
from numpy.testing import assert_array_equal, assert_equal, assert_
from pytest import raises as assert_raises
import scipy.io.matlab._byteordercodes as boc
import scipy.io.matlab._streams as streams
import scipy.io.matlab._mio5_params as mio5p
import scipy.io.matlab._mio5_utils as m5u
def test_byteswap():
    # byteswap_u4 must agree with NumPy's byteswap and be its own inverse.
    for val in (1, 0x100, 0x10000):
        arr = np.array(val, dtype=np.uint32)
        swapped = m5u.byteswap_u4(arr)
        assert_equal(arr.byteswap().item(), swapped)
        assert_equal(arr.item(), m5u.byteswap_u4(swapped))
def _make_tag(base_dt, val, mdtype, sde=False):
    """Build a one-element structured array laying out a matlab 5 tag.

    A full tag is mdtype + byte_count (u4 each) followed by the value,
    padded to an 8-byte boundary; a small-data-element (sde) tag packs
    mdtype, byte_count (u2 each) and the value into a single 8 bytes.
    """
    base_dt = np.dtype(base_dt)
    bo = boc.to_numpy_code(base_dt.byteorder)
    byte_count = base_dt.itemsize
    if not sde:
        udt = bo + 'u4'
        # Pad the data portion out to the next 8-byte boundary.
        padding = 8 - (byte_count % 8)
        all_dt = [('mdtype', udt),
                  ('byte_count', udt),
                  ('val', base_dt)]
        if padding:
            all_dt.append(('padding', 'u1', padding))
    else:  # is sde
        udt = bo + 'u2'
        padding = 4-byte_count
        # The u2 mdtype/byte_count pair order depends on endianness so the
        # on-disk u4 header reads the same either way.
        if bo == '<':  # little endian
            all_dt = [('mdtype', udt),
                      ('byte_count', udt),
                      ('val', base_dt)]
        else:  # big endian
            all_dt = [('byte_count', udt),
                      ('mdtype', udt),
                      ('val', base_dt)]
        if padding:
            all_dt.append(('padding', 'u1', padding))
    tag = np.zeros((1,), dtype=all_dt)
    tag['mdtype'] = mdtype
    tag['byte_count'] = byte_count
    tag['val'] = val
    return tag
def _write_stream(stream, *strings):
    # Replace the stream's contents with the concatenation of `strings`
    # and rewind to the start, ready for reading.
    stream.truncate(0)
    stream.seek(0)
    stream.write(b''.join(strings))
    stream.seek(0)
def _make_readerlike(stream, byte_order=boc.native_code):
    """Return a minimal stand-in for a MatFile5Reader with default options."""
    class _Reader:
        pass
    reader = _Reader()
    reader.mat_stream = stream
    reader.byte_order = byte_order
    reader.struct_as_record = True
    reader.uint16_codec = sys.getdefaultencoding()
    reader.chars_as_strings = False
    reader.mat_dtype = False
    reader.squeeze_me = False
    return reader
def test_read_tag():
    # Exercise the error paths of VarReader5.read_tag.
    str_io = BytesIO()
    c_reader = m5u.VarReader5(_make_readerlike(str_io))
    # Reading from an empty stream fails.
    # This works for StringIO but _not_ BytesIO
    assert_raises(OSError, c_reader.read_tag)
    # A small-data-element tag with an impossible byte count fails too.
    bad_sde = _make_tag('i4', 1, mio5p.miINT32, sde=True)
    bad_sde['byte_count'] = 5
    _write_stream(str_io, bad_sde.tobytes())
    assert_raises(ValueError, c_reader.read_tag)
def test_read_stream():
    # _read_into must return exactly the serialized tag bytes.
    tag = _make_tag('i4', 1, mio5p.miINT32, sde=True)
    st = streams.make_stream(BytesIO(tag.tobytes()))
    fetched = streams._read_into(st, tag.itemsize)
    assert_equal(fetched, tag.tobytes())
def test_read_numeric():
    """read_numeric round-trips full and sde tags in both byte orders."""
    # make reader-like thing
    str_io = BytesIO()
    r = _make_readerlike(str_io)
    # check simplest of tags
    for base_dt, val, mdtype in (('u2', 30, mio5p.miUINT16),
                                 ('i4', 1, mio5p.miINT32),
                                 ('i2', -1, mio5p.miINT16)):
        for byte_code in ('<', '>'):
            r.byte_order = byte_code
            # A fresh VarReader5 picks up the reader's byte order.
            c_reader = m5u.VarReader5(r)
            assert_equal(c_reader.little_endian, byte_code == '<')
            assert_equal(c_reader.is_swapped, byte_code != boc.native_code)
            # Try both the full tag and the small-data-element form.
            for sde_f in (False, True):
                dt = np.dtype(base_dt).newbyteorder(byte_code)
                a = _make_tag(dt, val, mdtype, sde_f)
                a_str = a.tobytes()
                _write_stream(str_io, a_str)
                el = c_reader.read_numeric()
                assert_equal(el, val)
                # two sequential reads
                _write_stream(str_io, a_str, a_str)
                el = c_reader.read_numeric()
                assert_equal(el, val)
                el = c_reader.read_numeric()
                assert_equal(el, val)
def test_read_numeric_writeable():
    # Arrays returned by read_numeric must be writeable.
    str_io = BytesIO()
    reader = m5u.VarReader5(_make_readerlike(str_io, '<'))
    tag = _make_tag(np.dtype('<u2'), 30, mio5p.miUINT16, 0)
    _write_stream(str_io, tag.tobytes())
    el = reader.read_numeric()
    assert_(el.flags.writeable is True)
def test_zero_byte_string():
    # Tests hack to allow chars of non-zero length, but 0 bytes
    # make reader-like thing
    str_io = BytesIO()
    r = _make_readerlike(str_io, boc.native_code)
    c_reader = m5u.VarReader5(r)
    # Hand-build a bare tag header (no value payload).
    tag_dt = np.dtype([('mdtype', 'u4'), ('byte_count', 'u4')])
    tag = np.zeros((1,), dtype=tag_dt)
    tag['mdtype'] = mio5p.miINT8
    tag['byte_count'] = 1
    hdr = m5u.VarHeader5()
    # Try when string is 1 length
    hdr.set_dims([1,])
    _write_stream(str_io, tag.tobytes() + b'        ')
    str_io.seek(0)
    val = c_reader.read_char(hdr)
    assert_equal(val, ' ')
    # Now when string has 0 bytes 1 length
    tag['byte_count'] = 0
    _write_stream(str_io, tag.tobytes())
    str_io.seek(0)
    val = c_reader.read_char(hdr)
    assert_equal(val, ' ')
    # Now when string has 0 bytes 4 length
    str_io.seek(0)
    hdr.set_dims([4,])
    val = c_reader.read_char(hdr)
    # Zero stored bytes with declared length 4 expands to 4 blanks.
    assert_array_equal(val, [' '] * 4)
| 5,389
| 28.944444
| 75
|
py
|
scipy
|
scipy-main/scipy/io/matlab/tests/test_mio.py
|
''' Nose test generators
Need function load / save / roundtrip tests
'''
import os
from collections import OrderedDict
from os.path import join as pjoin, dirname
from glob import glob
from io import BytesIO
import re
from tempfile import mkdtemp
import warnings
import shutil
import gzip
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_equal, assert_, assert_warns, assert_allclose)
import pytest
from pytest import raises as assert_raises
import numpy as np
from numpy import array
import scipy.sparse as SP
import scipy.io
from scipy.io.matlab import MatlabOpaque, MatlabFunction, MatlabObject
import scipy.io.matlab._byteordercodes as boc
from scipy.io.matlab._miobase import (
matdims, MatWriteError, MatReadError, matfile_version)
from scipy.io.matlab._mio import mat_reader_factory, loadmat, savemat, whosmat
from scipy.io.matlab._mio5 import (
MatFile5Writer, MatFile5Reader, varmats_from_mat, to_writeable,
EmptyStructMarker)
import scipy.io.matlab._mio5_params as mio5p
# Directory holding the reference .mat files shipped with the tests.
test_data_path = pjoin(dirname(__file__), 'data')
def mlarr(*args, **kwargs):
    """Convenience function to return matlab-compatible 2-D array."""
    arr = np.array(*args, **kwargs)
    # matdims gives the shape MATLAB would use for this data.
    return arr.reshape(matdims(arr))
# Define cases to test
theta = np.pi/4*np.arange(9,dtype=float).reshape(1,9)
case_table4 = [
{'name': 'double',
'classes': {'testdouble': 'double'},
'expected': {'testdouble': theta}
}]
case_table4.append(
{'name': 'string',
'classes': {'teststring': 'char'},
'expected': {'teststring':
array(['"Do nine men interpret?" "Nine men," I nod.'])}
})
case_table4.append(
{'name': 'complex',
'classes': {'testcomplex': 'double'},
'expected': {'testcomplex': np.cos(theta) + 1j*np.sin(theta)}
})
A = np.zeros((3,5))
A[0] = list(range(1,6))
A[:,0] = list(range(1,4))
case_table4.append(
{'name': 'matrix',
'classes': {'testmatrix': 'double'},
'expected': {'testmatrix': A},
})
case_table4.append(
{'name': 'sparse',
'classes': {'testsparse': 'sparse'},
'expected': {'testsparse': SP.coo_matrix(A)},
})
B = A.astype(complex)
B[0,0] += 1j
case_table4.append(
{'name': 'sparsecomplex',
'classes': {'testsparsecomplex': 'sparse'},
'expected': {'testsparsecomplex': SP.coo_matrix(B)},
})
case_table4.append(
{'name': 'multi',
'classes': {'theta': 'double', 'a': 'double'},
'expected': {'theta': theta, 'a': A},
})
case_table4.append(
{'name': 'minus',
'classes': {'testminus': 'double'},
'expected': {'testminus': mlarr(-1)},
})
case_table4.append(
{'name': 'onechar',
'classes': {'testonechar': 'char'},
'expected': {'testonechar': array(['r'])},
})
# Cell arrays stored as object arrays
CA = mlarr(( # tuple for object array creation
[],
mlarr([1]),
mlarr([[1,2]]),
mlarr([[1,2,3]])), dtype=object).reshape(1,-1)
CA[0,0] = array(
['This cell contains this string and 3 arrays of increasing length'])
case_table5 = [
{'name': 'cell',
'classes': {'testcell': 'cell'},
'expected': {'testcell': CA}}]
CAE = mlarr(( # tuple for object array creation
mlarr(1),
mlarr(2),
mlarr([]),
mlarr([]),
mlarr(3)), dtype=object).reshape(1,-1)
objarr = np.empty((1,1),dtype=object)
objarr[0,0] = mlarr(1)
case_table5.append(
{'name': 'scalarcell',
'classes': {'testscalarcell': 'cell'},
'expected': {'testscalarcell': objarr}
})
case_table5.append(
{'name': 'emptycell',
'classes': {'testemptycell': 'cell'},
'expected': {'testemptycell': CAE}})
case_table5.append(
{'name': 'stringarray',
'classes': {'teststringarray': 'char'},
'expected': {'teststringarray': array(
['one ', 'two ', 'three'])},
})
case_table5.append(
{'name': '3dmatrix',
'classes': {'test3dmatrix': 'double'},
'expected': {
'test3dmatrix': np.transpose(np.reshape(list(range(1,25)), (4,3,2)))}
})
st_sub_arr = array([np.sqrt(2),np.exp(1),np.pi]).reshape(1,3)
dtype = [(n, object) for n in ['stringfield', 'doublefield', 'complexfield']]
st1 = np.zeros((1,1), dtype)
st1['stringfield'][0,0] = array(['Rats live on no evil star.'])
st1['doublefield'][0,0] = st_sub_arr
st1['complexfield'][0,0] = st_sub_arr * (1 + 1j)
case_table5.append(
{'name': 'struct',
'classes': {'teststruct': 'struct'},
'expected': {'teststruct': st1}
})
CN = np.zeros((1,2), dtype=object)
CN[0,0] = mlarr(1)
CN[0,1] = np.zeros((1,3), dtype=object)
CN[0,1][0,0] = mlarr(2, dtype=np.uint8)
CN[0,1][0,1] = mlarr([[3]], dtype=np.uint8)
CN[0,1][0,2] = np.zeros((1,2), dtype=object)
CN[0,1][0,2][0,0] = mlarr(4, dtype=np.uint8)
CN[0,1][0,2][0,1] = mlarr(5, dtype=np.uint8)
case_table5.append(
{'name': 'cellnest',
'classes': {'testcellnest': 'cell'},
'expected': {'testcellnest': CN},
})
st2 = np.empty((1,1), dtype=[(n, object) for n in ['one', 'two']])
st2[0,0]['one'] = mlarr(1)
st2[0,0]['two'] = np.empty((1,1), dtype=[('three', object)])
st2[0,0]['two'][0,0]['three'] = array(['number 3'])
case_table5.append(
{'name': 'structnest',
'classes': {'teststructnest': 'struct'},
'expected': {'teststructnest': st2}
})
a = np.empty((1,2), dtype=[(n, object) for n in ['one', 'two']])
a[0,0]['one'] = mlarr(1)
a[0,0]['two'] = mlarr(2)
a[0,1]['one'] = array(['number 1'])
a[0,1]['two'] = array(['number 2'])
case_table5.append(
{'name': 'structarr',
'classes': {'teststructarr': 'struct'},
'expected': {'teststructarr': a}
})
ODT = np.dtype([(n, object) for n in
['expr', 'inputExpr', 'args',
'isEmpty', 'numArgs', 'version']])
MO = MatlabObject(np.zeros((1,1), dtype=ODT), 'inline')
m0 = MO[0,0]
m0['expr'] = array(['x'])
m0['inputExpr'] = array([' x = INLINE_INPUTS_{1};'])
m0['args'] = array(['x'])
m0['isEmpty'] = mlarr(0)
m0['numArgs'] = mlarr(1)
m0['version'] = mlarr(1)
case_table5.append(
{'name': 'object',
'classes': {'testobject': 'object'},
'expected': {'testobject': MO}
})
fp_u_str = open(pjoin(test_data_path, 'japanese_utf8.txt'), 'rb')
u_str = fp_u_str.read().decode('utf-8')
fp_u_str.close()
case_table5.append(
{'name': 'unicode',
'classes': {'testunicode': 'char'},
'expected': {'testunicode': array([u_str])}
})
case_table5.append(
{'name': 'sparse',
'classes': {'testsparse': 'sparse'},
'expected': {'testsparse': SP.coo_matrix(A)},
})
case_table5.append(
{'name': 'sparsecomplex',
'classes': {'testsparsecomplex': 'sparse'},
'expected': {'testsparsecomplex': SP.coo_matrix(B)},
})
case_table5.append(
{'name': 'bool',
'classes': {'testbools': 'logical'},
'expected': {'testbools':
array([[True], [False]])},
})
case_table5_rt = case_table5[:]
# Inline functions can't be concatenated in matlab, so RT only
case_table5_rt.append(
{'name': 'objectarray',
'classes': {'testobjectarray': 'object'},
'expected': {'testobjectarray': np.repeat(MO, 2).reshape(1,2)}})
def types_compatible(var1, var2):
    """Return True when `var1` and `var2` have the same or compatible types.

    A 0-D NumPy array counts as compatible with the bare Python type of
    the scalar it contains.
    """
    if type(var1) is type(var2):
        return True
    # Whichever side is a 0-D ndarray, compare its item's type to the
    # other side's type.
    for zero_d, other in ((var1, var2), (var2, var1)):
        if type(zero_d) is np.ndarray and zero_d.shape == ():
            return type(zero_d.item()) is type(other)
    return False
def _check_level(label, expected, actual):
    """Recursively check one level of a potentially nested variable.

    Parameters
    ----------
    label : str
        Human-readable position within the structure, used in error messages.
    expected : object
        The value we expect at this level.
    actual : object
        The value read back from the MAT file.
    """
    if SP.issparse(expected):  # allow different types of sparse matrices
        assert_(SP.issparse(actual))
        assert_array_almost_equal(actual.toarray(),
                                  expected.toarray(),
                                  err_msg=label,
                                  decimal=5)
        return
    # Check types are as expected
    assert_(types_compatible(expected, actual),
            "Expected type %s, got %s at %s" %
            (type(expected), type(actual), label))
    # A field in a record array may not be an ndarray
    # A scalar from a record array will be type np.void
    if not isinstance(expected,
                      (np.void, np.ndarray, MatlabObject)):
        assert_equal(expected, actual)
        return
    # This is an ndarray-like thing
    assert_(expected.shape == actual.shape,
            msg='Expected shape {}, got {} at {}'.format(expected.shape,
                                                         actual.shape,
                                                         label))
    ex_dtype = expected.dtype
    if ex_dtype.hasobject:  # array of objects
        if isinstance(expected, MatlabObject):
            assert_equal(expected.classname, actual.classname)
        for i, ev in enumerate(expected):
            level_label = "%s, [%d], " % (label, i)
            _check_level(level_label, ev, actual[i])
        return
    if ex_dtype.fields:  # probably recarray
        for fn in ex_dtype.fields:
            level_label = f"{label}, field {fn}, "
            _check_level(level_label,
                         expected[fn], actual[fn])
        return
    # BUG fix: np.unicode_ was removed in NumPy 2.0; np.str_ is the
    # equivalent (and was always an alias for the same type).
    if ex_dtype.type in (str,  # string or bool
                         np.str_,
                         np.bool_):
        assert_equal(actual, expected, err_msg=label)
        return
    # Something numeric
    assert_array_almost_equal(actual, expected, err_msg=label, decimal=5)
def _load_check_case(name, files, case):
    """Load each file and check all expected variables via _check_level."""
    for file_name in files:
        matdict = loadmat(file_name, struct_as_record=True)
        label = f"test {name}; file {file_name}"
        for k, expected in case.items():
            k_label = f"{label}, variable {k}"
            assert_(k in matdict, "Missing key at %s" % k_label)
            _check_level(k_label, expected, matdict[k])
def _whos_check_case(name, files, case, classes):
for file_name in files:
label = f"test {name}; file {file_name}"
whos = whosmat(file_name)
expected_whos = [
(k, expected.shape, classes[k]) for k, expected in case.items()]
whos.sort()
expected_whos.sort()
assert_equal(whos, expected_whos,
f"{label}: {whos!r} != {expected_whos!r}"
)
# Round trip tests
def _rt_check_case(name, expected, format):
    """Save `expected` to an in-memory stream, then check it loads back."""
    mat_stream = BytesIO()
    savemat(mat_stream, expected, format=format)
    mat_stream.seek(0)
    _load_check_case(name, [mat_stream], expected)
# generator for tests
def _cases(version, filt='test%(name)s_*.mat'):
    """Yield (name, files, expected, classes) for each case of `version`.

    `filt` is a glob pattern with a %(name)s placeholder used to locate
    the matching data files; pass None to skip the file search.
    """
    if version == '4':
        cases = case_table4
    elif version == '5':
        cases = case_table5
    else:
        assert version == '5_rt'
        cases = case_table5_rt
    for case in cases:
        name = case['name']
        expected = case['expected']
        if filt is None:
            files = None
        else:
            use_filt = pjoin(test_data_path, filt % dict(name=name))
            files = glob(use_filt)
            assert len(files) > 0, \
                f"No files for test {name} using filter {filt}"
        classes = case['classes']
        yield name, files, expected, classes
@pytest.mark.parametrize('version', ('4', '5'))
def test_load(version):
    # Check all example data files for `version` load with expected values
    for case in _cases(version):
        _load_check_case(*case[:3])
@pytest.mark.parametrize('version', ('4', '5'))
def test_whos(version):
    # Check whosmat output for all example data files of `version`
    for case in _cases(version):
        _whos_check_case(*case)
# generator for round trip tests
@pytest.mark.parametrize('version, fmts', [
    ('4', ['4', '5']),
    ('5_rt', ['5']),
])
def test_round_trip(version, fmts):
    # Save every case in each requested format, check it loads back intact
    for case in _cases(version, filt=None):
        for fmt in fmts:
            _rt_check_case(case[0], case[2], fmt)
def test_gzip_simple():
    """Round-trip a sparse matrix through a gzip-compressed MAT 4 file."""
    xdense = np.zeros((20,20))
    xdense[2,3] = 2.3
    xdense[4,5] = 4.5
    x = SP.csc_matrix(xdense)
    name = 'gzip_test'
    expected = {'x':x}
    format = '4'
    tmpdir = mkdtemp()
    try:
        fname = pjoin(tmpdir,name)
        # BUG fix: use context managers so the gzip streams are closed
        # even when savemat/loadmat raise (they previously leaked).
        with gzip.open(fname, mode='wb') as mat_stream:
            savemat(mat_stream, expected, format=format)
        with gzip.open(fname, mode='rb') as mat_stream:
            actual = loadmat(mat_stream, struct_as_record=True)
    finally:
        shutil.rmtree(tmpdir)
    assert_array_almost_equal(actual['x'].toarray(),
                              expected['x'].toarray(),
                              err_msg=repr(actual))
def test_multiple_open():
    # Ticket #1039, on Windows: check that files are not left open
    # NOTE: explicit open/close (rather than ``with``) is intentional here;
    # the point of the test is the open/close behavior itself.
    tmpdir = mkdtemp()
    try:
        x = dict(x=np.zeros((2, 2)))
        fname = pjoin(tmpdir, "a.mat")
        # Check that file is not left open
        savemat(fname, x)
        os.unlink(fname)
        savemat(fname, x)
        loadmat(fname)
        os.unlink(fname)
        # Check that stream is left open
        f = open(fname, 'wb')
        savemat(f, x)
        f.seek(0)
        f.close()
        f = open(fname, 'rb')
        loadmat(f)
        f.seek(0)
        f.close()
    finally:
        shutil.rmtree(tmpdir)
def test_mat73():
    """Check that v7.3 (HDF5) MAT files raise NotImplementedError."""
    filenames = glob(
        pjoin(test_data_path, 'testhdf5*.mat'))
    assert_(len(filenames) > 0)
    for filename in filenames:
        # BUG fix: use a context manager so the file is closed even when
        # assert_raises fails (the handle previously leaked on failure).
        with open(filename, 'rb') as fp:
            assert_raises(NotImplementedError,
                          loadmat,
                          fp,
                          struct_as_record=True)
def test_warnings():
    # This test is an echo of the previous behavior, which was to raise a
    # warning if the user triggered a search for mat files on the Python system
    # path. We can remove the test in the next version after upcoming (0.13).
    fname = pjoin(test_data_path, 'testdouble_7.1_GLNX86.mat')
    with warnings.catch_warnings():
        # turn all warnings into errors so any warning fails the test
        warnings.simplefilter('error')
        # This should not generate a warning
        loadmat(fname, struct_as_record=True)
        # This neither
        loadmat(fname, struct_as_record=False)
def test_regression_653():
    """A dict with only invalid keys saves as an empty MATLAB struct.

    Saving such a dictionary used to raise an error; it should instead be
    written as an empty struct (loaded back as a 1x1 object array of None).
    """
    stream = BytesIO()
    savemat(stream, {'d':{1:2}}, format='5')
    loaded = loadmat(stream)['d']
    assert_equal(loaded.shape, (1,1))
    assert_equal(loaded.dtype, np.dtype(object))
    assert_(loaded[0,0] is None)
def test_structname_len():
    # Test limit for length of field names in structs
    # MATLAB 5 format allows at most 31 characters without long_field_names
    lim = 31
    fldname = 'a' * lim
    st1 = np.zeros((1,1), dtype=[(fldname, object)])
    savemat(BytesIO(), {'longstruct': st1}, format='5')
    fldname = 'a' * (lim+1)
    st1 = np.zeros((1,1), dtype=[(fldname, object)])
    assert_raises(ValueError, savemat, BytesIO(),
                  {'longstruct': st1}, format='5')
def test_4_and_long_field_names_incompatible():
    # Long field names option not supported in 4
    my_struct = np.zeros((1,1),dtype=[('my_fieldname',object)])
    assert_raises(ValueError, savemat, BytesIO(),
                  {'my_struct':my_struct}, format='4', long_field_names=True)
def test_long_field_names():
    # Test limit for length of field names in structs
    # with long_field_names=True the limit rises to 63 characters
    lim = 63
    fldname = 'a' * lim
    st1 = np.zeros((1,1), dtype=[(fldname, object)])
    savemat(BytesIO(), {'longstruct': st1}, format='5',long_field_names=True)
    fldname = 'a' * (lim+1)
    st1 = np.zeros((1,1), dtype=[(fldname, object)])
    assert_raises(ValueError, savemat, BytesIO(),
                  {'longstruct': st1}, format='5',long_field_names=True)
def test_long_field_names_in_struct():
    # Regression test - long_field_names was erased if you passed a struct
    # within a struct
    lim = 63
    fldname = 'a' * lim
    cell = np.ndarray((1,2),dtype=object)
    st1 = np.zeros((1,1), dtype=[(fldname, object)])
    cell[0,0] = st1
    cell[0,1] = st1
    savemat(BytesIO(), {'longstruct': cell}, format='5',long_field_names=True)
    #
    # Check to make sure it fails with long field names off
    #
    assert_raises(ValueError, savemat, BytesIO(),
                  {'longstruct': cell}, format='5', long_field_names=False)
def test_cell_with_one_thing_in_it():
    """Both 1x2 and 1x1 cell arrays of strings must save without error.

    Regression test: a 1x2 cell of strings always worked, but a 1x1 cell
    holding a single string used to fail.
    """
    two_cell = np.ndarray((1,2),dtype=object)
    two_cell[0,0] = 'Hello'
    two_cell[0,1] = 'World'
    savemat(BytesIO(), {'x': two_cell}, format='5')
    one_cell = np.ndarray((1,1),dtype=object)
    one_cell[0,0] = 'Hello, world'
    savemat(BytesIO(), {'x': one_cell}, format='5')
def test_writer_properties():
    # Tests getting, setting of properties of matrix writer
    mfw = MatFile5Writer(BytesIO())
    assert_equal(mfw.global_vars, [])
    mfw.global_vars = ['avar']
    assert_equal(mfw.global_vars, ['avar'])
    assert_equal(mfw.unicode_strings, False)
    mfw.unicode_strings = True
    assert_equal(mfw.unicode_strings, True)
    assert_equal(mfw.long_field_names, False)
    mfw.long_field_names = True
    assert_equal(mfw.long_field_names, True)
def test_use_small_element():
    # Test whether we're using small data element or not
    # (names <= 4 bytes can use the MAT5 "small data element" encoding)
    sio = BytesIO()
    wtr = MatFile5Writer(sio)
    # First check size for no sde for name
    arr = np.zeros(10)
    wtr.put_variables({'aaaaa': arr})
    w_sz = len(sio.getvalue())
    # Check small name results in largish difference in size
    sio.truncate(0)
    sio.seek(0)
    wtr.put_variables({'aaaa': arr})
    assert_(w_sz - len(sio.getvalue()) > 4)
    # Whereas increasing name size makes less difference
    sio.truncate(0)
    sio.seek(0)
    wtr.put_variables({'aaaaaa': arr})
    assert_(len(sio.getvalue()) - w_sz < 4)
def test_save_dict():
    """dict and OrderedDict save as record arrays and preserve key order."""
    expected = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)])
    for mapping_type in (dict, OrderedDict):
        # Initialize from tuples so insertion order is well defined
        mapping = mapping_type([('a', 1), ('b', 2)])
        stream = BytesIO()
        savemat(stream, {'dict': mapping})
        stream.seek(0)
        loaded = loadmat(stream)['dict']
        assert_equal(loaded.dtype.names, ('a', 'b'))
        assert_array_equal(loaded, expected)
def test_1d_shape():
    # New 5 behavior is 1D -> row vector
    arr = np.arange(5)
    for format in ('4', '5'):
        # Column is the default
        stream = BytesIO()
        savemat(stream, {'oned': arr}, format=format)
        vals = loadmat(stream)
        assert_equal(vals['oned'].shape, (1, 5))
        # can be explicitly 'column' for oned_as
        stream = BytesIO()
        savemat(stream, {'oned':arr},
                format=format,
                oned_as='column')
        vals = loadmat(stream)
        assert_equal(vals['oned'].shape, (5,1))
        # but different from 'row'
        stream = BytesIO()
        savemat(stream, {'oned':arr},
                format=format,
                oned_as='row')
        vals = loadmat(stream)
        assert_equal(vals['oned'].shape, (1,5))
def test_compression():
    """Check do_compression shrinks the output and round-trips correctly."""
    arr = np.zeros(100).reshape((5,20))
    arr[2,10] = 1
    stream = BytesIO()
    savemat(stream, {'arr':arr})
    raw_len = len(stream.getvalue())
    vals = loadmat(stream)
    assert_array_equal(vals['arr'], arr)
    stream = BytesIO()
    savemat(stream, {'arr':arr}, do_compression=True)
    compressed_len = len(stream.getvalue())
    vals = loadmat(stream)
    assert_array_equal(vals['arr'], arr)
    assert_(raw_len > compressed_len)
    # Concatenate, test later
    arr2 = arr.copy()
    arr2[0,0] = 1
    stream = BytesIO()
    savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=False)
    vals = loadmat(stream)
    assert_array_equal(vals['arr2'], arr2)
    stream = BytesIO()
    savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=True)
    vals = loadmat(stream)
    assert_array_equal(vals['arr2'], arr2)
def test_single_object():
    # Saving a 0-d object array must not raise
    buf = BytesIO()
    savemat(buf, {'A':np.array(1, dtype=object)})
def test_skip_variable():
    """Skip over the first of two variables via the reader factory."""
    # Test skipping over the first of two variables in a MAT file
    # using mat_reader_factory and put_variables to read them in.
    #
    # This is a regression test of a problem that's caused by
    # using the compressed file reader seek instead of the raw file
    # I/O seek when skipping over a compressed chunk.
    #
    # The problem arises when the chunk is large: this file has
    # a 256x256 array of random (uncompressible) doubles.
    #
    filename = pjoin(test_data_path,'test_skip_variable.mat')
    #
    # Prove that it loads with loadmat
    #
    d = loadmat(filename, struct_as_record=True)
    assert_('first' in d)
    assert_('second' in d)
    #
    # Make the factory
    #
    factory, file_opened = mat_reader_factory(filename, struct_as_record=True)
    #
    # This is where the factory breaks with an error in MatMatrixGetter.to_next
    #
    d = factory.get_variables('second')
    assert_('second' in d)
    factory.mat_stream.close()
def test_empty_struct():
    # ticket 885
    filename = pjoin(test_data_path,'test_empty_struct.mat')
    # before ticket fix, this would crash with ValueError, empty data
    # type
    d = loadmat(filename, struct_as_record=True)
    a = d['a']
    assert_equal(a.shape, (1,1))
    assert_equal(a.dtype, np.dtype(object))
    assert_(a[0,0] is None)
    stream = BytesIO()
    # empty unicode array round-trips too
    arr = np.array((), dtype='U')
    # before ticket fix, this used to give data type not understood
    savemat(stream, {'arr':arr})
    d = loadmat(stream)
    a2 = d['arr']
    assert_array_equal(a2, arr)
def test_save_empty_dict():
    """An empty dict is written as an empty 1x1 MATLAB struct."""
    stream = BytesIO()
    savemat(stream, {'arr': {}})
    loaded = loadmat(stream)
    result = loaded['arr']
    assert_equal(result.shape, (1,1))
    assert_equal(result.dtype, np.dtype(object))
    assert_(result[0,0] is None)
def assert_any_equal(output, alternatives):
    """Assert `output` is equal to at least one element in `alternatives`.

    Equality is element-wise: `output` matches an alternative when every
    element compares equal (checked with ``np.all``).
    """
    # idiomatic replacement for the manual flag-and-break loop
    assert_(any(np.all(output == expected) for expected in alternatives))
def test_to_writeable():
    """Check the conversions performed by the to_writeable save helper."""
    res = to_writeable(np.array([1]))  # pass through ndarrays
    assert_equal(res.shape, (1,))
    assert_array_equal(res, 1)
    # Dict fields can be written in any order
    expected1 = np.array([(1, 2)], dtype=[('a', '|O8'), ('b', '|O8')])
    expected2 = np.array([(2, 1)], dtype=[('b', '|O8'), ('a', '|O8')])
    alternatives = (expected1, expected2)
    assert_any_equal(to_writeable({'a':1,'b':2}), alternatives)
    # Fields with underscores discarded
    assert_any_equal(to_writeable({'a':1,'b':2, '_c':3}), alternatives)
    # Not-string fields discarded
    assert_any_equal(to_writeable({'a':1,'b':2, 100:3}), alternatives)
    # String fields that are not valid Python identifiers discarded
    # (comment fix: '99' is discarded precisely because it is NOT valid)
    assert_any_equal(to_writeable({'a':1,'b':2, '99':3}), alternatives)
    # Object with field names is equivalent
    class klass:
        pass
    c = klass
    c.a = 1
    c.b = 2
    assert_any_equal(to_writeable(c), alternatives)
    # empty list and tuple go to empty array
    res = to_writeable([])
    assert_equal(res.shape, (0,))
    assert_equal(res.dtype.type, np.float64)
    res = to_writeable(())
    assert_equal(res.shape, (0,))
    assert_equal(res.dtype.type, np.float64)
    # None -> None
    assert_(to_writeable(None) is None)
    # String to strings
    assert_equal(to_writeable('a string').dtype.type, np.str_)
    # Scalars to numpy to NumPy scalars
    res = to_writeable(1)
    assert_equal(res.shape, ())
    assert_equal(res.dtype.type, np.array(1).dtype.type)
    assert_array_equal(res, 1)
    # Empty dict returns EmptyStructMarker
    assert_(to_writeable({}) is EmptyStructMarker)
    # Object does not have (even empty) __dict__
    assert_(to_writeable(object()) is None)
    # Custom object does have empty __dict__, returns EmptyStructMarker
    class C:
        pass
    # BUG fix: was ``to_writeable(c())`` which instantiated the *klass*
    # class above instead of the freshly defined empty class C.
    assert_(to_writeable(C()) is EmptyStructMarker)
    # dict keys with legal characters are convertible
    res = to_writeable({'a': 1})['a']
    assert_equal(res.shape, (1,))
    assert_equal(res.dtype.type, np.object_)
    # Only fields with illegal characters, falls back to EmptyStruct
    assert_(to_writeable({'1':1}) is EmptyStructMarker)
    assert_(to_writeable({'_a':1}) is EmptyStructMarker)
    # Unless there are valid fields, in which case structured array
    assert_equal(to_writeable({'1':1, 'f': 2}),
                 np.array([(2,)], dtype=[('f', '|O8')]))
def test_recarray():
    # check roundtrip of structured array
    dt = [('f1', 'f8'),
          ('f2', 'S10')]
    arr = np.zeros((2,), dtype=dt)
    arr[0]['f1'] = 0.5
    arr[0]['f2'] = 'python'
    arr[1]['f1'] = 99
    arr[1]['f2'] = 'not perl'
    stream = BytesIO()
    savemat(stream, {'arr': arr})
    # struct_as_record=False loads structs as mat_struct attribute objects
    d = loadmat(stream, struct_as_record=False)
    a20 = d['arr'][0,0]
    assert_equal(a20.f1, 0.5)
    assert_equal(a20.f2, 'python')
    d = loadmat(stream, struct_as_record=True)
    a20 = d['arr'][0,0]
    assert_equal(a20['f1'], 0.5)
    assert_equal(a20['f2'], 'python')
    # structs always come back as object types
    assert_equal(a20.dtype, np.dtype([('f1', 'O'),
                                      ('f2', 'O')]))
    a21 = d['arr'].flat[1]
    assert_equal(a21['f1'], 99)
    assert_equal(a21['f2'], 'not perl')
def test_save_object():
    """Check a plain Python object saves via its __dict__ and loads back."""
    class C:
        pass
    c = C()
    c.field1 = 1
    c.field2 = 'a string'
    stream = BytesIO()
    savemat(stream, {'c': c})
    d = loadmat(stream, struct_as_record=False)
    c2 = d['c'][0,0]
    assert_equal(c2.field1, 1)
    assert_equal(c2.field2, 'a string')
    d = loadmat(stream, struct_as_record=True)
    c2 = d['c'][0,0]
    assert_equal(c2['field1'], 1)
    assert_equal(c2['field2'], 'a string')
def test_read_opts():
    # tests if read is seeing option sets, at initialization and after
    # initialization
    arr = np.arange(6).reshape(1,6)
    stream = BytesIO()
    savemat(stream, {'a': arr})
    rdr = MatFile5Reader(stream)
    back_dict = rdr.get_variables()
    rarr = back_dict['a']
    assert_array_equal(rarr, arr)
    rdr = MatFile5Reader(stream, squeeze_me=True)
    assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,)))
    rdr.squeeze_me = False
    assert_array_equal(rarr, arr)
    rdr = MatFile5Reader(stream, byte_order=boc.native_code)
    assert_array_equal(rdr.get_variables()['a'], arr)
    # inverted byte code leads to error on read because of swapped
    # header etc.
    rdr = MatFile5Reader(stream, byte_order=boc.swapped_code)
    assert_raises(Exception, rdr.get_variables)
    rdr.byte_order = boc.native_code
    assert_array_equal(rdr.get_variables()['a'], arr)
    # check chars_as_strings option on a char variable
    arr = np.array(['a string'])
    stream.truncate(0)
    stream.seek(0)
    savemat(stream, {'a': arr})
    rdr = MatFile5Reader(stream)
    assert_array_equal(rdr.get_variables()['a'], arr)
    rdr = MatFile5Reader(stream, chars_as_strings=False)
    carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1'))
    assert_array_equal(rdr.get_variables()['a'], carr)
    rdr.chars_as_strings = True
    assert_array_equal(rdr.get_variables()['a'], arr)
def test_empty_string():
    # make sure reading empty string does not raise error
    estring_fname = pjoin(test_data_path, 'single_empty_string.mat')
    fp = open(estring_fname, 'rb')
    rdr = MatFile5Reader(fp)
    d = rdr.get_variables()
    fp.close()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    # Empty string round trip. Matlab cannot distinguish
    # between a string array that is empty, and a string array
    # containing a single empty string, because it stores strings as
    # arrays of char. There is no way of having an array of char that
    # is not empty, but contains an empty string.
    stream = BytesIO()
    savemat(stream, {'a': np.array([''])})
    rdr = MatFile5Reader(stream)
    d = rdr.get_variables()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    stream.truncate(0)
    stream.seek(0)
    savemat(stream, {'a': np.array([], dtype='U1')})
    rdr = MatFile5Reader(stream)
    d = rdr.get_variables()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    stream.close()
def test_corrupted_data():
    """Corrupt zlib payloads raise the appropriate error on read."""
    import zlib
    for exc, fname in [(ValueError, 'corrupted_zlib_data.mat'),
                       (zlib.error, 'corrupted_zlib_checksum.mat')]:
        with open(pjoin(test_data_path, fname), 'rb') as fp:
            rdr = MatFile5Reader(fp)
            assert_raises(exc, rdr.get_variables)
def test_corrupted_data_check_can_be_disabled():
    # with integrity verification off, a corrupt stream reads without error
    with open(pjoin(test_data_path, 'corrupted_zlib_data.mat'), 'rb') as fp:
        rdr = MatFile5Reader(fp, verify_compressed_data_integrity=False)
        rdr.get_variables()
def test_read_both_endian():
    """Check big- and little-endian MAT files read identically."""
    for fname in ('big_endian.mat', 'little_endian.mat'):
        # BUG fix: use a context manager so the file is closed even when
        # an assertion below fails (the handle previously leaked).
        with open(pjoin(test_data_path, fname), 'rb') as fp:
            rdr = MatFile5Reader(fp)
            d = rdr.get_variables()
        assert_array_equal(d['strings'],
                           np.array([['hello'],
                                     ['world']], dtype=object))
        assert_array_equal(d['floats'],
                           np.array([[2., 3.],
                                     [3., 4.]], dtype=np.float32))
def test_write_opposite_endian():
    # We don't support writing opposite endian .mat files, but we need to behave
    # correctly if the user supplies an other-endian NumPy array to write out.
    def _opposite_endian(arr):
        # ndarray.newbyteorder() was removed in NumPy 2.0; byteswap the
        # data and reinterpret it with the byte-flipped dtype instead.
        return arr.byteswap().view(arr.dtype.newbyteorder())

    float_arr = np.array([[2., 3.],
                          [3., 4.]])
    int_arr = np.arange(6).reshape((2, 3))
    uni_arr = np.array(['hello', 'world'], dtype='U')
    stream = BytesIO()
    savemat(stream, {'floats': _opposite_endian(float_arr),
                     'ints': _opposite_endian(int_arr),
                     'uni_arr': _opposite_endian(uni_arr)})
    rdr = MatFile5Reader(stream)
    d = rdr.get_variables()
    assert_array_equal(d['floats'], float_arr)
    assert_array_equal(d['ints'], int_arr)
    assert_array_equal(d['uni_arr'], uni_arr)
    stream.close()
def test_logical_array():
    # The roundtrip test doesn't verify that we load the data up with the
    # correct (bool) dtype
    with open(pjoin(test_data_path, 'testbool_8_WIN64.mat'), 'rb') as fobj:
        rdr = MatFile5Reader(fobj, mat_dtype=True)
        d = rdr.get_variables()
    x = np.array([[True], [False]], dtype=np.bool_)
    assert_array_equal(d['testbools'], x)
    assert_equal(d['testbools'].dtype, x.dtype)
def test_logical_out_type():
    # Confirm that bool type written as uint8, uint8 class
    # See gh-4022
    stream = BytesIO()
    barr = np.array([False, True, False])
    savemat(stream, {'barray': barr})
    stream.seek(0)
    reader = MatFile5Reader(stream)
    reader.initialize_read()
    reader.read_file_header()
    hdr, _ = reader.read_var_header()
    assert_equal(hdr.mclass, mio5p.mxUINT8_CLASS)
    assert_equal(hdr.is_logical, True)
    var = reader.read_var_array(hdr, False)
    assert_equal(var.dtype.type, np.uint8)
def test_roundtrip_zero_dimensions():
    # A (10, 0) array survives a save/load round trip with shape intact
    buf = BytesIO()
    savemat(buf, {'d':np.empty((10, 0))})
    loaded = loadmat(buf)
    assert loaded['d'].shape == (10, 0)
def test_mat4_3d():
    # test behavior when writing 3-D arrays to matlab 4 files
    # (MAT 4 only supports 2-D matrices, so this must raise)
    stream = BytesIO()
    arr = np.arange(24).reshape((2,3,4))
    assert_raises(ValueError, savemat, stream, {'a': arr}, True, '4')
def test_func_read():
    """MATLAB functions read as MatlabFunction; writing them must fail."""
    func_eg = pjoin(test_data_path, 'testfunc_7.4_GLNX86.mat')
    fp = open(func_eg, 'rb')
    rdr = MatFile5Reader(fp)
    d = rdr.get_variables()
    fp.close()
    assert isinstance(d['testfunc'], MatlabFunction)
    stream = BytesIO()
    wtr = MatFile5Writer(stream)
    assert_raises(MatWriteError, wtr.put_variables, d)
def test_mat_dtype():
    """mat_dtype toggles between storage dtype and MATLAB class dtype."""
    double_eg = pjoin(test_data_path, 'testmatrix_6.1_SOL2.mat')
    fp = open(double_eg, 'rb')
    rdr = MatFile5Reader(fp, mat_dtype=False)
    d = rdr.get_variables()
    fp.close()
    assert_equal(d['testmatrix'].dtype.kind, 'u')
    fp = open(double_eg, 'rb')
    rdr = MatFile5Reader(fp, mat_dtype=True)
    d = rdr.get_variables()
    fp.close()
    assert_equal(d['testmatrix'].dtype.kind, 'f')
def test_sparse_in_struct():
    # reproduces bug found by DC where Cython code was insisting on
    # ndarray return type, but getting sparse matrix
    st = {'sparsefield': SP.coo_matrix(np.eye(4))}
    stream = BytesIO()
    savemat(stream, {'a':st})
    d = loadmat(stream, struct_as_record=True)
    assert_array_equal(d['a'][0, 0]['sparsefield'].toarray(), np.eye(4))
def test_mat_struct_squeeze():
    """squeeze_me with mat_struct loading must not raise."""
    stream = BytesIO()
    in_d = {'st':{'one':1, 'two':2}}
    savemat(stream, in_d)
    # no error without squeeze
    loadmat(stream, struct_as_record=False)
    # previous error was with squeeze, with mat_struct
    loadmat(stream, struct_as_record=False, squeeze_me=True)
def test_scalar_squeeze():
    """squeeze_me collapses 1x1 values to Python/NumPy scalars."""
    stream = BytesIO()
    in_d = {'scalar': [[0.1]], 'string': 'my name', 'st':{'one':1, 'two':2}}
    savemat(stream, in_d)
    out_d = loadmat(stream, squeeze_me=True)
    assert_(isinstance(out_d['scalar'], float))
    assert_(isinstance(out_d['string'], str))
    assert_(isinstance(out_d['st'], np.ndarray))
def test_str_round():
    """String arrays round-trip with right-padding to equal length.

    From a report by Angus McMorland on the mailing list, 3 May 2010.
    """
    stream = BytesIO()
    in_arr = np.array(['Hello', 'Foob'])
    out_arr = np.array(['Hello', 'Foob '])
    savemat(stream, dict(a=in_arr))
    res = loadmat(stream)
    # resulted in ['HloolFoa', 'elWrdobr']
    assert_array_equal(res['a'], out_arr)
    stream.truncate(0)
    stream.seek(0)
    # Make Fortran ordered version of string
    in_str = in_arr.tobytes(order='F')
    # BUG fix: was ``shape=a.shape`` -- `a` is not defined in this
    # function's scope (NameError); the intended source is `in_arr`.
    in_from_str = np.ndarray(shape=in_arr.shape,
                             dtype=in_arr.dtype,
                             order='F',
                             buffer=in_str)
    savemat(stream, dict(a=in_from_str))
    # BUG fix: re-load before checking; previously this asserted against
    # the stale `res` from the first round trip.
    res = loadmat(stream)
    assert_array_equal(res['a'], out_arr)
    # unicode save did lead to buffer too small error
    stream.truncate(0)
    stream.seek(0)
    in_arr_u = in_arr.astype('U')
    out_arr_u = out_arr.astype('U')
    savemat(stream, {'a': in_arr_u})
    res = loadmat(stream)
    assert_array_equal(res['a'], out_arr_u)
def test_fieldnames():
    # Field names of a saved struct survive the round trip
    stream = BytesIO()
    savemat(stream, {'a': {'a':1, 'b':2}})
    loaded = loadmat(stream)
    names = loaded['a'].dtype.names
    assert_equal(set(names), {'a', 'b'})
def test_loadmat_varnames():
    # Test that we can get just one variable from a mat file using loadmat
    mat5_sys_names = ['__globals__',
                      '__header__',
                      '__version__']
    for eg_file, sys_v_names in (
        (pjoin(test_data_path, 'testmulti_4.2c_SOL2.mat'), []), (pjoin(
            test_data_path, 'testmulti_7.4_GLNX86.mat'), mat5_sys_names)):
        vars = loadmat(eg_file)
        assert_equal(set(vars.keys()), set(['a', 'theta'] + sys_v_names))
        vars = loadmat(eg_file, variable_names='a')
        assert_equal(set(vars.keys()), set(['a'] + sys_v_names))
        vars = loadmat(eg_file, variable_names=['a'])
        assert_equal(set(vars.keys()), set(['a'] + sys_v_names))
        vars = loadmat(eg_file, variable_names=['theta'])
        assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))
        vars = loadmat(eg_file, variable_names=('theta',))
        assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))
        vars = loadmat(eg_file, variable_names=[])
        assert_equal(set(vars.keys()), set(sys_v_names))
        # check the caller's list is not mutated by loadmat
        vnames = ['theta']
        vars = loadmat(eg_file, variable_names=vnames)
        assert_equal(vnames, ['theta'])
def test_round_types():
    # Check that saving, loading preserves dtype in most cases
    arr = np.arange(10)
    stream = BytesIO()
    for dts in ('f8','f4','i8','i4','i2','i1',
                'u8','u4','u2','u1','c16','c8'):
        stream.truncate(0)
        stream.seek(0)  # needed for BytesIO in Python 3
        savemat(stream, {'arr': arr.astype(dts)})
        vars = loadmat(stream)
        assert_equal(np.dtype(dts), vars['arr'].dtype)
def test_varmats_from_mat():
    # Make a mat file with several variables, write it, read it back
    names_vars = (('arr', mlarr(np.arange(10))),
                  ('mystr', mlarr('a string')),
                  ('mynum', mlarr(10)))
    # Dict like thing to give variables in defined order
    class C:
        def items(self):
            return names_vars
    stream = BytesIO()
    savemat(stream, C())
    varmats = varmats_from_mat(stream)
    assert_equal(len(varmats), 3)
    for i in range(3):
        name, var_stream = varmats[i]
        exp_name, exp_res = names_vars[i]
        assert_equal(name, exp_name)
        res = loadmat(var_stream)
        assert_array_equal(res[name], exp_res)
def test_one_by_zero():
    # Test 1x0 chars get read correctly
    func_eg = pjoin(test_data_path, 'one_by_zero_char.mat')
    fp = open(func_eg, 'rb')
    rdr = MatFile5Reader(fp)
    d = rdr.get_variables()
    fp.close()
    assert_equal(d['var'].shape, (0,))
def test_load_mat4_le():
    # We were getting byte order wrong when reading little-endian floa64 dense
    # matrices on big-endian platforms
    mat4_fname = pjoin(test_data_path, 'test_mat4_le_floats.mat')
    vars = loadmat(mat4_fname)
    assert_array_equal(vars['a'], [[0.1, 1.2]])
def test_unicode_mat4():
    # MAT 4 stores unicode as latin1; check the value survives a round trip
    bio = BytesIO()
    data = {'second_cat': 'Schrödinger'}
    savemat(bio, data, format='4')
    loaded = loadmat(bio)
    assert_equal(loaded['second_cat'], data['second_cat'])
def test_logical_sparse():
    # Test we can read logical sparse stored in mat file as bytes.
    # See https://github.com/scipy/scipy/issues/3539.
    # In some files saved by MATLAB, the sparse data elements (Real Part
    # Subelement in MATLAB speak) are stored with apparent type double
    # (miDOUBLE) but are in fact single bytes.
    filename = pjoin(test_data_path,'logical_sparse.mat')
    # Before fix, this would crash with:
    # ValueError: indices and data should have the same size
    d = loadmat(filename, struct_as_record=True)
    log_sp = d['sp_log_5_4']
    assert_(isinstance(log_sp, SP.csc_matrix))
    assert_equal(log_sp.dtype.type, np.bool_)
    assert_array_equal(log_sp.toarray(),
                       [[True, True, True, False],
                        [False, False, True, False],
                        [False, False, True, False],
                        [False, False, False, False],
                        [False, False, False, False]])
def test_empty_sparse():
    # Can we read empty sparse matrices?
    sio = BytesIO()
    import scipy.sparse
    empty_sparse = scipy.sparse.csr_matrix([[0,0],[0,0]])
    savemat(sio, dict(x=empty_sparse))
    sio.seek(0)
    res = loadmat(sio)
    assert_array_equal(res['x'].shape, empty_sparse.shape)
    assert_array_equal(res['x'].toarray(), 0)
    # Do empty sparse matrices get written with max nnz 1?
    # See https://github.com/scipy/scipy/issues/4208
    sio.seek(0)
    reader = MatFile5Reader(sio)
    reader.initialize_read()
    reader.read_file_header()
    hdr, _ = reader.read_var_header()
    assert_equal(hdr.nzmax, 1)
def test_empty_mat_error():
    # Test we get a specific warning for an empty mat file
    sio = BytesIO()
    assert_raises(MatReadError, loadmat, sio)
def test_miuint32_compromise():
    # Reader should accept miUINT32 for miINT32, but check signs
    # mat file with miUINT32 for miINT32, but OK values
    filename = pjoin(test_data_path, 'miuint32_for_miint32.mat')
    res = loadmat(filename)
    assert_equal(res['an_array'], np.arange(10)[None, :])
    # mat file with miUINT32 for miINT32, with negative value
    filename = pjoin(test_data_path, 'bad_miuint32.mat')
    with assert_raises(ValueError):
        loadmat(filename)
def test_miutf8_for_miint8_compromise():
    # Check reader accepts ascii as miUTF8 for array names
    filename = pjoin(test_data_path, 'miutf8_array_name.mat')
    res = loadmat(filename)
    assert_equal(res['array_name'], [[1]])
    # mat file with non-ascii utf8 name raises error
    filename = pjoin(test_data_path, 'bad_miutf8_array_name.mat')
    with assert_raises(ValueError):
        loadmat(filename)
def test_bad_utf8():
    # Check that reader reads bad UTF with 'replace' option
    filename = pjoin(test_data_path,'broken_utf8.mat')
    res = loadmat(filename)
    assert_equal(res['bad_string'],
                 b'\x80 am broken'.decode('utf8', 'replace'))
def test_save_unicode_field(tmpdir):
    # struct with a unicode string field value must save without error
    filename = os.path.join(str(tmpdir), 'test.mat')
    test_dict = {'a':{'b':1,'c':'test_str'}}
    savemat(filename, test_dict)
def test_save_custom_array_type(tmpdir):
class CustomArray:
def __array__(self):
return np.arange(6.0).reshape(2, 3)
a = CustomArray()
filename = os.path.join(str(tmpdir), 'test.mat')
savemat(filename, {'a': a})
out = loadmat(filename)
assert_array_equal(out['a'], np.array(a))
def test_filenotfound():
    # Check the correct error is thrown
    assert_raises(OSError, loadmat, "NotExistentFile00.mat")
    assert_raises(OSError, loadmat, "NotExistentFile00")
def test_simplify_cells():
    # Test output when simplify_cells=True
    filename = pjoin(test_data_path, 'testsimplecell.mat')
    res1 = loadmat(filename, simplify_cells=True)
    res2 = loadmat(filename, simplify_cells=False)
    assert_(isinstance(res1["s"], dict))
    assert_(isinstance(res2["s"], np.ndarray))
    assert_array_equal(res1["s"]["mycell"], np.array(["a", "b", "c"]))
@pytest.mark.parametrize('version, filt, regex', [
    (0, '_4*_*', None),
    (1, '_5*_*', None),
    (1, '_6*_*', None),
    (1, '_7*_*', '^((?!hdf5).)*$'),  # not containing hdf5
    (2, '_7*_*', '.*hdf5.*'),
    (1, '8*_*', None),
])
def test_matfile_version(version, filt, regex):
    """Check matfile_version detects the major version of each data file."""
    use_filt = pjoin(test_data_path, 'test*%s.mat' % filt)
    files = glob(use_filt)
    if regex is not None:
        files = [file for file in files if re.match(regex, file) is not None]
    assert len(files) > 0, \
        f"No files for version {version} using filter {filt}"
    for file in files:
        got_version = matfile_version(file)
        assert got_version[0] == version
def test_opaque():
    """Test that we can read a MatlabOpaque object."""
    data = loadmat(pjoin(test_data_path, 'parabola.mat'))
    assert isinstance(data['parabola'], MatlabFunction)
    assert isinstance(data['parabola'].item()[3].item()[3], MatlabOpaque)
def test_opaque_simplify():
    """Test that we can read a MatlabOpaque object when simplify_cells=True."""
    data = loadmat(pjoin(test_data_path, 'parabola.mat'), simplify_cells=True)
    assert isinstance(data['parabola'], MatlabFunction)
def test_deprecation():
    """Test that access to previous attributes still works."""
    # This should be accessible immediately from scipy.io import
    with assert_warns(DeprecationWarning):
        scipy.io.matlab.mio5_params.MatlabOpaque  # noqa
    # These should be importable but warn as well
    with assert_warns(DeprecationWarning):
        from scipy.io.matlab.miobase import MatReadError  # noqa
def test_gh_17992(tmp_path):
    """Ragged lists of arrays save as cell arrays and round-trip (gh-17992)."""
    rng = np.random.default_rng(12345)
    outfile = tmp_path / "lists.mat"
    array_one = rng.random((5,3))
    array_two = rng.random((6,3))
    list_of_arrays = [array_one, array_two]
    # BUG fix: np.VisibleDeprecationWarning was removed from the top-level
    # namespace in NumPy 2.0; it lives in np.exceptions since NumPy 1.25.
    try:
        vis_dep_warning = np.exceptions.VisibleDeprecationWarning
    except AttributeError:  # NumPy < 1.25
        vis_dep_warning = np.VisibleDeprecationWarning
    # warning suppression only needed for NumPy < 1.24.0
    with np.testing.suppress_warnings() as sup:
        sup.filter(vis_dep_warning)
        savemat(outfile,
                {'data': list_of_arrays},
                long_field_names=True,
                do_compression=True)
    # round trip check
    new_dict = {}
    loadmat(outfile,
            new_dict)
    assert_allclose(new_dict["data"][0][0], array_one)
    assert_allclose(new_dict["data"][0][1], array_two)
| 44,564
| 32.532731
| 80
|
py
|
scipy
|
scipy-main/scipy/io/matlab/tests/test_pathological.py
|
""" Test reading of files not conforming to matlab specification
We try and read any file that matlab reads, these files included
"""
from os.path import dirname, join as pjoin
from numpy.testing import assert_
from pytest import raises as assert_raises
from scipy.io.matlab._mio import loadmat
TEST_DATA_PATH = pjoin(dirname(__file__), 'data')
def test_multiple_fieldnames():
    """A struct with duplicate field names must still load."""
    # Example provided by Dharhas Pothina
    # Extracted using mio5.varmats_from_mat
    multi_fname = pjoin(TEST_DATA_PATH, 'nasty_duplicate_fieldnames.mat')
    vars = loadmat(multi_fname)
    funny_names = vars['Summary'].dtype.names
    # Duplicate names come out disambiguated — presumably prefixed with an
    # index (_1_, _2_, ...); verify against the fixture if this changes.
    assert_({'_1_Station_Q', '_2_Station_Q',
             '_3_Station_Q'}.issubset(funny_names))
def test_malformed1():
    """Malformed header data must raise ValueError, not crash (gh-6072)."""
    # Example from gh-6072
    # Contains malformed header data, which previously resulted into a
    # buffer overflow.
    #
    # Should raise an exception, not segfault
    fname = pjoin(TEST_DATA_PATH, 'malformed1.mat')
    with open(fname, 'rb') as f:
        assert_raises(ValueError, loadmat, f)
| 1,055
| 30.058824
| 73
|
py
|
scipy
|
scipy-main/scipy/io/matlab/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/io/arff/arffread.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io.arff` namespace for importing the functions
# included below.
import warnings
from . import _arffread
__all__ = [ # noqa: F822
'MetaData', 'loadarff', 'ArffError', 'ParseArffError',
'r_meta', 'r_comment', 'r_empty', 'r_headerline',
'r_datameta', 'r_relation', 'r_attribute', 'r_nominal',
'r_date', 'r_comattrval', 'r_wcomattrval', 'Attribute',
'NominalAttribute', 'NumericAttribute', 'StringAttribute',
'DateAttribute', 'RelationalAttribute', 'to_attribute',
'csv_sniffer_has_bug_last_field', 'workaround_csv_sniffer_bug_last_field',
'split_data_line', 'tokenize_attribute', 'tokenize_single_comma',
'tokenize_single_wcomma', 'read_relational_attribute', 'read_header',
'basic_stats', 'print_attribute', 'test_weka'
]
def __dir__():
    # Advertise the deprecated namespace's public API for dir()/completion.
    return __all__
def __getattr__(name):
    """Module-level __getattr__ (PEP 562): forward deprecated attribute
    access to the private ``_arffread`` module with a DeprecationWarning."""
    if name not in __all__:
        raise AttributeError(
            "scipy.io.arff.arffread is deprecated and has no attribute "
            f"{name}. Try looking in scipy.io.arff instead.")
    # stacklevel=2 points the warning at the caller, not at this shim.
    warnings.warn(f"Please use `{name}` from the `scipy.io.arff` namespace, "
                  "the `scipy.io.arff.arffread` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_arffread, name)
| 1,364
| 35.891892
| 78
|
py
|
scipy
|
scipy-main/scipy/io/arff/setup.py
|
def configuration(parent_package='io',top_path=None):
    # numpy.distutils build configuration for the scipy.io.arff subpackage.
    from numpy.distutils.misc_util import Configuration
    config = Configuration('arff', parent_package, top_path)
    # Ship the ARFF fixture files used by the test suite.
    config.add_data_dir('tests')
    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 343
| 30.272727
| 60
|
py
|
scipy
|
scipy-main/scipy/io/arff/__init__.py
|
"""
Module to read ARFF files
=========================
ARFF is the standard data format for WEKA.
It is a text file format which support numerical, string and data values.
The format can also represent missing data and sparse data.
Notes
-----
The ARFF support in ``scipy.io`` provides file reading functionality only.
For more extensive ARFF functionality, see `liac-arff
<https://github.com/renatopp/liac-arff>`_.
See the `WEKA website <http://weka.wikispaces.com/ARFF>`_
for more details about the ARFF format and available datasets.
"""
from ._arffread import *
from . import _arffread
# Deprecated namespaces, to be removed in v2.0.0
from .import arffread
__all__ = _arffread.__all__ + ['arffread']
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 805
| 26.793103
| 74
|
py
|
scipy
|
scipy-main/scipy/io/arff/_arffread.py
|
# Last Change: Mon Aug 20 08:00 PM 2007 J
import re
import datetime
import numpy as np
import csv
import ctypes
"""A module to read arff files."""
__all__ = ['MetaData', 'loadarff', 'ArffError', 'ParseArffError']
# An Arff file is basically two parts:
# - header
# - data
#
# A header has each of its components starting by @META where META is one of
# the keyword (attribute of relation, for now).
# TODO:
# - both integer and reals are treated as numeric -> the integer info
# is lost!
# - Replace ValueError by ParseError or something
# We know can handle the following:
# - numeric and nominal attributes
# - missing values for numeric attributes
r_meta = re.compile(r'^\s*@')
# Match a comment
r_comment = re.compile(r'^%')
# Match an empty line
r_empty = re.compile(r'^\s+$')
# Match a header line, that is a line which starts by @ + a word
r_headerline = re.compile(r'^\s*@\S*')
r_datameta = re.compile(r'^@[Dd][Aa][Tt][Aa]')
r_relation = re.compile(r'^@[Rr][Ee][Ll][Aa][Tt][Ii][Oo][Nn]\s*(\S*)')
r_attribute = re.compile(r'^\s*@[Aa][Tt][Tt][Rr][Ii][Bb][Uu][Tt][Ee]\s*(..*$)')
r_nominal = re.compile(r'{(.+)}')
r_date = re.compile(r"[Dd][Aa][Tt][Ee]\s+[\"']?(.+?)[\"']?$")
# To get attributes name enclosed with ''
r_comattrval = re.compile(r"'(..+)'\s+(..+$)")
# To get normal attributes
r_wcomattrval = re.compile(r"(\S+)\s+(..+$)")
# ------------------------
# Module defined exception
# ------------------------
class ArffError(OSError):
    """Base error raised for ARFF reading problems."""
    pass
class ParseArffError(ArffError):
    """Raised when the file content is not valid ARFF."""
    pass
# ----------
# Attributes
# ----------
class Attribute:
    """Abstract base class for ARFF attribute types.

    Concrete subclasses override ``parse_attribute`` (a factory that
    returns an instance when it recognises the declaration string) and
    ``parse_data`` (conversion of one raw data token).
    """
    type_name = None
    def __init__(self, name):
        # State shared by every concrete attribute type.
        self.name = name
        self.range = None
        self.dtype = np.object_
    @classmethod
    def parse_attribute(cls, name, attr_string):
        """Return a parsed attribute when this class recognises
        *attr_string*; the base class recognises nothing."""
        return None
    def parse_data(self, data_str):
        """Convert one raw data token; the base class converts nothing."""
        return None
    def __str__(self):
        # "name,type" — subclasses append extra detail.
        return self.name + ',' + self.type_name
class NominalAttribute(Attribute):
    """ARFF nominal attribute: a closed set of string values."""
    type_name = 'nominal'
    def __init__(self, name, values):
        super().__init__(name)
        self.values = values
        self.range = values
        # Fixed-width bytes dtype wide enough for the longest value.
        # np.bytes_ is the NumPy 2.0-compatible spelling of the removed
        # np.string_ alias (same type on all supported NumPy versions).
        self.dtype = (np.bytes_, max(len(i) for i in values))
    @staticmethod
    def _get_nom_val(atrv):
        """Given a string containing a nominal type, returns a tuple of the
        possible values.

        A nominal type is defined as something framed between braces ({}).

        Parameters
        ----------
        atrv : str
           Nominal type definition

        Returns
        -------
        poss_vals : tuple
           possible values

        Examples
        --------
        >>> NominalAttribute._get_nom_val("{floup, bouga, fl, ratata}")
        ('floup', 'bouga', 'fl', 'ratata')
        """
        m = r_nominal.match(atrv)
        if m:
            # Reuse the CSV splitter so quoted values keep embedded commas.
            attrs, _ = split_data_line(m.group(1))
            return tuple(attrs)
        else:
            raise ValueError("This does not look like a nominal string")
    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.

        For nominal attributes, the attribute string would be like '{<attr_1>,
        <attr2>, <attr_3>}'.
        """
        if attr_string[0] == '{':
            values = cls._get_nom_val(attr_string)
            return cls(name, values)
        else:
            return None
    def parse_data(self, data_str):
        """
        Parse a value of this type.

        '?' is accepted as the missing-value marker.
        """
        if data_str in self.values or data_str == '?':
            return data_str
        else:
            raise ValueError("{} value not in {}".format(str(data_str),
                                                         str(self.values)))
    def __str__(self):
        # "name,{v1,v2,...}" — mirrors the ARFF declaration syntax.
        return self.name + ",{" + ",".join(self.values) + "}"
class NumericAttribute(Attribute):
    """ARFF numeric attribute (declared 'numeric', 'int' or 'real')."""
    def __init__(self, name):
        super().__init__(name)
        self.type_name = 'numeric'
        # np.float64 is the NumPy 2.0-compatible spelling of the removed
        # np.float_ alias (identical type on all supported versions).
        self.dtype = np.float64
    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.

        For numeric attributes, the attribute string would be like
        'numeric' or 'int' or 'real'.
        """
        attr_string = attr_string.lower().strip()
        # startswith is equivalent to the old slice comparison, just clearer.
        if attr_string.startswith(('numeric', 'int', 'real')):
            return cls(name)
        else:
            return None
    def parse_data(self, data_str):
        """
        Parse a value of this type.

        Parameters
        ----------
        data_str : str
           string to convert

        Returns
        -------
        f : float
           where float can be nan

        Examples
        --------
        >>> atr = NumericAttribute('atr')
        >>> atr.parse_data('1')
        1.0
        >>> atr.parse_data('1\\n')
        1.0
        >>> atr.parse_data('?\\n')
        nan
        """
        # '?' anywhere in the token marks a missing value.
        if '?' in data_str:
            return np.nan
        else:
            return float(data_str)
    def _basic_stats(self, data):
        # n/(n-1) correction factor applied to the std estimate.
        nbfac = data.size * 1. / (data.size - 1)
        return (np.nanmin(data), np.nanmax(data),
                np.mean(data), np.std(data) * nbfac)
class StringAttribute(Attribute):
    """ARFF string attribute (declared as 'string'); loading string data
    is not implemented, but the header must still parse."""
    def __init__(self, name):
        super().__init__(name)
        self.type_name = 'string'
    @classmethod
    def parse_attribute(cls, name, attr_string):
        """Return a StringAttribute when *attr_string* declares a string
        type, otherwise None."""
        normalized = attr_string.lower().strip()
        if normalized.startswith('string'):
            return cls(name)
        return None
class DateAttribute(Attribute):
    """ARFF date attribute carrying a Java SimpleDateFormat-style pattern."""
    def __init__(self, name, date_format, datetime_unit):
        super().__init__(name)
        self.date_format = date_format
        self.datetime_unit = datetime_unit
        self.type_name = 'date'
        self.range = date_format
        self.dtype = np.datetime64(0, self.datetime_unit)
    @staticmethod
    def _get_date_format(atrv):
        """Translate the Java SimpleDateFormat pattern in *atrv* into a
        C strftime pattern plus the matching numpy datetime64 unit.

        Raises
        ------
        ValueError
            If no date format is present, the format is unsupported, or a
            time zone is requested.
        """
        m = r_date.match(atrv)
        if m:
            pattern = m.group(1).strip()
            # convert time pattern from Java's SimpleDateFormat to C's format
            datetime_unit = None
            if "yyyy" in pattern:
                pattern = pattern.replace("yyyy", "%Y")
                datetime_unit = "Y"
            elif "yy" in pattern:
                # BUG FIX: this branch previously read `elif "yy":`, which is
                # always true, so every pattern without "yyyy" was treated as
                # having a 2-digit year and invalid formats silently got unit
                # "Y" instead of reaching the "unsupported" check below.
                pattern = pattern.replace("yy", "%y")
                datetime_unit = "Y"
            if "MM" in pattern:
                pattern = pattern.replace("MM", "%m")
                datetime_unit = "M"
            if "dd" in pattern:
                pattern = pattern.replace("dd", "%d")
                datetime_unit = "D"
            if "HH" in pattern:
                pattern = pattern.replace("HH", "%H")
                datetime_unit = "h"
            if "mm" in pattern:
                pattern = pattern.replace("mm", "%M")
                datetime_unit = "m"
            if "ss" in pattern:
                pattern = pattern.replace("ss", "%S")
                datetime_unit = "s"
            if "z" in pattern or "Z" in pattern:
                raise ValueError("Date type attributes with time zone not "
                                 "supported, yet")
            if datetime_unit is None:
                raise ValueError("Invalid or unsupported date format")
            return pattern, datetime_unit
        else:
            raise ValueError("Invalid or no date format")
    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.

        For date attributes, the attribute string would be like
        'date <format>'.
        """
        attr_string_lower = attr_string.lower().strip()
        if attr_string_lower[:len('date')] == 'date':
            date_format, datetime_unit = cls._get_date_format(attr_string)
            return cls(name, date_format, datetime_unit)
        else:
            return None
    def parse_data(self, data_str):
        """
        Parse a value of this type; '?' maps to NaT.
        """
        date_str = data_str.strip().strip("'").strip('"')
        if date_str == '?':
            return np.datetime64('NaT', self.datetime_unit)
        else:
            dt = datetime.datetime.strptime(date_str, self.date_format)
            return np.datetime64(dt).astype(
                "datetime64[%s]" % self.datetime_unit)
    def __str__(self):
        return super().__str__() + ',' + self.date_format
class RelationalAttribute(Attribute):
    # Composite attribute whose data cells hold a nested, newline-separated
    # table parsed with the sub-attributes collected in self.attributes.
    def __init__(self, name):
        super().__init__(name)
        self.type_name = 'relational'
        self.dtype = np.object_
        self.attributes = []
        # CSV dialect is sniffed once and reused across nested rows.
        self.dialect = None
    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.

        For relational attributes, the attribute string would be like
        'relational'.
        """
        attr_string_lower = attr_string.lower().strip()
        if attr_string_lower[:len('relational')] == 'relational':
            return cls(name)
        else:
            return None
    def parse_data(self, data_str):
        # Unescape the embedded table, split it into rows, and parse each
        # cell with the matching nested attribute.
        elems = list(range(len(self.attributes)))
        escaped_string = data_str.encode().decode("unicode-escape")
        row_tuples = []
        for raw in escaped_string.split("\n"):
            row, self.dialect = split_data_line(raw, self.dialect)
            row_tuples.append(tuple(
                [self.attributes[i].parse_data(row[i]) for i in elems]))
        # Structured array with one field per nested attribute.
        return np.array(row_tuples,
                        [(a.name, a.dtype) for a in self.attributes])
    def __str__(self):
        return (super().__str__() + '\n\t' +
                '\n\t'.join(str(a) for a in self.attributes))
# -----------------
# Various utilities
# -----------------
def to_attribute(name, attr_string):
    """Build the first Attribute subclass that recognises *attr_string*.

    Each known attribute class is tried in priority order (nominal first,
    since its '{...}' syntax is unambiguous); raises ParseArffError when
    nothing matches.
    """
    for candidate in (NominalAttribute, NumericAttribute, DateAttribute,
                      StringAttribute, RelationalAttribute):
        parsed = candidate.parse_attribute(name, attr_string)
        if parsed is not None:
            return parsed
    raise ParseArffError("unknown attribute %s" % attr_string)
def csv_sniffer_has_bug_last_field():
    """
    Checks if the bug https://bugs.python.org/issue30157 is unpatched.
    """
    # The result is cached on the function object so the sniff runs at
    # most once per process.
    cached = getattr(csv_sniffer_has_bug_last_field, "has_bug", None)
    if cached is None:
        # A buggy Sniffer fails to detect the single quote here.
        dialect = csv.Sniffer().sniff("3, 'a'")
        cached = dialect.quotechar != "'"
        csv_sniffer_has_bug_last_field.has_bug = cached
    return cached
def workaround_csv_sniffer_bug_last_field(sniff_line, dialect, delimiters):
    """
    Workaround for the bug https://bugs.python.org/issue30157 if is unpatched.

    Re-runs (a copy of) the Sniffer's quote/delimiter detection with a
    corrected regex and patches *dialect* in place when the buggy
    "quoted last field" case was hit.
    """
    if csv_sniffer_has_bug_last_field():
        # Reuses code from the csv module
        # right_regex is the corrected version of the expression that the
        # buggy Sniffer gets wrong (quoted field at end of line).
        right_regex = r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'
        for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)',   # .*?",
                      right_regex, # ,".*?"
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
            regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
            matches = regexp.findall(sniff_line)
            if matches:
                break
        # If it does not match the expression that was bugged, then this bug does not apply
        if restr != right_regex:
            return
        groupindex = regexp.groupindex
        # There is only one end of the string
        assert len(matches) == 1
        m = matches[0]
        # findall returns plain tuples, so map named groups to positions.
        n = groupindex['quote'] - 1
        quote = m[n]
        n = groupindex['delim'] - 1
        delim = m[n]
        n = groupindex['space'] - 1
        space = bool(m[n])
        # Detect doubled quote characters ("" escaping) around the field.
        dq_regexp = re.compile(
            r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" %
            {'delim': re.escape(delim), 'quote': quote}, re.MULTILINE
        )
        doublequote = bool(dq_regexp.search(sniff_line))
        # Patch the sniffed dialect in place with the corrected findings.
        dialect.quotechar = quote
        if delim in delimiters:
            dialect.delimiter = delim
        dialect.doublequote = doublequote
        dialect.skipinitialspace = space
def split_data_line(line, dialect=None):
    """Split one ARFF data line into fields using the csv module.

    Parameters
    ----------
    line : str
        Raw line from the data section (may end with a newline).
    dialect : csv.Dialect, optional
        Previously sniffed dialect; sniffed from *line* when None.

    Returns
    -------
    row : list of str
        The individual field values.
    dialect : csv.Dialect
        The dialect actually used, so callers can reuse it for later lines.
    """
    delimiters = ",\t"
    # This can not be done in a per reader basis, and relational fields
    # can be HUGE
    csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))
    # Remove the line end if any.  endswith() also guards the empty string,
    # which previously raised IndexError on line[-1].
    if line.endswith('\n'):
        line = line[:-1]
    # Remove potential trailing whitespace
    line = line.strip()
    sniff_line = line
    # Add a delimiter if none is present, so that the csv.Sniffer
    # does not complain for a single-field CSV.
    if not any(d in line for d in delimiters):
        sniff_line += ","
    if dialect is None:
        dialect = csv.Sniffer().sniff(sniff_line, delimiters=delimiters)
        workaround_csv_sniffer_bug_last_field(sniff_line=sniff_line,
                                              dialect=dialect,
                                              delimiters=delimiters)
    row = next(csv.reader([line], dialect))
    return row, dialect
# --------------
# Parsing header
# --------------
def tokenize_attribute(iterable, attribute):
    """Parse one raw ``@attribute`` header line.

    Extracts the attribute name and its type string (quoted names with
    spaces are supported), builds the corresponding Attribute instance,
    and advances *iterable* past the declaration.  A relational attribute
    additionally consumes its whole nested block up to ``@end <name>``.

    Parameters
    ----------
    iterable : iterator of str
        The remaining header lines.
    attribute : str
        The raw ``@attribute`` line (case-insensitive, leading whitespace
        allowed).

    Returns
    -------
    attribute : Attribute
        The parsed attribute instance.
    next_item : str
        The next line to be parsed.
    """
    stripped = attribute.strip()
    matched = r_attribute.match(stripped)
    if not matched:
        raise ValueError("First line unparsable: %s" % stripped)
    # Everything after the @attribute keyword: name plus type string.
    rest = matched.group(1)
    if r_comattrval.match(rest):
        # Name is quoted with '' (may contain spaces).
        attr_name, attr_type = tokenize_single_comma(rest)
    elif r_wcomattrval.match(rest):
        # Plain unquoted name.
        attr_name, attr_type = tokenize_single_wcomma(rest)
    else:
        # Multi-line declarations do not seem to be supported by weka
        # either, so reject them.
        raise ValueError("multi line not supported yet")
    next_item = next(iterable)
    parsed = to_attribute(attr_name, attr_type)
    if attr_type.lower() == 'relational':
        # A relational attribute owns everything up to "@end <name>".
        next_item = read_relational_attribute(iterable, parsed, next_item)
    return parsed, next_item
def tokenize_single_comma(val):
    """Split a quoted declaration "'name' type" into (name, type).

    Raises ValueError when *val* does not match r_comattrval.
    """
    # XXX we match twice the same string (here and at the caller level). It is
    # stupid, but it is easier for now...
    matched = r_comattrval.match(val)
    if not matched:
        raise ValueError("Error while tokenizing single %s" % val)
    try:
        return matched.group(1).strip(), matched.group(2).strip()
    except IndexError as e:
        raise ValueError("Error while tokenizing attribute") from e
def tokenize_single_wcomma(val):
    """Split an unquoted declaration "name type" into (name, type).

    Raises ValueError when *val* does not match r_wcomattrval.
    """
    # XXX we match twice the same string (here and at the caller level). It is
    # stupid, but it is easier for now...
    matched = r_wcomattrval.match(val)
    if not matched:
        raise ValueError("Error while tokenizing single %s" % val)
    try:
        return matched.group(1).strip(), matched.group(2).strip()
    except IndexError as e:
        raise ValueError("Error while tokenizing attribute") from e
def read_relational_attribute(ofile, relational_attribute, i):
    """Read the nested attributes of a relational attribute.

    Consumes lines from *ofile* starting at *i* until the matching
    ``@end <name>`` marker, appending each nested ``@attribute`` to
    *relational_attribute*.  Returns the first line after the marker.
    """
    # re.escape guards against attribute names containing regex
    # metacharacters, which previously corrupted the end-marker pattern.
    r_end_relational = re.compile(r'^@[Ee][Nn][Dd]\s*' +
                                  re.escape(relational_attribute.name) +
                                  r'\s*$')
    while not r_end_relational.match(i):
        m = r_headerline.match(i)
        if m:
            isattr = r_attribute.match(i)
            if isattr:
                attr, i = tokenize_attribute(ofile, i)
                relational_attribute.attributes.append(attr)
            else:
                raise ValueError("Error parsing line %s" % i)
        else:
            # Skip comments/blank lines inside the relational block.
            i = next(ofile)
    # Step past the @end marker itself.
    i = next(ofile)
    return i
def read_header(ofile):
    """Read the ARFF header from the line iterator *ofile*.

    Returns
    -------
    relation : str or None
        The @relation name, when declared.
    attributes : list of Attribute
        The parsed @attribute declarations, in order of appearance.
    """
    line = next(ofile)
    # Skip any leading comment lines.
    while r_comment.match(line):
        line = next(ofile)
    relation = None
    attributes = []
    # The header is everything up to the @data marker.
    while not r_datameta.match(line):
        if r_headerline.match(line):
            if r_attribute.match(line):
                # tokenize_attribute already advances past the declaration.
                attr, line = tokenize_attribute(ofile, line)
                attributes.append(attr)
                continue
            isrel = r_relation.match(line)
            if isrel:
                relation = isrel.group(1)
            else:
                raise ValueError("Error parsing line %s" % line)
        line = next(ofile)
    return relation, attributes
class MetaData:
    """Small container to keep useful information on a ARFF dataset.

    Knows about attribute names, types and ranges, and preserves the
    declaration order: iterating over a MetaData instance yields the
    attribute names in the order they were defined.

    Examples
    --------
    ::

        data, meta = loadarff('iris.arff')
        # This will print the attributes names of the iris.arff dataset
        for i in meta:
            print(i)
        # This works too
        meta.names()
        # Getting attribute type
        types = meta.types()

    Methods
    -------
    names
    types
    """
    def __init__(self, rel, attr):
        self.name = rel
        # A plain dict preserves insertion order, keeping the attributes
        # in declaration order.
        self._attributes = {a.name: a for a in attr}
    def __repr__(self):
        parts = ["Dataset: %s\n" % self.name]
        for attr_name, attr in self._attributes.items():
            parts.append(f"\t{attr_name}'s type is {attr.type_name}")
            if attr.range:
                parts.append(", range is %s" % str(attr.range))
            parts.append('\n')
        return ''.join(parts)
    def __iter__(self):
        return iter(self._attributes)
    def __getitem__(self, key):
        attr = self._attributes[key]
        return attr.type_name, attr.range
    def names(self):
        """Return the list of attribute names.

        Returns
        -------
        attrnames : list of str
            The attribute names.
        """
        return list(self._attributes)
    def types(self):
        """Return the list of attribute types.

        Returns
        -------
        attr_types : list of str
            The attribute types.
        """
        return [attr.type_name for attr in self._attributes.values()]
def loadarff(f):
    """
    Read an arff file.

    The data is returned as a record array, which can be accessed much like
    a dictionary of NumPy arrays. For example, if one of the attributes is
    called 'pressure', then its first 10 data points can be accessed from the
    ``data`` record array like so: ``data['pressure'][0:10]``

    Parameters
    ----------
    f : file-like or str
       File-like object to read from, or filename to open.

    Returns
    -------
    data : record array
       The data of the arff file, accessible by attribute names.
    meta : `MetaData`
       Contains information about the arff file such as name and
       type of attributes, the relation (name of the dataset), etc.

    Raises
    ------
    ParseArffError
        This is raised if the given file is not ARFF-formatted.
    NotImplementedError
        The ARFF file has an attribute which is not supported yet.

    Notes
    -----
    This function should be able to read most arff files. Not
    implemented functionality include:

    * date type attributes
    * string type attributes

    It can read files with numeric and nominal attributes. It cannot read
    files with sparse data ({} in the file). However, this function can
    read files with missing data (? in the file), representing the data
    points as NaNs.
    """
    if hasattr(f, 'read'):
        # Already an open stream; the caller keeps ownership of it.
        return _loadarff(f)
    # We received a path, so we open (and always close) the file ourselves.
    with open(f) as ofile:
        return _loadarff(ofile)
def _loadarff(ofile):
    """Parse an already-open ARFF stream into (record array, MetaData)."""
    # Parse the header file
    try:
        rel, attr = read_header(ofile)
    except ValueError as e:
        msg = "Error while parsing header, error was: " + str(e)
        raise ParseArffError(msg) from e
    # Check whether we have a string attribute (not supported yet)
    hasstr = False
    for a in attr:
        if isinstance(a, StringAttribute):
            hasstr = True
    meta = MetaData(rel, attr)
    # XXX The following code is not great
    # Build the type descriptor descr and the list of convertors to convert
    # each attribute to the suitable type (which should match the one in
    # descr).
    # This can be used once we want to support integer as integer values and
    # not as numeric anymore (using masked arrays ?).
    if hasstr:
        # How to support string efficiently ? Ideally, we should know the max
        # size of the string before allocating the numpy array.
        raise NotImplementedError("String attributes not supported yet, sorry")
    ni = len(attr)
    def generator(row_iter, delim=','):
        # TODO: this is where we are spending time (~80%). I think things
        # could be made more efficiently:
        #   - We could for example "compile" the function, because some values
        #   do not change here.
        #   - The function to convert a line to dtyped values could also be
        #   generated on the fly from a string and be executed instead of
        #   looping.
        #   - The regex are overkill: for comments, checking that a line starts
        #   by % should be enough and faster, and for empty lines, same thing
        #   --> this does not seem to change anything.
        # 'compiling' the range since it does not change
        # Note, I have already tried zipping the converters and
        # row elements and got slightly worse performance.
        elems = list(range(ni))
        # The sniffed csv dialect from the first row is reused for the rest.
        dialect = None
        for raw in row_iter:
            # We do not abstract skipping comments and empty lines for
            # performance reasons.
            if r_comment.match(raw) or r_empty.match(raw):
                continue
            row, dialect = split_data_line(raw, dialect)
            yield tuple([attr[i].parse_data(row[i]) for i in elems])
    a = list(generator(ofile))
    # No error should happen here: it is a bug otherwise
    # NB: the comprehension's `a` is the attribute, not the row list above —
    # Python 3 comprehensions have their own scope, so the two do not clash.
    data = np.array(a, [(a.name, a.dtype) for a in attr])
    return data, meta
# ----
# Misc
# ----
def basic_stats(data):
    """Return (min, max, mean, std) for *data*, ignoring NaNs in the
    extrema; std carries the n/(n-1) sample correction factor."""
    correction = data.size / (data.size - 1)
    return (np.nanmin(data), np.nanmax(data),
            np.mean(data), np.std(data) * correction)
def print_attribute(name, tp, data):
    """Print a one-line summary of attribute *tp* over the column *data*."""
    # Local names chosen to avoid shadowing the type/min/max builtins.
    kind = tp.type_name
    if kind in ('numeric', 'real', 'integer'):
        lo, hi, mean, std = basic_stats(data)
        print(f"{name},{kind},{lo:f},{hi:f},{mean:f},{std:f}")
    else:
        print(str(tp))
def test_weka(filename):
    """Ad-hoc debugging helper: load *filename* and print per-attribute
    summaries (not a unit test)."""
    data, meta = loadarff(filename)
    print(len(data.dtype))
    print(data.size)
    for name in meta:
        # BUG FIX: meta[name] returns a plain (type_name, range) tuple, but
        # print_attribute dereferences tp.type_name, so passing the tuple
        # raised AttributeError.  Pass the Attribute object itself instead.
        print_attribute(name, meta._attributes[name], data[name])
# make sure nose does not find this as a test
test_weka.__test__ = False
if __name__ == '__main__':
    # Ad-hoc CLI: summarize the ARFF file given as the first argument.
    import sys
    filename = sys.argv[1]
    test_weka(filename)
| 26,359
| 28.094923
| 110
|
py
|
scipy
|
scipy-main/scipy/io/arff/tests/test_arffread.py
|
import datetime
import os
import sys
from os.path import join as pjoin
from io import StringIO
import numpy as np
from numpy.testing import (assert_array_almost_equal,
assert_array_equal, assert_equal, assert_)
from pytest import raises as assert_raises
from scipy.io.arff import loadarff
from scipy.io.arff._arffread import read_header, ParseArffError
data_path = pjoin(os.path.dirname(__file__), 'data')
test1 = pjoin(data_path, 'test1.arff')
test2 = pjoin(data_path, 'test2.arff')
test3 = pjoin(data_path, 'test3.arff')
test4 = pjoin(data_path, 'test4.arff')
test5 = pjoin(data_path, 'test5.arff')
test6 = pjoin(data_path, 'test6.arff')
test7 = pjoin(data_path, 'test7.arff')
test8 = pjoin(data_path, 'test8.arff')
test9 = pjoin(data_path, 'test9.arff')
test10 = pjoin(data_path, 'test10.arff')
test11 = pjoin(data_path, 'test11.arff')
test_quoted_nominal = pjoin(data_path, 'quoted_nominal.arff')
test_quoted_nominal_spaces = pjoin(data_path, 'quoted_nominal_spaces.arff')
expect4_data = [(0.1, 0.2, 0.3, 0.4, 'class1'),
(-0.1, -0.2, -0.3, -0.4, 'class2'),
(1, 2, 3, 4, 'class3')]
expected_types = ['numeric', 'numeric', 'numeric', 'numeric', 'nominal']
missing = pjoin(data_path, 'missing.arff')
expect_missing_raw = np.array([[1, 5], [2, 4], [np.nan, np.nan]])
expect_missing = np.empty(3, [('yop', float), ('yap', float)])
expect_missing['yop'] = expect_missing_raw[:, 0]
expect_missing['yap'] = expect_missing_raw[:, 1]
class TestData:
    # Each scenario loads a small fixture and compares the parsed rows
    # against the shared expect4_data / expected_types constants.
    def test1(self):
        # Parsing trivial file with nothing.
        self._test(test4)
    def test2(self):
        # Parsing trivial file with some comments in the data section.
        self._test(test5)
    def test3(self):
        # Parsing trivial file with nominal attribute of 1 character.
        self._test(test6)
    def test4(self):
        # Parsing trivial file with trailing spaces in attribute declaration.
        self._test(test11)
    def _test(self, test_file):
        # Shared body: the four numeric columns match expect4_data and the
        # attribute types come out as expected_types.
        data, meta = loadarff(test_file)
        for i in range(len(data)):
            for j in range(4):
                assert_array_almost_equal(expect4_data[i][j], data[i][j])
        assert_equal(meta.types(), expected_types)
    def test_filelike(self):
        # Test reading from file-like object (StringIO)
        with open(test1) as f1:
            data1, meta1 = loadarff(f1)
        with open(test1) as f2:
            data2, meta2 = loadarff(StringIO(f2.read()))
        assert_(data1 == data2)
        assert_(repr(meta1) == repr(meta2))
    def test_path(self):
        # Test reading from `pathlib.Path` object
        from pathlib import Path
        with open(test1) as f1:
            data1, meta1 = loadarff(f1)
        data2, meta2 = loadarff(Path(test1))
        assert_(data1 == data2)
        assert_(repr(meta1) == repr(meta2))
class TestMissingData:
    def test_missing(self):
        # '?' entries in the fixture must round-trip as NaN in both columns.
        data, meta = loadarff(missing)
        for i in ['yop', 'yap']:
            assert_array_almost_equal(data[i], expect_missing[i])
class TestNoData:
    def test_nodata(self):
        # The file nodata.arff has no data in the @DATA section.
        # Reading it should result in an array with length 0.
        nodata_filename = os.path.join(data_path, 'nodata.arff')
        data, meta = loadarff(nodata_filename)
        if sys.byteorder == 'big':
            end = '>'
        else:
            end = '<'
        # The dtype must still reflect the declared attributes, with native
        # byte order for the float columns.
        expected_dtype = np.dtype([('sepallength', f'{end}f8'),
                                   ('sepalwidth', f'{end}f8'),
                                   ('petallength', f'{end}f8'),
                                   ('petalwidth', f'{end}f8'),
                                   ('class', 'S15')])
        assert_equal(data.dtype, expected_dtype)
        assert_equal(data.size, 0)
class TestHeader:
    # Header-only parsing checks: read_header is exercised directly,
    # without touching the data section.
    def test_type_parsing(self):
        # Test parsing type of attribute from their value.
        with open(test2) as ofile:
            rel, attrs = read_header(ofile)
        expected = ['numeric', 'numeric', 'numeric', 'numeric', 'numeric',
                    'numeric', 'string', 'string', 'nominal', 'nominal']
        for i in range(len(attrs)):
            assert_(attrs[i].type_name == expected[i])
    def test_badtype_parsing(self):
        # Test parsing wrong type of attribute from their value.
        def badtype_read():
            with open(test3) as ofile:
                _, _ = read_header(ofile)
        assert_raises(ParseArffError, badtype_read)
    def test_fullheader1(self):
        # Parsing trivial header with nothing.
        with open(test1) as ofile:
            rel, attrs = read_header(ofile)
        # Test relation
        assert_(rel == 'test1')
        # Test numerical attributes
        assert_(len(attrs) == 5)
        for i in range(4):
            assert_(attrs[i].name == 'attr%d' % i)
            assert_(attrs[i].type_name == 'numeric')
        # Test nominal attribute
        assert_(attrs[4].name == 'class')
        assert_(attrs[4].values == ('class0', 'class1', 'class2', 'class3'))
    def test_dateheader(self):
        # Each Java date pattern must translate to the right strftime form.
        with open(test7) as ofile:
            rel, attrs = read_header(ofile)
        assert_(rel == 'test7')
        assert_(len(attrs) == 5)
        assert_(attrs[0].name == 'attr_year')
        assert_(attrs[0].date_format == '%Y')
        assert_(attrs[1].name == 'attr_month')
        assert_(attrs[1].date_format == '%Y-%m')
        assert_(attrs[2].name == 'attr_date')
        assert_(attrs[2].date_format == '%Y-%m-%d')
        assert_(attrs[3].name == 'attr_datetime_local')
        assert_(attrs[3].date_format == '%Y-%m-%d %H:%M')
        assert_(attrs[4].name == 'attr_datetime_missing')
        assert_(attrs[4].date_format == '%Y-%m-%d %H:%M')
    def test_dateheader_unsupported(self):
        # Time-zone date formats must be rejected at header-parse time.
        def read_dateheader_unsupported():
            with open(test8) as ofile:
                _, _ = read_header(ofile)
        assert_raises(ValueError, read_dateheader_unsupported)
class TestDateAttribute:
    # End-to-end checks that each date granularity in test7.arff maps to
    # the matching numpy datetime64 unit.
    def setup_method(self):
        self.data, self.meta = loadarff(test7)
    def test_year_attribute(self):
        expected = np.array([
            '1999',
            '2004',
            '1817',
            '2100',
            '2013',
            '1631'
        ], dtype='datetime64[Y]')
        assert_array_equal(self.data["attr_year"], expected)
    def test_month_attribute(self):
        expected = np.array([
            '1999-01',
            '2004-12',
            '1817-04',
            '2100-09',
            '2013-11',
            '1631-10'
        ], dtype='datetime64[M]')
        assert_array_equal(self.data["attr_month"], expected)
    def test_date_attribute(self):
        expected = np.array([
            '1999-01-31',
            '2004-12-01',
            '1817-04-28',
            '2100-09-10',
            '2013-11-30',
            '1631-10-15'
        ], dtype='datetime64[D]')
        assert_array_equal(self.data["attr_date"], expected)
    def test_datetime_local_attribute(self):
        expected = np.array([
            datetime.datetime(year=1999, month=1, day=31, hour=0, minute=1),
            datetime.datetime(year=2004, month=12, day=1, hour=23, minute=59),
            datetime.datetime(year=1817, month=4, day=28, hour=13, minute=0),
            datetime.datetime(year=2100, month=9, day=10, hour=12, minute=0),
            datetime.datetime(year=2013, month=11, day=30, hour=4, minute=55),
            datetime.datetime(year=1631, month=10, day=15, hour=20, minute=4)
        ], dtype='datetime64[m]')
        assert_array_equal(self.data["attr_datetime_local"], expected)
    def test_datetime_missing(self):
        # '?' cells become NaT.
        expected = np.array([
            'nat',
            '2004-12-01T23:59',
            'nat',
            'nat',
            '2013-11-30T04:55',
            '1631-10-15T20:04'
        ], dtype='datetime64[m]')
        assert_array_equal(self.data["attr_datetime_missing"], expected)
    def test_datetime_timezone(self):
        # Time zones are unsupported and must raise at load time.
        assert_raises(ParseArffError, loadarff, test8)
class TestRelationalAttribute:
    """Round-trip checks for a relational attribute (test9.arff)."""
    def setup_method(self):
        self.data, self.meta = loadarff(test9)
    def test_attributes(self):
        # Exactly one top-level attribute: the relational container with
        # a nested date column and a nested numeric column.
        assert_equal(len(self.meta._attributes), 1)
        relational = list(self.meta._attributes.values())[0]
        assert_equal(relational.name, 'attr_date_number')
        assert_equal(relational.type_name, 'relational')
        assert_equal(len(relational.attributes), 2)
        assert_equal(relational.attributes[0].name,
                     'attr_date')
        assert_equal(relational.attributes[0].type_name,
                     'date')
        assert_equal(relational.attributes[1].name,
                     'attr_number')
        assert_equal(relational.attributes[1].type_name,
                     'numeric')
    def test_data(self):
        # np.float64 replaces np.float_, which was removed in NumPy 2.0.
        dtype_instance = [('attr_date', 'datetime64[D]'),
                          ('attr_number', np.float64)]
        expected = [
            np.array([('1999-01-31', 1), ('1935-11-27', 10)],
                     dtype=dtype_instance),
            np.array([('2004-12-01', 2), ('1942-08-13', 20)],
                     dtype=dtype_instance),
            np.array([('1817-04-28', 3)],
                     dtype=dtype_instance),
            np.array([('2100-09-10', 4), ('1957-04-17', 40),
                      ('1721-01-14', 400)],
                     dtype=dtype_instance),
            np.array([('2013-11-30', 5)],
                     dtype=dtype_instance),
            np.array([('1631-10-15', 6)],
                     dtype=dtype_instance)
        ]
        for i in range(len(self.data["attr_date_number"])):
            assert_array_equal(self.data["attr_date_number"][i],
                               expected[i])
class TestRelationalAttributeLong:
    """A relational attribute with many (30000) rows parses completely."""

    def setup_method(self):
        self.data, self.meta = loadarff(test10)

    def test_attributes(self):
        assert_equal(len(self.meta._attributes), 1)
        relational = list(self.meta._attributes.values())[0]
        assert_equal(relational.name, 'attr_relational')
        assert_equal(relational.type_name, 'relational')
        assert_equal(len(relational.attributes), 1)
        assert_equal(relational.attributes[0].name, 'attr_number')
        assert_equal(relational.attributes[0].type_name, 'numeric')

    def test_data(self):
        # np.float64 instead of the np.float_ alias (removed in NumPy 2.0).
        dtype_instance = [('attr_number', np.float64)]
        expected = np.array([(n,) for n in range(30000)],
                            dtype=dtype_instance)
        assert_array_equal(self.data["attr_relational"][0], expected)
class TestQuotedNominal:
    """
    Regression test for issue #10232 : Exception in loadarff with quoted nominal attributes.
    """

    def setup_method(self):
        self.data, self.meta = loadarff(test_quoted_nominal)

    def test_attributes(self):
        assert_equal(len(self.meta._attributes), 2)
        age, smoker = self.meta._attributes.values()
        assert_equal(age.name, 'age')
        assert_equal(age.type_name, 'numeric')
        assert_equal(smoker.name, 'smoker')
        assert_equal(smoker.type_name, 'nominal')
        assert_equal(smoker.values, ['yes', 'no'])

    def test_data(self):
        # np.float64 instead of the np.float_ alias (removed in NumPy 2.0).
        age_dtype_instance = np.float64
        smoker_dtype_instance = '<S3'
        age_expected = np.array([18, 24, 44, 56, 89, 11],
                                dtype=age_dtype_instance)
        smoker_expected = np.array(['no', 'yes', 'no', 'no', 'yes', 'no'],
                                   dtype=smoker_dtype_instance)
        assert_array_equal(self.data["age"], age_expected)
        assert_array_equal(self.data["smoker"], smoker_expected)
class TestQuotedNominalSpaces:
    """
    Regression test for issue #10232 : Exception in loadarff with quoted nominal attributes.
    """

    def setup_method(self):
        self.data, self.meta = loadarff(test_quoted_nominal_spaces)

    def test_attributes(self):
        assert_equal(len(self.meta._attributes), 2)
        age, smoker = self.meta._attributes.values()
        assert_equal(age.name, 'age')
        assert_equal(age.type_name, 'numeric')
        assert_equal(smoker.name, 'smoker')
        assert_equal(smoker.type_name, 'nominal')
        # Quoting preserves the embedded spaces in the nominal values.
        assert_equal(smoker.values, [' yes', 'no '])

    def test_data(self):
        # np.float64 instead of the np.float_ alias (removed in NumPy 2.0).
        age_dtype_instance = np.float64
        smoker_dtype_instance = '<S5'
        age_expected = np.array([18, 24, 44, 56, 89, 11],
                                dtype=age_dtype_instance)
        smoker_expected = np.array(['no ', ' yes', 'no ', 'no ', ' yes', 'no '],
                                   dtype=smoker_dtype_instance)
        assert_array_equal(self.data["age"], age_expected)
        assert_array_equal(self.data["smoker"], smoker_expected)
| 13,084
| 30.303828
| 92
|
py
|
scipy
|
scipy-main/scipy/io/arff/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/io/_harwell_boeing/setup.py
|
def configuration(parent_package='',top_path=None):
    """Build configuration for the _harwell_boeing subpackage.

    NOTE(review): relies on numpy.distutils, which is deprecated and
    unavailable on Python >= 3.12 -- kept only for the legacy build.
    """
    from numpy.distutils.misc_util import Configuration
    config = Configuration('_harwell_boeing',parent_package,top_path)
    config.add_data_dir('tests')
    return config
# Allow building this subpackage standalone with the legacy setup flow.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 351
| 28.333333
| 69
|
py
|
scipy
|
scipy-main/scipy/io/_harwell_boeing/hb.py
|
"""
Implementation of Harwell-Boeing read/write.
At the moment not the full Harwell-Boeing format is supported. Supported
features are:
- assembled, non-symmetric, real matrices
- integer for pointer/indices
- exponential format for float values, and int format
"""
# TODO:
# - Add more support (symmetric/complex matrices, non-assembled matrices ?)
# XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but
# takes a lot of memory. Being faster would require compiled code.
# write is not efficient. Although not a terribly exciting task,
# having reusable facilities to efficiently read/write fortran-formatted files
# would be useful outside this module.
import warnings
import numpy as np
from scipy.sparse import csc_matrix
from ._fortran_format_parser import FortranFormatParser, IntFormat, ExpFormat
__all__ = ["MalformedHeader", "hb_read", "hb_write", "HBInfo", "HBFile",
"HBMatrixType"]
class MalformedHeader(Exception):
    """Raised when a Harwell-Boeing header does not follow the expected layout."""
    pass
class LineOverflow(Warning):
    """Warned when a header field (e.g. the key) exceeds its fixed-width column."""
    pass
def _nbytes_full(fmt, nlines):
"""Return the number of bytes to read to get every full lines for the
given parsed fortran format."""
return (fmt.repeat * fmt.width + 1) * (nlines - 1)
class HBInfo:
    """Meta-data (header) describing the layout of a Harwell-Boeing file."""

    @classmethod
    def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None):
        """Create a HBInfo instance from an existing sparse matrix.

        Parameters
        ----------
        m : sparse matrix
            the HBInfo instance will derive its parameters from m
        title : str
            Title to put in the HB header
        key : str
            Key
        mxtype : HBMatrixType
            type of the input matrix
        fmt : dict
            not implemented

        Returns
        -------
        hb_info : HBInfo instance
        """
        m = m.tocsc(copy=False)

        pointer = m.indptr
        indices = m.indices
        values = m.data

        nrows, ncols = m.shape
        nnon_zeros = m.nnz

        if fmt is None:
            # +1 because HB use one-based indexing (Fortran), and we will write
            # the indices /pointer as such
            pointer_fmt = IntFormat.from_number(np.max(pointer+1))
            indices_fmt = IntFormat.from_number(np.max(indices+1))

            if values.dtype.kind in np.typecodes["AllFloat"]:
                values_fmt = ExpFormat.from_number(-np.max(np.abs(values)))
            elif values.dtype.kind in np.typecodes["AllInteger"]:
                values_fmt = IntFormat.from_number(-np.max(np.abs(values)))
            else:
                raise NotImplementedError("type %s not implemented yet"
                                          % values.dtype.kind)
        else:
            raise NotImplementedError("fmt argument not supported yet.")

        if mxtype is None:
            if not np.isrealobj(values):
                raise ValueError("Complex values not supported yet")
            if values.dtype.kind in np.typecodes["AllInteger"]:
                tp = "integer"
            elif values.dtype.kind in np.typecodes["AllFloat"]:
                tp = "real"
            else:
                raise NotImplementedError("type %s for values not implemented"
                                          % values.dtype)
            mxtype = HBMatrixType(tp, "unsymmetric", "assembled")
        else:
            raise ValueError("mxtype argument not handled yet.")

        def _nlines(fmt, size):
            # Number of text lines needed for `size` items at `fmt.repeat`
            # items per line (the last line may be partial).
            nlines = size // fmt.repeat
            if nlines * fmt.repeat != size:
                nlines += 1
            return nlines

        pointer_nlines = _nlines(pointer_fmt, pointer.size)
        indices_nlines = _nlines(indices_fmt, indices.size)
        values_nlines = _nlines(values_fmt, values.size)

        total_nlines = pointer_nlines + indices_nlines + values_nlines

        return cls(title, key,
            total_nlines, pointer_nlines, indices_nlines, values_nlines,
            mxtype, nrows, ncols, nnon_zeros,
            pointer_fmt.fortran_format, indices_fmt.fortran_format,
            values_fmt.fortran_format)

    @classmethod
    def from_file(cls, fid):
        """Create a HBInfo instance from a file object containing a matrix in the
        HB format.

        Parameters
        ----------
        fid : file-like matrix
            File or file-like object containing a matrix in the HB format.

        Returns
        -------
        hb_info : HBInfo instance
        """
        # First line: 72-character title followed by the key.
        line = fid.readline().strip("\n")
        if not len(line) > 72:
            raise ValueError("Expected at least 72 characters for first line, "
                             "got: \n%s" % line)
        title = line[:72]
        key = line[72:]

        # Second line: line counts for the whole file and for each section.
        line = fid.readline().strip("\n")
        if not len(line.rstrip()) >= 56:
            raise ValueError("Expected at least 56 characters for second line, "
                             "got: \n%s" % line)
        total_nlines = _expect_int(line[:14])
        pointer_nlines = _expect_int(line[14:28])
        indices_nlines = _expect_int(line[28:42])
        values_nlines = _expect_int(line[42:56])

        rhs_nlines = line[56:72].strip()
        if rhs_nlines == '':
            rhs_nlines = 0
        else:
            rhs_nlines = _expect_int(rhs_nlines)
        if not rhs_nlines == 0:
            raise ValueError("Only files without right hand side supported for "
                             "now.")

        # Third line: matrix type and dimensions. Only 70 characters are
        # actually read below, so that is what the check (and message) require.
        line = fid.readline().strip("\n")
        if not len(line) >= 70:
            raise ValueError("Expected at least 70 characters for third line, got:\n"
                             "%s" % line)

        mxtype_s = line[:3].upper()
        if not len(mxtype_s) == 3:
            raise ValueError("mxtype expected to be 3 characters long")

        mxtype = HBMatrixType.from_fortran(mxtype_s)
        if mxtype.value_type not in ["real", "integer"]:
            raise ValueError("Only real or integer matrices supported for "
                             "now (detected %s)" % mxtype)
        if not mxtype.structure == "unsymmetric":
            raise ValueError("Only unsymmetric matrices supported for "
                             "now (detected %s)" % mxtype)
        if not mxtype.storage == "assembled":
            raise ValueError("Only assembled matrices supported for now")

        if not line[3:14] == " " * 11:
            raise ValueError("Malformed data for third line: %s" % line)

        nrows = _expect_int(line[14:28])
        ncols = _expect_int(line[28:42])
        nnon_zeros = _expect_int(line[42:56])
        nelementals = _expect_int(line[56:70])
        if not nelementals == 0:
            raise ValueError("Unexpected value %d for nltvl (last entry of line 3)"
                             % nelementals)

        # Fourth line: the three fortran formats (pointer, indices, values).
        line = fid.readline().strip("\n")

        ct = line.split()
        if not len(ct) == 3:
            raise ValueError("Expected 3 formats, got %s" % ct)

        return cls(title, key,
                   total_nlines, pointer_nlines, indices_nlines, values_nlines,
                   mxtype, nrows, ncols, nnon_zeros,
                   ct[0], ct[1], ct[2],
                   rhs_nlines, nelementals)

    def __init__(self, title, key,
            total_nlines, pointer_nlines, indices_nlines, values_nlines,
            mxtype, nrows, ncols, nnon_zeros,
            pointer_format_str, indices_format_str, values_format_str,
            right_hand_sides_nlines=0, nelementals=0):
        """Do not use this directly, but the class ctrs (from_* functions)."""
        # Apply defaults and validate *before* storing, so that self.title and
        # self.key always hold the values actually used by dump(). (Previously
        # the attributes were assigned first, so a None title/key was stored as
        # None and the defaulting below silently had no effect.)
        if title is None:
            title = "No Title"
        if len(title) > 72:
            raise ValueError("title cannot be > 72 characters")

        if key is None:
            key = "|No Key"
        if len(key) > 8:
            warnings.warn("key is > 8 characters (key is %s)" % key, LineOverflow)

        self.title = title
        self.key = key

        self.total_nlines = total_nlines
        self.pointer_nlines = pointer_nlines
        self.indices_nlines = indices_nlines
        self.values_nlines = values_nlines

        parser = FortranFormatParser()
        pointer_format = parser.parse(pointer_format_str)
        if not isinstance(pointer_format, IntFormat):
            raise ValueError("Expected int format for pointer format, got %s"
                             % pointer_format)

        indices_format = parser.parse(indices_format_str)
        if not isinstance(indices_format, IntFormat):
            raise ValueError("Expected int format for indices format, got %s" %
                             indices_format)

        values_format = parser.parse(values_format_str)
        if isinstance(values_format, ExpFormat):
            if mxtype.value_type not in ["real", "complex"]:
                raise ValueError("Inconsistency between matrix type {} and "
                                 "value type {}".format(mxtype, values_format))
            values_dtype = np.float64
        elif isinstance(values_format, IntFormat):
            if mxtype.value_type not in ["integer"]:
                raise ValueError("Inconsistency between matrix type {} and "
                                 "value type {}".format(mxtype, values_format))
            # XXX: fortran int -> dtype association ?
            values_dtype = int
        else:
            raise ValueError(f"Unsupported format for values {values_format!r}")

        self.pointer_format = pointer_format
        self.indices_format = indices_format
        self.values_format = values_format

        self.pointer_dtype = np.int32
        self.indices_dtype = np.int32
        self.values_dtype = values_dtype

        # Byte counts of the full lines of each section (used by the reader).
        self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines)
        self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines)
        self.values_nbytes_full = _nbytes_full(values_format, values_nlines)

        self.nrows = nrows
        self.ncols = ncols
        self.nnon_zeros = nnon_zeros
        self.nelementals = nelementals
        self.mxtype = mxtype

    def dump(self):
        """Gives the header corresponding to this instance as a string."""
        header = [self.title.ljust(72) + self.key.ljust(8)]

        header.append("%14d%14d%14d%14d" %
                      (self.total_nlines, self.pointer_nlines,
                       self.indices_nlines, self.values_nlines))
        header.append("%14s%14d%14d%14d%14d" %
                      (self.mxtype.fortran_format.ljust(14), self.nrows,
                       self.ncols, self.nnon_zeros, 0))

        pffmt = self.pointer_format.fortran_format
        iffmt = self.indices_format.fortran_format
        vffmt = self.values_format.fortran_format
        header.append("%16s%16s%20s" %
                      (pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20)))
        return "\n".join(header)
def _expect_int(value, msg=None):
try:
return int(value)
except ValueError as e:
if msg is None:
msg = "Expected an int, got %s"
raise ValueError(msg % value) from e
def _read_hb_data(content, header):
# XXX: look at a way to reduce memory here (big string creation)
ptr_string = "".join([content.read(header.pointer_nbytes_full),
content.readline()])
ptr = np.fromstring(ptr_string,
dtype=int, sep=' ')
ind_string = "".join([content.read(header.indices_nbytes_full),
content.readline()])
ind = np.fromstring(ind_string,
dtype=int, sep=' ')
val_string = "".join([content.read(header.values_nbytes_full),
content.readline()])
val = np.fromstring(val_string,
dtype=header.values_dtype, sep=' ')
try:
return csc_matrix((val, ind-1, ptr-1),
shape=(header.nrows, header.ncols))
except ValueError as e:
raise e
def _write_data(m, fid, header):
m = m.tocsc(copy=False)
def write_array(f, ar, nlines, fmt):
# ar_nlines is the number of full lines, n is the number of items per
# line, ffmt the fortran format
pyfmt = fmt.python_format
pyfmt_full = pyfmt * fmt.repeat
# for each array to write, we first write the full lines, and special
# case for partial line
full = ar[:(nlines - 1) * fmt.repeat]
for row in full.reshape((nlines-1, fmt.repeat)):
f.write(pyfmt_full % tuple(row) + "\n")
nremain = ar.size - full.size
if nremain > 0:
f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n")
fid.write(header.dump())
fid.write("\n")
# +1 is for Fortran one-based indexing
write_array(fid, m.indptr+1, header.pointer_nlines,
header.pointer_format)
write_array(fid, m.indices+1, header.indices_nlines,
header.indices_format)
write_array(fid, m.data, header.values_nlines,
header.values_format)
class HBMatrixType:
    """Class to hold the matrix type."""
    # q2f* translates qualified names to Fortran character
    _q2f_type = {
        "real": "R",
        "complex": "C",
        "pattern": "P",
        "integer": "I",
    }
    _q2f_structure = {
        "symmetric": "S",
        "unsymmetric": "U",
        "hermitian": "H",
        "skewsymmetric": "Z",
        "rectangular": "R"
    }
    _q2f_storage = {
        "assembled": "A",
        "elemental": "E",
    }

    # Reverse maps: Fortran character -> qualified name.
    _f2q_type = {j: i for i, j in _q2f_type.items()}
    _f2q_structure = {j: i for i, j in _q2f_structure.items()}
    _f2q_storage = {j: i for i, j in _q2f_storage.items()}

    @classmethod
    def from_fortran(cls, fmt):
        """Build an instance from a 3-character Fortran type code, e.g. 'RUA'."""
        if len(fmt) != 3:
            raise ValueError("Fortran format for matrix type should be 3 "
                             "characters long")
        try:
            return cls(cls._f2q_type[fmt[0]],
                       cls._f2q_structure[fmt[1]],
                       cls._f2q_storage[fmt[2]])
        except KeyError as e:
            raise ValueError("Unrecognized format %s" % fmt) from e

    def __init__(self, value_type, structure, storage="assembled"):
        self.value_type = value_type
        self.structure = structure
        self.storage = storage

        if value_type not in self._q2f_type:
            raise ValueError("Unrecognized type %s" % value_type)
        if structure not in self._q2f_structure:
            raise ValueError("Unrecognized structure %s" % structure)
        if storage not in self._q2f_storage:
            raise ValueError("Unrecognized storage %s" % storage)

    @property
    def fortran_format(self):
        """The 3-character Fortran code for this matrix type."""
        return "".join([self._q2f_type[self.value_type],
                        self._q2f_structure[self.structure],
                        self._q2f_storage[self.storage]])

    def __repr__(self):
        return "HBMatrixType(%s, %s, %s)" % \
               (self.value_type, self.structure, self.storage)
class HBFile:
    def __init__(self, file, hb_info=None):
        """Create a HBFile instance.

        Parameters
        ----------
        file : file-object
            StringIO work as well
        hb_info : HBInfo, optional
            Should be given as an argument for writing, in which case the file
            should be writable.
        """
        self._fid = file
        if hb_info is not None:
            self._hb_info = hb_info
        else:
            # Reading mode: parse the header straight from the file.
            self._hb_info = HBInfo.from_file(file)

    @property
    def title(self):
        return self._hb_info.title

    @property
    def key(self):
        return self._hb_info.key

    @property
    def type(self):
        return self._hb_info.mxtype.value_type

    @property
    def structure(self):
        return self._hb_info.mxtype.structure

    @property
    def storage(self):
        return self._hb_info.mxtype.storage

    def read_matrix(self):
        """Read and return the matrix stored in the file (CSC form)."""
        return _read_hb_data(self._fid, self._hb_info)

    def write_matrix(self, m):
        """Write the sparse matrix `m` to the file."""
        return _write_data(m, self._fid, self._hb_info)
def hb_read(path_or_open_file):
    """Read HB-format file.

    Parameters
    ----------
    path_or_open_file : path-like or file-like
        If a file-like object, it is used as-is. Otherwise, it is opened
        before reading.

    Returns
    -------
    data : scipy.sparse.csc_matrix instance
        The data read from the HB file as a sparse matrix.

    Notes
    -----
    At the moment not the full Harwell-Boeing format is supported. Supported
    features are:

        - assembled, non-symmetric, real matrices
        - integer for pointer/indices
        - exponential format for float values, and int format

    Examples
    --------
    We can read and write a harwell-boeing format file:

    >>> from scipy.io import hb_read, hb_write
    >>> from scipy.sparse import csr_matrix, eye
    >>> data = csr_matrix(eye(3))  # create a sparse matrix
    >>> hb_write("data.hb", data)  # write a hb file
    >>> print(hb_read("data.hb"))  # read a hb file
      (0, 0)    1.0
      (1, 1)    1.0
      (2, 2)    1.0
    """
    if hasattr(path_or_open_file, 'read'):
        return HBFile(path_or_open_file).read_matrix()
    with open(path_or_open_file) as f:
        return HBFile(f).read_matrix()
def hb_write(path_or_open_file, m, hb_info=None):
    """Write HB-format file.

    Parameters
    ----------
    path_or_open_file : path-like or file-like
        If a file-like object, it is used as-is. Otherwise, it is opened
        before writing.
    m : sparse-matrix
        the sparse matrix to write
    hb_info : HBInfo
        contains the meta-data for write

    Returns
    -------
    None

    Notes
    -----
    At the moment not the full Harwell-Boeing format is supported. Supported
    features are:

        - assembled, non-symmetric, real matrices
        - integer for pointer/indices
        - exponential format for float values, and int format

    Examples
    --------
    We can read and write a harwell-boeing format file:

    >>> from scipy.io import hb_read, hb_write
    >>> from scipy.sparse import csr_matrix, eye
    >>> data = csr_matrix(eye(3))  # create a sparse matrix
    >>> hb_write("data.hb", data)  # write a hb file
    >>> print(hb_read("data.hb"))  # read a hb file
      (0, 0)    1.0
      (1, 1)    1.0
      (2, 2)    1.0
    """
    m = m.tocsc(copy=False)

    # Derive the header from the matrix when the caller did not supply one.
    if hb_info is None:
        hb_info = HBInfo.from_data(m)

    if hasattr(path_or_open_file, 'write'):
        return HBFile(path_or_open_file, hb_info).write_matrix(m)
    with open(path_or_open_file, 'w') as f:
        return HBFile(f, hb_info).write_matrix(m)
| 19,148
| 32.535902
| 92
|
py
|
scipy
|
scipy-main/scipy/io/_harwell_boeing/_fortran_format_parser.py
|
"""
Preliminary module to handle Fortran formats for IO. Does not use this outside
scipy.sparse io for now, until the API is deemed reasonable.
The *Format classes handle conversion between Fortran and Python format, and
FortranFormatParser can create *Format instances from raw Fortran format
strings (e.g. '(3I4)', '(10I3)', etc...)
"""
import re
import numpy as np
__all__ = ["BadFortranFormat", "FortranFormatParser", "IntFormat", "ExpFormat"]
# Token name -> regex. NOTE: Tokenizer tries these patterns in dict
# insertion order, so the specific "INT_ID"/"EXP_ID" letters are matched
# before the generic "INT" digit run.
TOKENS = {
    "LPAR": r"\(",
    "RPAR": r"\)",
    "INT_ID": r"I",
    "EXP_ID": r"E",
    "INT": r"\d+",
    "DOT": r"\.",
}
class BadFortranFormat(SyntaxError):
    """Raised when a Fortran format string cannot be parsed."""
    pass
def number_digits(n):
    """Number of decimal digits in the integer part of ``abs(n)``."""
    magnitude = np.abs(n)
    return int(np.floor(np.log10(magnitude)) + 1)
class IntFormat:
    """Fortran integer format (e.g. ``(26I3)``): a field width, an optional
    minimum digit count, and an optional per-line repeat count."""

    @classmethod
    def from_number(cls, n, min=None):
        """Return an IntFormat wide enough for any value between 0 and `n`
        (between -n and n when `n` is negative).

        Parameters
        ----------
        n : int
            max number one wants to be able to represent
        min : int
            minimum number of characters to use for the format

        Returns
        -------
        res : IntFormat
            instance whose width is the minimal string length necessary to
            represent `n` without losing information
        """
        # One extra column for the separating blank, one more for a sign.
        width = number_digits(n) + 1
        if n < 0:
            width += 1
        repeat = 80 // width
        return cls(width, min, repeat=repeat)

    def __init__(self, width, min=None, repeat=None):
        self.width = width
        self.repeat = repeat
        self.min = min

    def _spec(self):
        # Core of the format specifier, shared by __repr__ / fortran_format.
        spec = ""
        if self.repeat:
            spec += "%d" % self.repeat
        spec += "I%d" % self.width
        if self.min:
            spec += ".%d" % self.min
        return spec

    def __repr__(self):
        return "IntFormat(" + self._spec() + ")"

    @property
    def fortran_format(self):
        """The Fortran representation, e.g. ``(3I12.10)``."""
        return "(" + self._spec() + ")"

    @property
    def python_format(self):
        """Equivalent printf-style Python format, e.g. ``%12d``."""
        return "%" + str(self.width) + "d"
class ExpFormat:
    """Fortran exponential format (e.g. ``(3E23.15)``): a field width, the
    significand digit count, optional exponent digits, optional repeat."""

    @classmethod
    def from_number(cls, n, min=None):
        """Return an ExpFormat able to represent any value between -n and n.

        Parameters
        ----------
        n : float
            max number one wants to be able to represent
        min : int
            minimum number of characters to use for the format

        Returns
        -------
        res : ExpFormat
            instance with the minimal width that avoids losing precision
        """
        # One number occupies: sign + leading digit + '.' + fractional
        # digits + 'E' + exponent sign + exponent digits.
        finfo = np.finfo(n.dtype)
        # Number of digits for the fractional part.
        n_prec = finfo.precision + 1
        # Number of digits for the exponent part.
        n_exp = number_digits(np.max(np.abs([finfo.maxexp, finfo.minexp])))
        width = 1 + 1 + n_prec + 1 + n_exp + 1
        if n < 0:
            width += 1
        repeat = int(np.floor(80 / width))
        return cls(width, n_prec, min, repeat=repeat)

    def __init__(self, width, significand, min=None, repeat=None):
        """\
        Parameters
        ----------
        width : int
            number of characters taken by the string (includes space).
        """
        self.width = width
        self.significand = significand
        self.repeat = repeat
        self.min = min

    def _spec(self):
        # Core of the format specifier, shared by __repr__ / fortran_format.
        spec = ""
        if self.repeat:
            spec += "%d" % self.repeat
        spec += "E%d.%d" % (self.width, self.significand)
        if self.min:
            spec += "E%d" % self.min
        return spec

    def __repr__(self):
        return "ExpFormat(" + self._spec() + ")"

    @property
    def fortran_format(self):
        """The Fortran representation, e.g. ``(3E24.16E3)``."""
        return "(" + self._spec() + ")"

    @property
    def python_format(self):
        """Equivalent printf-style Python format, e.g. ``%23.15E``."""
        return "%%%d.%dE" % (self.width - 1, self.significand)
class Token:
    """A lexical token: its type name, the matched text, and the position
    just past the match in the input string."""

    def __init__(self, type, value, pos):
        self.type = type
        self.value = value
        self.pos = pos

    def __str__(self):
        return "Token('%s', \"%s\")" % (self.type, self.value)

    def __repr__(self):
        return str(self)
class Tokenizer:
    """Hand-rolled lexer for Fortran format strings.

    Token kinds and their regexes come from the module-level ``TOKENS``
    dict; candidate patterns are tried in dict insertion order, so the
    one-letter tokens are matched before the generic digit run.
    """
    def __init__(self):
        self.tokens = list(TOKENS.keys())
        self.res = [re.compile(TOKENS[i]) for i in self.tokens]
    def input(self, s):
        # Reset the lexer to scan a new input string from the start.
        self.data = s
        self.curpos = 0
        self.len = len(s)
    def next_token(self):
        # Return the next Token, or fall through (returning None implicitly)
        # when the input is exhausted; raise SyntaxError on an unknown char.
        curpos = self.curpos
        while curpos < self.len:
            for i, r in enumerate(self.res):
                m = r.match(self.data, curpos)
                if m is None:
                    continue
                else:
                    self.curpos = m.end()
                    return Token(self.tokens[i], m.group(), self.curpos)
            raise SyntaxError("Unknown character at position %d (%s)"
                              % (self.curpos, self.data[curpos]))
# Grammar for fortran format:
# format : LPAR format_string RPAR
# format_string : repeated | simple
# repeated : repeat simple
# simple : int_fmt | exp_fmt
# int_fmt : INT_ID width
# exp_fmt : simple_exp_fmt
# simple_exp_fmt : EXP_ID width DOT significand
# extended_exp_fmt : EXP_ID width DOT significand EXP_ID ndigits
# repeat : INT
# width : INT
# significand : INT
# ndigits : INT
# Naive fortran formatter - parser is hand-made
class FortranFormatParser:
    """Parser for Fortran format strings. The parse method returns a *Format
    instance.
    Notes
    -----
    Only ExpFormat (exponential format for floating values) and IntFormat
    (integer format) for now.
    """
    def __init__(self):
        self.tokenizer = Tokenizer()
    def parse(self, s):
        """Parse the format string `s` into an IntFormat or ExpFormat,
        raising BadFortranFormat on any lexical or syntax error."""
        self.tokenizer.input(s)
        tokens = []
        try:
            while True:
                t = self.tokenizer.next_token()
                # next_token returns None (implicitly) once input is exhausted.
                if t is None:
                    break
                else:
                    tokens.append(t)
            return self._parse_format(tokens)
        except SyntaxError as e:
            # Re-raise lexer/parser errors under the public exception type.
            raise BadFortranFormat(str(e)) from e
    def _get_min(self, tokens):
        # Consume ".<INT>" and return the INT's text (the minimum digits).
        next = tokens.pop(0)
        if not next.type == "DOT":
            raise SyntaxError()
        next = tokens.pop(0)
        return next.value
    def _expect(self, token, tp):
        if not token.type == tp:
            raise SyntaxError()
    def _parse_format(self, tokens):
        # Grammar: LPAR [repeat] (int_fmt | exp_fmt) RPAR -- see the
        # grammar comments above this class.
        if not tokens[0].type == "LPAR":
            raise SyntaxError("Expected left parenthesis at position "
                              "%d (got '%s')" % (0, tokens[0].value))
        elif not tokens[-1].type == "RPAR":
            raise SyntaxError("Expected right parenthesis at position "
                              "%d (got '%s')" % (len(tokens), tokens[-1].value))
        tokens = tokens[1:-1]
        types = [t.type for t in tokens]
        # Optional leading repeat count, e.g. the 3 in "(3I4)".
        if types[0] == "INT":
            repeat = int(tokens.pop(0).value)
        else:
            repeat = None
        next = tokens.pop(0)
        if next.type == "INT_ID":
            next = self._next(tokens, "INT")
            width = int(next.value)
            if tokens:
                min = int(self._get_min(tokens))
            else:
                min = None
            return IntFormat(width, min, repeat)
        elif next.type == "EXP_ID":
            next = self._next(tokens, "INT")
            width = int(next.value)
            next = self._next(tokens, "DOT")
            next = self._next(tokens, "INT")
            significand = int(next.value)
            if tokens:
                # Optional exponent-digit suffix: E<width>.<sig>E<ndigits>.
                next = self._next(tokens, "EXP_ID")
                next = self._next(tokens, "INT")
                min = int(next.value)
            else:
                min = None
            return ExpFormat(width, significand, min, repeat)
        else:
            raise SyntaxError("Invalid formater type %s" % next.value)
    def _next(self, tokens, tp):
        # Pop the next token and assert that it has type `tp`.
        if not len(tokens) > 0:
            raise SyntaxError()
        next = tokens.pop(0)
        self._expect(next, tp)
        return next
| 8,916
| 27.764516
| 83
|
py
|
scipy
|
scipy-main/scipy/io/_harwell_boeing/__init__.py
|
from .hb import (MalformedHeader, hb_read, hb_write, HBInfo,
                 HBFile, HBMatrixType)
from ._fortran_format_parser import (FortranFormatParser, IntFormat,
                                     ExpFormat, BadFortranFormat)
# Deprecated namespaces, to be removed in v2.0.0
from . import hb
# Public names re-exported by this subpackage.
__all__ = [
    'MalformedHeader', 'hb_read', 'hb_write', 'HBInfo',
    'HBFile', 'HBMatrixType', 'FortranFormatParser', 'IntFormat',
    'ExpFormat', 'BadFortranFormat', 'hb'
]
from scipy._lib._testutils import PytestTester
# Expose `test()` so this subpackage's test suite can be run directly.
test = PytestTester(__name__)
del PytestTester
| 574
| 30.944444
| 68
|
py
|
scipy
|
scipy-main/scipy/io/_harwell_boeing/tests/test_fortran_format.py
|
import numpy as np
from numpy.testing import assert_equal
from pytest import raises as assert_raises
from scipy.io._harwell_boeing import (
FortranFormatParser, IntFormat, ExpFormat, BadFortranFormat)
class TestFortranFormatParser:
def setup_method(self):
self.parser = FortranFormatParser()
def _test_equal(self, format, ref):
ret = self.parser.parse(format)
assert_equal(ret.__dict__, ref.__dict__)
def test_simple_int(self):
self._test_equal("(I4)", IntFormat(4))
def test_simple_repeated_int(self):
self._test_equal("(3I4)", IntFormat(4, repeat=3))
def test_simple_exp(self):
self._test_equal("(E4.3)", ExpFormat(4, 3))
def test_exp_exp(self):
self._test_equal("(E8.3E3)", ExpFormat(8, 3, 3))
def test_repeat_exp(self):
self._test_equal("(2E4.3)", ExpFormat(4, 3, repeat=2))
def test_repeat_exp_exp(self):
self._test_equal("(2E8.3E3)", ExpFormat(8, 3, 3, repeat=2))
def test_wrong_formats(self):
def _test_invalid(bad_format):
assert_raises(BadFortranFormat, lambda: self.parser.parse(bad_format))
_test_invalid("I4")
_test_invalid("(E4)")
_test_invalid("(E4.)")
_test_invalid("(E4.E3)")
class TestIntFormat:
    def test_to_fortran(self):
        """Known IntFormat instances render the expected format strings."""
        cases = [(IntFormat(10), "(I10)"),
                 (IntFormat(12, 10), "(I12.10)"),
                 (IntFormat(12, 10, 3), "(3I12.10)")]
        for fmt, expected in cases:
            assert_equal(fmt.fortran_format, expected)

    def test_from_number(self):
        """from_number picks minimal widths with matching repeat counts."""
        cases = [(10, IntFormat(3, repeat=26)),
                 (-12, IntFormat(4, repeat=20)),
                 (123456789, IntFormat(10, repeat=8))]
        for n, ref in cases:
            assert_equal(IntFormat.from_number(n).__dict__, ref.__dict__)
class TestExpFormat:
    def test_to_fortran(self):
        """Known ExpFormat instances render the expected format strings."""
        cases = [(ExpFormat(10, 5), "(E10.5)"),
                 (ExpFormat(12, 10), "(E12.10)"),
                 (ExpFormat(12, 10, min=3), "(E12.10E3)"),
                 (ExpFormat(10, 5, repeat=3), "(3E10.5)")]
        for fmt, expected in cases:
            assert_equal(fmt.fortran_format, expected)

    def test_from_number(self):
        """from_number derives widths from the float's dtype precision."""
        in_values = np.array([1.0, -1.2])
        refs = [ExpFormat(24, 16, repeat=3), ExpFormat(25, 16, repeat=3)]
        for value, ref in zip(in_values, refs):
            assert_equal(ExpFormat.from_number(value).__dict__, ref.__dict__)
| 2,360
| 30.48
| 82
|
py
|
scipy
|
scipy-main/scipy/io/_harwell_boeing/tests/test_hb.py
|
from io import StringIO
import tempfile
import numpy as np
from numpy.testing import assert_equal, \
assert_array_almost_equal_nulp
from scipy.sparse import coo_matrix, csc_matrix, rand
from scipy.io import hb_read, hb_write
SIMPLE = """\
No Title |No Key
9 4 1 4
RUA 100 100 10 0
(26I3) (26I3) (3E23.15)
1 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
3 3 3 3 3 3 3 4 4 4 6 6 6 6 6 6 6 6 6 6 6 8 9 9 9 9
9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 11
37 71 89 18 30 45 70 19 25 52
2.971243799687726e-01 3.662366682877375e-01 4.786962174699534e-01
6.490068647991184e-01 6.617490424831662e-02 8.870370343191623e-01
4.196478590163001e-01 5.649603072111251e-01 9.934423887087086e-01
6.912334991524289e-01
"""
# Reference COO matrix matching the SIMPLE fixture above: the ten stored
# values paired with their (row, column) coordinate arrays.
SIMPLE_MATRIX = coo_matrix(
    ((0.297124379969, 0.366236668288, 0.47869621747, 0.649006864799,
      0.0661749042483, 0.887037034319, 0.419647859016,
      0.564960307211, 0.993442388709, 0.691233499152,),
     (np.array([[36, 70, 88, 17, 29, 44, 69, 18, 24, 51],
                [0, 4, 58, 61, 61, 72, 72, 73, 99, 99]]))))
def assert_csc_almost_equal(r, l):
    """Assert that two sparse matrices are (almost) equal in CSC form:
    identical structure, values equal to within 10000 ulp."""
    r_csc = csc_matrix(r)
    l_csc = csc_matrix(l)
    assert_equal(r_csc.indptr, l_csc.indptr)
    assert_equal(r_csc.indices, l_csc.indices)
    assert_array_almost_equal_nulp(r_csc.data, l_csc.data, 10000)
class TestHBReader:
    def test_simple(self):
        """The SIMPLE fixture must parse to the known reference matrix."""
        parsed = hb_read(StringIO(SIMPLE))
        assert_csc_almost_equal(parsed, SIMPLE_MATRIX)
class TestHBReadWrite:
    def check_save_load(self, value):
        """Round-trip `value` through a temporary HB file and compare."""
        with tempfile.NamedTemporaryFile(mode='w+t') as tmp:
            hb_write(tmp, value)
            tmp.file.seek(0)
            round_tripped = hb_read(tmp)
            assert_csc_almost_equal(value, round_tripped)

    def test_simple(self):
        """Every sparse format must survive the HB write/read round trip."""
        random_matrix = rand(10, 100, 0.1)
        for matrix_format in ('coo', 'csc', 'csr', 'bsr', 'dia', 'dok', 'lil'):
            matrix = random_matrix.asformat(matrix_format, copy=False)
            self.check_save_load(matrix)
| 2,284
| 33.621212
| 79
|
py
|
scipy
|
scipy-main/scipy/io/_harwell_boeing/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/io/tests/test_paths.py
|
"""
Ensure that we can use pathlib.Path objects in all relevant IO functions.
"""
from pathlib import Path
import numpy as np
import scipy.io
import scipy.io.wavfile
from scipy._lib._tmpdirs import tempdir
import scipy.sparse
class TestPaths:
    """Check that pathlib.Path objects are accepted by scipy.io entry points."""
    # Shared fixture array saved/loaded by the matlab tests.
    data = np.arange(5).astype(np.int64)

    def test_savemat(self):
        with tempdir() as temp_dir:
            mat_path = Path(temp_dir) / 'data.mat'
            scipy.io.savemat(mat_path, {'data': self.data})
            assert mat_path.is_file()

    def test_loadmat(self):
        # Save with a string path, load with a pathlib.Path
        with tempdir() as temp_dir:
            mat_path = Path(temp_dir) / 'data.mat'
            scipy.io.savemat(str(mat_path), {'data': self.data})
            loaded = scipy.io.loadmat(mat_path)
            assert (loaded['data'] == self.data).all()

    def test_whosmat(self):
        # Save with a string path, inspect with a pathlib.Path
        with tempdir() as temp_dir:
            mat_path = Path(temp_dir) / 'data.mat'
            scipy.io.savemat(str(mat_path), {'data': self.data})
            contents = scipy.io.whosmat(mat_path)
            assert contents[0] == ('data', (1, 5), 'int64')

    def test_readsav(self):
        sav_path = Path(__file__).parent / 'data/scalar_string.sav'
        scipy.io.readsav(sav_path)

    def test_hb_read(self):
        # Write with a string path, read back with a pathlib.Path
        with tempdir() as temp_dir:
            matrix = scipy.sparse.csr_matrix(scipy.sparse.eye(3))
            hb_path = Path(temp_dir) / 'data.hb'
            scipy.io.hb_write(str(hb_path), matrix)
            round_tripped = scipy.io.hb_read(hb_path)
            assert (round_tripped != matrix).nnz == 0

    def test_hb_write(self):
        with tempdir() as temp_dir:
            matrix = scipy.sparse.csr_matrix(scipy.sparse.eye(3))
            hb_path = Path(temp_dir) / 'data.hb'
            scipy.io.hb_write(hb_path, matrix)
            assert hb_path.is_file()

    def test_mmio_read(self):
        # Write with a string path, read back with a pathlib.Path
        with tempdir() as temp_dir:
            matrix = scipy.sparse.csr_matrix(scipy.sparse.eye(3))
            mtx_path = Path(temp_dir) / 'data.mtx'
            scipy.io.mmwrite(str(mtx_path), matrix)
            round_tripped = scipy.io.mmread(mtx_path)
            assert (round_tripped != matrix).nnz == 0

    def test_mmio_write(self):
        with tempdir() as temp_dir:
            matrix = scipy.sparse.csr_matrix(scipy.sparse.eye(3))
            mtx_path = Path(temp_dir) / 'data.mtx'
            scipy.io.mmwrite(mtx_path, matrix)

    def test_netcdf_file(self):
        nc_path = Path(__file__).parent / 'data/example_1.nc'
        scipy.io.netcdf_file(nc_path)

    def test_wavfile_read(self):
        wav_path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav'
        scipy.io.wavfile.read(wav_path)

    def test_wavfile_write(self):
        # Read from a str path, write to a pathlib.Path
        input_path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav'
        rate, data = scipy.io.wavfile.read(str(input_path))
        with tempdir() as temp_dir:
            output_path = Path(temp_dir) / input_path.name
            scipy.io.wavfile.write(output_path, rate, data)
| 3,178
| 32.819149
| 81
|
py
|
scipy
|
scipy-main/scipy/io/tests/test_netcdf.py
|
''' Tests for netcdf '''
import os
from os.path import join as pjoin, dirname
import shutil
import tempfile
import warnings
from io import BytesIO
from glob import glob
from contextlib import contextmanager
import numpy as np
from numpy.testing import (assert_, assert_allclose, assert_equal,
break_cycles, suppress_warnings, IS_PYPY)
from pytest import raises as assert_raises
from scipy.io import netcdf_file
from scipy._lib._tmpdirs import in_tempdir
TEST_DATA_PATH = pjoin(dirname(__file__), 'data')
N_EG_ELS = 11 # number of elements for example variable
VARTYPE_EG = 'b' # var type for example variable
@contextmanager
def make_simple(*args, **kwargs):
    # Open a netcdf_file (args forwarded verbatim) pre-populated with a
    # 'time' dimension/variable of N_EG_ELS elements, flush it, hand it to
    # the caller, and close it on exit.
    f = netcdf_file(*args, **kwargs)
    f.history = 'Created for a test'
    f.createDimension('time', N_EG_ELS)
    time = f.createVariable('time', VARTYPE_EG, ('time',))
    time[:] = np.arange(N_EG_ELS)
    time.units = 'days since 2008-01-01'
    f.flush()
    yield f
    f.close()
def check_simple(ncfileobj):
    """Assert that *ncfileobj* matches the dataset built by make_simple."""
    assert_equal(ncfileobj.history, b'Created for a test')
    tvar = ncfileobj.variables['time']
    assert_equal(tvar.units, b'days since 2008-01-01')
    assert_equal(tvar.shape, (N_EG_ELS,))
    assert_equal(tvar[-1], N_EG_ELS - 1)
def assert_mask_matches(arr, expected_mask):
    """
    Check that the mask of *arr* equals *expected_mask*.

    Unlike numpy.ma.testutils.assert_mask_equal, this also accepts plain
    ndarrays, whose mask is taken to be all-False.

    Parameters
    ----------
    arr : ndarray or MaskedArray
        Array whose mask is checked.
    expected_mask : array_like of booleans
        The mask the array is expected to have.
    """
    actual = np.ma.getmaskarray(arr)
    assert_equal(actual, expected_mask)
def test_read_write_files():
    """Round-trip 'simple.nc' through every open-mode / mmap combination."""
    # test round trip for example file
    cwd = os.getcwd()
    try:
        tmpdir = tempfile.mkdtemp()
        os.chdir(tmpdir)
        with make_simple('simple.nc', 'w') as f:
            pass
        # read the file we just created in 'a' mode
        with netcdf_file('simple.nc', 'a') as f:
            check_simple(f)
            # add something
            f._attributes['appendRan'] = 1
        # To read the NetCDF file we just created::
        with netcdf_file('simple.nc') as f:
            # Using mmap is the default (but not on pypy)
            assert_equal(f.use_mmap, not IS_PYPY)
            check_simple(f)
            assert_equal(f._attributes['appendRan'], 1)
        # Read it in append (and check mmap is off)
        with netcdf_file('simple.nc', 'a') as f:
            assert_(not f.use_mmap)
            check_simple(f)
            assert_equal(f._attributes['appendRan'], 1)
        # Now without mmap
        with netcdf_file('simple.nc', mmap=False) as f:
            # Using mmap is the default
            assert_(not f.use_mmap)
            check_simple(f)
        # To read the NetCDF file we just created, as file object, no
        # mmap. When n * n_bytes(var_type) is not divisible by 4, this
        # raised an error in pupynere 1.0.12 and scipy rev 5893, because
        # calculated vsize was rounding up in units of 4 - see
        # https://www.unidata.ucar.edu/software/netcdf/guide_toc.html
        with open('simple.nc', 'rb') as fobj:
            with netcdf_file(fobj) as f:
                # by default, don't use mmap for file-like
                assert_(not f.use_mmap)
                check_simple(f)
        # Read file from fileobj, with mmap
        with suppress_warnings() as sup:
            if IS_PYPY:
                sup.filter(RuntimeWarning,
                           "Cannot close a netcdf_file opened with mmap=True.*")
            with open('simple.nc', 'rb') as fobj:
                with netcdf_file(fobj, mmap=True) as f:
                    assert_(f.use_mmap)
                    check_simple(f)
        # Again read it in append mode (adding another att)
        with open('simple.nc', 'r+b') as fobj:
            with netcdf_file(fobj, 'a') as f:
                assert_(not f.use_mmap)
                check_simple(f)
                f.createDimension('app_dim', 1)
                var = f.createVariable('app_var', 'i', ('app_dim',))
                var[:] = 42
        # And... check that app_var made it in...
        with netcdf_file('simple.nc') as f:
            check_simple(f)
            assert_equal(f.variables['app_var'][:], 42)
    finally:
        if IS_PYPY:
            # windows cannot remove a dead file held by a mmap
            # that has not been collected in PyPy
            break_cycles()
            break_cycles()
        os.chdir(cwd)
        shutil.rmtree(tmpdir)
def test_read_write_sio():
    """Round-trip the example dataset through in-memory BytesIO objects."""
    eg_sio1 = BytesIO()
    with make_simple(eg_sio1, 'w'):
        str_val = eg_sio1.getvalue()
    eg_sio2 = BytesIO(str_val)
    with netcdf_file(eg_sio2) as f2:
        check_simple(f2)
    # Test that error is raised if attempting mmap for sio
    eg_sio3 = BytesIO(str_val)
    assert_raises(ValueError, netcdf_file, eg_sio3, 'r', True)
    # Test 64-bit offset write / read
    eg_sio_64 = BytesIO()
    with make_simple(eg_sio_64, 'w', version=2) as f_64:
        str_val = eg_sio_64.getvalue()
    eg_sio_64 = BytesIO(str_val)
    with netcdf_file(eg_sio_64) as f_64:
        check_simple(f_64)
        assert_equal(f_64.version_byte, 2)
    # also when version 2 explicitly specified
    eg_sio_64 = BytesIO(str_val)
    with netcdf_file(eg_sio_64, version=2) as f_64:
        check_simple(f_64)
        assert_equal(f_64.version_byte, 2)
def test_bytes():
    """Byte-for-byte check of the classic-format stream written by sync()."""
    raw_file = BytesIO()
    f = netcdf_file(raw_file, mode='w')
    # Dataset only has a single variable, dimension and attribute to avoid
    # any ambiguity related to order.
    f.a = 'b'
    f.createDimension('dim', 1)
    var = f.createVariable('var', np.int16, ('dim',))
    var[0] = -9999
    var.c = 'd'
    f.sync()
    actual = raw_file.getvalue()
    expected = (b'CDF\x01'
                b'\x00\x00\x00\x00'
                b'\x00\x00\x00\x0a'
                b'\x00\x00\x00\x01'
                b'\x00\x00\x00\x03'
                b'dim\x00'
                b'\x00\x00\x00\x01'
                b'\x00\x00\x00\x0c'
                b'\x00\x00\x00\x01'
                b'\x00\x00\x00\x01'
                b'a\x00\x00\x00'
                b'\x00\x00\x00\x02'
                b'\x00\x00\x00\x01'
                b'b\x00\x00\x00'
                b'\x00\x00\x00\x0b'
                b'\x00\x00\x00\x01'
                b'\x00\x00\x00\x03'
                b'var\x00'
                b'\x00\x00\x00\x01'
                b'\x00\x00\x00\x00'
                b'\x00\x00\x00\x0c'
                b'\x00\x00\x00\x01'
                b'\x00\x00\x00\x01'
                b'c\x00\x00\x00'
                b'\x00\x00\x00\x02'
                b'\x00\x00\x00\x01'
                b'd\x00\x00\x00'
                b'\x00\x00\x00\x03'
                b'\x00\x00\x00\x04'
                b'\x00\x00\x00\x78'
                b'\xd8\xf1\x80\x01')
    assert_equal(actual, expected)
def test_encoded_fill_value():
    # Default fill for 1-byte strings is NUL; a valid user _FillValue of the
    # right size is honoured; a wrong-sized one falls back to the default.
    with netcdf_file(BytesIO(), mode='w') as f:
        f.createDimension('x', 1)
        v = f.createVariable('var', 'S1', ('x',))
        assert_equal(v._get_encoded_fill_value(), b'\x00')
        v._FillValue = b'\x01'
        assert_equal(v._get_encoded_fill_value(), b'\x01')
        v._FillValue = b'\x00\x00'  # invalid, wrong size
        assert_equal(v._get_encoded_fill_value(), b'\x00')
def test_read_example_data():
    # Every bundled .nc file must open cleanly, both mmap'ed and not.
    for nc_path in glob(pjoin(TEST_DATA_PATH, '*.nc')):
        with netcdf_file(nc_path, 'r'):
            pass
        with netcdf_file(nc_path, 'r', mmap=False):
            pass
def test_itemset_no_segfault_on_readonly():
    # Regression test for ticket #1202.
    # Open the test file in read-only mode.
    filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning,
                   "Cannot close a netcdf_file opened with mmap=True, when netcdf_variables or arrays referring to its data still exist")
        with netcdf_file(filename, 'r', mmap=True) as f:
            time_var = f.variables['time']
    # Writing to a read-only variable must raise cleanly, not crash.
    # time_var.assignValue(42) should raise a RuntimeError--not seg. fault!
    assert_raises(RuntimeError, time_var.assignValue, 42)
def test_appending_issue_gh_8625():
    # Appending to an in-memory dataset must not corrupt it (gh-8625).
    buf = BytesIO()
    with make_simple(buf, mode='w') as f:
        f.createDimension('x', 2)
        f.createVariable('x', float, ('x',))
        f.variables['x'][...] = 1
        f.flush()
        contents = buf.getvalue()
    buf = BytesIO(contents)
    with netcdf_file(buf, mode='a') as f:
        f.variables['x'][...] = 2
def test_write_invalid_dtype():
    # 64-bit integer dtypes are not representable in NetCDF classic format.
    bad_dtypes = ['int64', 'uint64']
    # On 64-bit platforms the native 'int'/'uint' aliases are 8 bytes too.
    for alias in ('int', 'uint'):
        if np.dtype(alias).itemsize == 8:  # 64-bit machines
            bad_dtypes.append(alias)
    with netcdf_file(BytesIO(), 'w') as f:
        f.createDimension('time', N_EG_ELS)
        for bad in bad_dtypes:
            assert_raises(ValueError, f.createVariable, 'time', bad, ('time',))
def test_flush_rewind():
    # Flushing twice must not grow the stream: the writer rewinds first.
    buf = BytesIO()
    with make_simple(buf, mode='w') as f:
        f.createDimension('x', 4)  # x is used in createVariable
        v = f.createVariable('v', 'i2', ['x'])
        v[:] = 1
        f.flush()
        size_first = len(buf.getvalue())
        f.flush()
        size_second = len(buf.getvalue())
    assert_(size_first == size_second)
def test_dtype_specifiers():
    # Numpy 1.7.0-dev had a bug where 'i2' wouldn't work; all equivalent
    # spellings of int16 must be accepted by createVariable.
    with make_simple(BytesIO(), mode='w') as f:
        f.createDimension('x', 4)
        for vname, spec in [('v1', 'i2'),
                            ('v2', np.int16),
                            ('v3', np.dtype(np.int16))]:
            f.createVariable(vname, spec, ['x'])
def test_ticket_1720():
    # Round-trip a float variable through an in-memory file (ticket #1720).
    values = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    buf = BytesIO()
    with netcdf_file(buf, 'w') as f:
        f.history = 'Created for a test'
        f.createDimension('float_var', 10)
        fv = f.createVariable('float_var', 'f', ('float_var',))
        fv[:] = values
        fv.units = 'metres'
        f.flush()
        contents = buf.getvalue()
    with netcdf_file(BytesIO(contents), 'r') as f:
        assert_equal(f.history, b'Created for a test')
        fv = f.variables['float_var']
        assert_equal(fv.units, b'metres')
        assert_equal(fv.shape, (10,))
        assert_allclose(fv[:], values)
def test_mmaps_segfault():
    # Arrays read from an mmap'ed file must stay usable (no crash) after the
    # file object goes out of scope, and clean deletion must not warn.
    filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
    if not IS_PYPY:
        with warnings.catch_warnings():
            warnings.simplefilter("error")
            with netcdf_file(filename, mmap=True) as f:
                x = f.variables['lat'][:]
                # should not raise warnings
                del x
    def doit():
        with netcdf_file(filename, mmap=True) as f:
            return f.variables['lat'][:]
    # should not crash
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning,
                   "Cannot close a netcdf_file opened with mmap=True, when netcdf_variables or arrays referring to its data still exist")
        x = doit()
    x.sum()
def test_zero_dimensional_var():
    # A scalar (zero-dimensional) variable is never a record variable.
    buf = BytesIO()
    with make_simple(buf, 'w') as f:
        v = f.createVariable('zerodim', 'i2', [])
        # This is checking that .isrec returns a boolean - don't simplify it
        # to 'assert not ...'
        assert v.isrec is False, v.isrec
        f.flush()
def test_byte_gatts():
    # Global "string" attributes come back as bytes regardless of whether
    # they were set as bytes or str (pre-py3k behaviour preserved).
    with in_tempdir():
        fname = 'g_byte_atts.nc'
        f = netcdf_file(fname, 'w')
        f._attributes['holy'] = b'grail'
        f._attributes['witch'] = 'floats'
        f.close()
        f = netcdf_file(fname, 'r')
        assert_equal(f._attributes['holy'], b'grail')
        assert_equal(f._attributes['witch'], b'floats')
        f.close()
def test_open_append():
    with in_tempdir():
        fname = 'append_dat.nc'
        # 'w': write one attribute.
        f = netcdf_file(fname, 'w')
        f._attributes['Kilroy'] = 'was here'
        f.close()
        # 'a': the attribute survives and a new one can be added.
        f = netcdf_file(fname, 'a')
        assert_equal(f._attributes['Kilroy'], b'was here')
        f._attributes['naughty'] = b'Zoot'
        f.close()
        # 'r': both attributes are present.
        f = netcdf_file(fname, 'r')
        assert_equal(f._attributes['Kilroy'], b'was here')
        assert_equal(f._attributes['naughty'], b'Zoot')
        f.close()
def test_append_recordDimension():
    """Appending along an unlimited (record) dimension must grow the file."""
    dataSize = 100
    with in_tempdir():
        # Create file with record time dimension
        with netcdf_file('withRecordDimension.nc', 'w') as f:
            f.createDimension('time', None)
            f.createVariable('time', 'd', ('time',))
            f.createDimension('x', dataSize)
            x = f.createVariable('x', 'd', ('x',))
            x[:] = np.array(range(dataSize))
            f.createDimension('y', dataSize)
            y = f.createVariable('y', 'd', ('y',))
            y[:] = np.array(range(dataSize))
            f.createVariable('testData', 'i', ('time', 'x', 'y'))
            f.flush()
            f.close()
        for i in range(2):
            # Open the file in append mode and add data
            with netcdf_file('withRecordDimension.nc', 'a') as f:
                f.variables['time'].data = np.append(f.variables["time"].data, i)
                f.variables['testData'][i, :, :] = np.full((dataSize, dataSize), i)
                f.flush()
            # Read the file and check that append worked
            with netcdf_file('withRecordDimension.nc') as f:
                assert_equal(f.variables['time'][-1], i)
                assert_equal(f.variables['testData'][-1, :, :].copy(), np.full((dataSize, dataSize), i))
                assert_equal(f.variables['time'].data.shape[0], i+1)
                assert_equal(f.variables['testData'].data.shape[0], i+1)
        # Read the file and check that 'data' was not saved as user defined
        # attribute of testData variable during append operation
        with netcdf_file('withRecordDimension.nc') as f:
            with assert_raises(KeyError) as ar:
                f.variables['testData']._attributes['data']
            ex = ar.value
            assert_equal(ex.args[0], 'data')
def test_maskandscale():
    """Round-trip masked/scaled data: read example_2.nc, then write our own."""
    t = np.linspace(20, 30, 15)
    t[3] = 100
    tm = np.ma.masked_greater(t, 99)
    fname = pjoin(TEST_DATA_PATH, 'example_2.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        Temp = f.variables['Temperature']
        assert_equal(Temp.missing_value, 9999)
        assert_equal(Temp.add_offset, 20)
        assert_equal(Temp.scale_factor, np.float32(0.01))
        found = Temp[:].compressed()
        del Temp  # Remove ref to mmap, so file can be closed.
        expected = np.round(tm.compressed(), 2)
        assert_allclose(found, expected)
    with in_tempdir():
        newfname = 'ms.nc'
        f = netcdf_file(newfname, 'w', maskandscale=True)
        f.createDimension('Temperature', len(tm))
        temp = f.createVariable('Temperature', 'i', ('Temperature',))
        temp.missing_value = 9999
        temp.scale_factor = 0.01
        temp.add_offset = 20
        temp[:] = tm
        f.close()
        with netcdf_file(newfname, maskandscale=True) as f:
            Temp = f.variables['Temperature']
            assert_equal(Temp.missing_value, 9999)
            assert_equal(Temp.add_offset, 20)
            assert_equal(Temp.scale_factor, np.float32(0.01))
            expected = np.round(tm.compressed(), 2)
            found = Temp[:].compressed()
            del Temp
            assert_allclose(found, expected)
# ------------------------------------------------------------------------
# Test reading with masked values (_FillValue / missing_value)
# ------------------------------------------------------------------------
def test_read_withValuesNearFillValue():
    # Regression test for ticket #5626: a fill value of 0 must still mask.
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        values = f.variables['var1_fillval0'][:]
        assert_mask_matches(values, [False, True, False])
def test_read_withNoFillValue():
    # A variable without any fill value must come back completely unmasked
    # even when maskandscale=True.
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        values = f.variables['var2_noFillval'][:]
        assert_mask_matches(values, [False, False, False])
        assert_equal(values, [1, 2, 3])
def test_read_withFillValueAndMissingValue():
    # When both _FillValue and missing_value exist, _FillValue wins.
    IRRELEVANT_VALUE = 9999
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        values = f.variables['var3_fillvalAndMissingValue'][:]
        assert_mask_matches(values, [True, False, False])
        assert_equal(values, [IRRELEVANT_VALUE, 2, 3])
def test_read_withMissingValue():
    # With only missing_value (no _FillValue), missing_value is used to mask.
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        values = f.variables['var4_missingValue'][:]
        assert_mask_matches(values, [False, True, False])
def test_read_withFillValNaN():
    # A NaN fill value must mask the matching element.
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        values = f.variables['var5_fillvalNaN'][:]
        assert_mask_matches(values, [False, True, False])
def test_read_withChar():
    # Character variables support fill-value masking too.
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        values = f.variables['var6_char'][:]
        assert_mask_matches(values, [False, True, False])
def test_read_with2dVar():
    # Masking applies elementwise to multidimensional variables.
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    with netcdf_file(fname, maskandscale=True) as f:
        values = f.variables['var7_2d'][:]
        assert_mask_matches(values, [[True, False], [False, False], [False, True]])
def test_read_withMaskAndScaleFalse():
    # With maskandscale=False the fill values are returned verbatim,
    # completely unmasked.
    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
    # Open file with mmap=False to avoid problems with closing a mmap'ed file
    # when arrays referring to its data still exist:
    with netcdf_file(fname, maskandscale=False, mmap=False) as f:
        values = f.variables['var3_fillvalAndMissingValue'][:]
        assert_mask_matches(values, [False, False, False])
        assert_equal(values, [1, 2, 3])
| 19,313
| 34.503676
| 137
|
py
|
scipy
|
scipy-main/scipy/io/tests/test_fortran.py
|
''' Tests for fortran sequential files '''
import tempfile
import shutil
from os import path
from glob import iglob
import re
from numpy.testing import assert_equal, assert_allclose
import numpy as np
import pytest
from scipy.io import (FortranFile,
_test_fortran,
FortranEOFError,
FortranFormattingError)
DATA_PATH = path.join(path.dirname(__file__), 'data')
def test_fortranfiles_read():
    """Read each sample unformatted file and compare with arange data.

    FortranFile is now used as a context manager so the handle is closed
    even when ``assert_equal`` fails (the original leaked it on error).
    """
    for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")):
        m = re.search(r'fortran-([^-]+)-(\d+)x(\d+)x(\d+).dat', filename, re.I)
        if not m:
            raise RuntimeError("Couldn't match %s filename to regex" % filename)
        dims = (int(m.group(2)), int(m.group(3)), int(m.group(4)))
        # 's' in the name marks byte order; map to little-endian dtype chars.
        dtype = m.group(1).replace('s', '<')
        with FortranFile(filename, 'r', '<u4') as f:
            data = f.read_record(dtype=dtype).reshape(dims, order='F')
        expected = np.arange(np.prod(dims)).reshape(dims).astype(dtype)
        assert_equal(data, expected)
def test_fortranfiles_mixed_record():
    # One record holding an i4, an f4, an i8 and a pair of f8 values.
    fname = path.join(DATA_PATH, "fortran-mixed.dat")
    with FortranFile(fname, 'r', '<u4') as f:
        rec = f.read_record('<i4,<f4,<i8,(2)<f8')
        assert_equal(rec['f0'][0], 1)
        assert_allclose(rec['f1'][0], 2.3)
        assert_equal(rec['f2'][0], 4)
        assert_allclose(rec['f3'][0], [5.6, 7.8])
def test_fortranfiles_write():
    """Write arange data with FortranFile; compare bytes with the samples.

    All three file handles are now managed with ``with`` blocks so nothing
    leaks when the byte comparison fails (the original left the FortranFile
    and both plain file objects open on error).
    """
    for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")):
        m = re.search(r'fortran-([^-]+)-(\d+)x(\d+)x(\d+).dat', filename, re.I)
        if not m:
            raise RuntimeError("Couldn't match %s filename to regex" % filename)
        dims = (int(m.group(2)), int(m.group(3)), int(m.group(4)))
        dtype = m.group(1).replace('s', '<')
        data = np.arange(np.prod(dims)).reshape(dims).astype(dtype)
        tmpdir = tempfile.mkdtemp()
        try:
            testFile = path.join(tmpdir, path.basename(filename))
            with FortranFile(testFile, 'w', '<u4') as f:
                f.write_record(data.T)
            with open(filename, 'rb') as originalfile, \
                    open(testFile, 'rb') as newfile:
                assert_equal(originalfile.read(), newfile.read(),
                             err_msg=filename)
        finally:
            shutil.rmtree(tmpdir)
def test_fortranfile_read_mixed_record():
    # The data file fortran-3x3d-2i.dat contains the program that
    # produced it at the end.
    #
    # double precision :: a(3,3)
    # integer :: b(2)
    # ...
    # open(1, file='fortran-3x3d-2i.dat', form='unformatted')
    # write(1) a, b
    # close(1)
    #
    filename = path.join(DATA_PATH, "fortran-3x3d-2i.dat")
    with FortranFile(filename, 'r', '<u4') as f:
        record = f.read_record('(3,3)<f8', '2<i4')
    # Fortran is column-major, hence the transposes in the comparison.
    ax = np.arange(3*3).reshape(3, 3).astype(np.double)
    bx = np.array([-1, -2], dtype=np.int32)
    assert_equal(record[0], ax.T)
    assert_equal(record[1], bx.T)
def test_fortranfile_write_mixed_record(tmpdir):
    # Round-trip records that mix several dtypes through a single file.
    target = path.join(str(tmpdir), 'test.dat')
    cases = [
        (('f4', 'f4', 'i4'), (np.float32(2), np.float32(3), np.int32(100))),
        (('4f4', '(3,3)f4', '8i4'),
         (np.random.randint(255, size=[4]).astype(np.float32),
          np.random.randint(255, size=[3, 3]).astype(np.float32),
          np.random.randint(255, size=[8]).astype(np.int32))),
    ]
    for dtypes, written in cases:
        with FortranFile(target, 'w') as f:
            f.write_record(*written)
        with FortranFile(target, 'r') as f:
            read_back = f.read_record(*dtypes)
        assert_equal(len(written), len(read_back))
        for expected, actual in zip(written, read_back):
            assert_equal(actual, expected)
def test_fortran_roundtrip(tmpdir):
    """Cross-check FortranFile against the compiled _test_fortran readers."""
    filename = path.join(str(tmpdir), 'test.dat')
    np.random.seed(1)
    # double precision
    m, n, k = 5, 3, 2
    a = np.random.randn(m, n, k)
    with FortranFile(filename, 'w') as f:
        f.write_record(a.T)
    a2 = _test_fortran.read_unformatted_double(m, n, k, filename)
    with FortranFile(filename, 'r') as f:
        a3 = f.read_record('(2,3,5)f8').T
    assert_equal(a2, a)
    assert_equal(a3, a)
    # integer
    m, n, k = 5, 3, 2
    a = np.random.randn(m, n, k).astype(np.int32)
    with FortranFile(filename, 'w') as f:
        f.write_record(a.T)
    a2 = _test_fortran.read_unformatted_int(m, n, k, filename)
    with FortranFile(filename, 'r') as f:
        a3 = f.read_record('(2,3,5)i4').T
    assert_equal(a2, a)
    assert_equal(a3, a)
    # mixed
    m, n, k = 5, 3, 2
    a = np.random.randn(m, n)
    b = np.random.randn(k).astype(np.intc)
    with FortranFile(filename, 'w') as f:
        f.write_record(a.T, b.T)
    a2, b2 = _test_fortran.read_unformatted_mixed(m, n, k, filename)
    with FortranFile(filename, 'r') as f:
        a3, b3 = f.read_record('(3,5)f8', '2i4')
        a3 = a3.T
    assert_equal(a2, a)
    assert_equal(a3, a)
    assert_equal(b2, b)
    assert_equal(b3, b)
def test_fortran_eof_ok(tmpdir):
    # Reading exactly to end-of-file, then once more, raises FortranEOFError.
    fname = path.join(str(tmpdir), "scratch")
    np.random.seed(1)
    with FortranFile(fname, 'w') as f:
        f.write_record(np.random.randn(5))
        f.write_record(np.random.randn(3))
    with FortranFile(fname, 'r') as f:
        assert len(f.read_reals()) == 5
        assert len(f.read_reals()) == 3
        with pytest.raises(FortranEOFError):
            f.read_reals()
def test_fortran_eof_broken_size(tmpdir):
    # One stray trailing byte makes the final (absent) record malformed.
    fname = path.join(str(tmpdir), "scratch")
    np.random.seed(1)
    with FortranFile(fname, 'w') as f:
        f.write_record(np.random.randn(5))
        f.write_record(np.random.randn(3))
    with open(fname, "ab") as fh:
        fh.write(b"\xff")
    with FortranFile(fname, 'r') as f:
        assert len(f.read_reals()) == 5
        assert len(f.read_reals()) == 3
        with pytest.raises(FortranFormattingError):
            f.read_reals()
def test_fortran_bogus_size(tmpdir):
    # Replace the whole file with a bogus 2-byte header ('w+b' truncates):
    # the very first read must report a formatting error.
    fname = path.join(str(tmpdir), "scratch")
    np.random.seed(1)
    with FortranFile(fname, 'w') as f:
        f.write_record(np.random.randn(5))
        f.write_record(np.random.randn(3))
    with open(fname, "w+b") as fh:
        fh.write(b"\xff\xff")
    with FortranFile(fname, 'r') as f:
        with pytest.raises(FortranFormattingError):
            f.read_reals()
def test_fortran_eof_broken_record(tmpdir):
    # Truncating inside the second record corrupts it; the first still reads.
    fname = path.join(str(tmpdir), "scratch")
    np.random.seed(1)
    with FortranFile(fname, 'w') as f:
        f.write_record(np.random.randn(5))
        f.write_record(np.random.randn(3))
    with open(fname, "ab") as fh:
        fh.truncate(path.getsize(fname)-20)
    with FortranFile(fname, 'r') as f:
        assert len(f.read_reals()) == 5
        with pytest.raises(FortranFormattingError):
            f.read_reals()
def test_fortran_eof_multidimensional(tmpdir):
    # Same truncation scenario, but with a structured multidimensional dtype.
    fname = path.join(str(tmpdir), "scratch")
    n, m, q = 3, 5, 7
    dt = np.dtype([("field", np.float64, (n, m))])
    a = np.zeros(q, dtype=dt)
    with FortranFile(fname, 'w') as f:
        f.write_record(a[0])
        f.write_record(a)
        f.write_record(a)
    with open(fname, "ab") as fh:
        fh.truncate(path.getsize(fname)-20)
    with FortranFile(fname, 'r') as f:
        assert len(f.read_record(dtype=dt)) == 1
        assert len(f.read_record(dtype=dt)) == q
        with pytest.raises(FortranFormattingError):
            f.read_record(dtype=dt)
| 7,572
| 30.953586
| 92
|
py
|
scipy
|
scipy-main/scipy/io/tests/test_wavfile.py
|
import os
import sys
from io import BytesIO
import numpy as np
from numpy.testing import (assert_equal, assert_, assert_array_equal,
break_cycles, suppress_warnings, IS_PYPY)
import pytest
from pytest import raises, warns
from scipy.io import wavfile
def datafile(fn):
    """Absolute path of *fn* inside this test module's data directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'data', fn)
def test_read_1():
    # 32-bit PCM (stored via the WAVE_FORMAT_EXTENSIBLE header).
    for use_mmap in (False, True):
        rate, data = wavfile.read(datafile('test-44100Hz-le-1ch-4bytes.wav'),
                                  mmap=use_mmap)
        assert_equal(rate, 44100)
        assert_(np.issubdtype(data.dtype, np.int32))
        assert_equal(data.shape, (4410,))
        del data
def test_read_2():
    # 8-bit unsigned PCM, two channels.
    for use_mmap in (False, True):
        rate, data = wavfile.read(datafile('test-8000Hz-le-2ch-1byteu.wav'),
                                  mmap=use_mmap)
        assert_equal(rate, 8000)
        assert_(np.issubdtype(data.dtype, np.uint8))
        assert_equal(data.shape, (800, 2))
        del data
def test_read_3():
    # Little-endian 32-bit float data.
    for use_mmap in (False, True):
        rate, data = wavfile.read(datafile('test-44100Hz-2ch-32bit-float-le.wav'),
                                  mmap=use_mmap)
        assert_equal(rate, 44100)
        assert_(np.issubdtype(data.dtype, np.float32))
        assert_equal(data.shape, (441, 2))
        del data
def test_read_4():
    # File contains an unsupported 'PEAK' chunk, skipped with a warning.
    for use_mmap in (False, True):
        with suppress_warnings() as sup:
            sup.filter(wavfile.WavFileWarning,
                       "Chunk .non-data. not understood, skipping it")
            rate, data = wavfile.read(
                datafile('test-48000Hz-2ch-64bit-float-le-wavex.wav'),
                mmap=use_mmap)
        assert_equal(rate, 48000)
        assert_(np.issubdtype(data.dtype, np.float64))
        assert_equal(data.shape, (480, 2))
        del data
def test_read_5():
    # Big-endian 32-bit float data.
    for use_mmap in (False, True):
        rate, data = wavfile.read(datafile('test-44100Hz-2ch-32bit-float-be.wav'),
                                  mmap=use_mmap)
        assert_equal(rate, 44100)
        assert_(np.issubdtype(data.dtype, np.float32))
        # dtype must be explicitly big-endian, or native on big-endian hosts.
        assert_(data.dtype.byteorder == '>' or (sys.byteorder == 'big' and
                                                data.dtype.byteorder == '='))
        assert_equal(data.shape, (441, 2))
        del data
def test_5_bit_odd_size_no_pad():
    """5-bit samples in a 1-byte container; missing pad byte is tolerated."""
    # 5-bit, 1 B container, 5 channels, 9 samples, 45 B data chunk
    # Generated by LTspice, which incorrectly omits pad byte, but should be
    # readable anyway
    for mmap in [False, True]:
        filename = 'test-8000Hz-le-5ch-9S-5bit.wav'
        rate, data = wavfile.read(datafile(filename), mmap=mmap)
        assert_equal(rate, 8000)
        assert_(np.issubdtype(data.dtype, np.uint8))
        assert_equal(data.shape, (9, 5))
        # 8-5 = 3 LSBits should be 0
        assert_equal(data & 0b00000111, 0)
        # Unsigned
        assert_equal(data.max(), 0b11111000)  # Highest possible
        assert_equal(data[0, 0], 128)  # Midpoint is 128 for <= 8-bit
        assert_equal(data.min(), 0)  # Lowest possible
        del data
def test_12_bit_even_size():
    """12-bit samples in a 2-byte container, signed convention."""
    # 12-bit, 2 B container, 4 channels, 9 samples, 72 B data chunk
    # Generated by LTspice from 1 Vpk sine waves
    for mmap in [False, True]:
        filename = 'test-8000Hz-le-4ch-9S-12bit.wav'
        rate, data = wavfile.read(datafile(filename), mmap=mmap)
        assert_equal(rate, 8000)
        assert_(np.issubdtype(data.dtype, np.int16))
        assert_equal(data.shape, (9, 4))
        # 16-12 = 4 LSBits should be 0
        assert_equal(data & 0b00000000_00001111, 0)
        # Signed
        assert_equal(data.max(), 0b01111111_11110000)  # Highest possible
        assert_equal(data[0, 0], 0)  # Midpoint is 0 for >= 9-bit
        assert_equal(data.min(), -0b10000000_00000000)  # Lowest possible
        del data
def test_24_bit_odd_size_with_pad():
    """24-bit samples in a 3-byte container; correct pad byte, no warnings."""
    # 24-bit, 3 B container, 3 channels, 5 samples, 45 B data chunk
    # Should not raise any warnings about the data chunk pad byte
    filename = 'test-8000Hz-le-3ch-5S-24bit.wav'
    rate, data = wavfile.read(datafile(filename), mmap=False)
    assert_equal(rate, 8000)
    assert_(np.issubdtype(data.dtype, np.int32))
    assert_equal(data.shape, (5, 3))
    # All LSBytes should be 0
    assert_equal(data & 0xff, 0)
    # Hand-made max/min samples under different conventions:
    #  2**(N-1)     2**(N-1)-1     LSB
    assert_equal(data, [[-0x8000_0000, -0x7fff_ff00, -0x200],
                        [-0x4000_0000, -0x3fff_ff00, -0x100],
                        [+0x0000_0000, +0x0000_0000, +0x000],
                        [+0x4000_0000, +0x3fff_ff00, +0x100],
                        [+0x7fff_ff00, +0x7fff_ff00, +0x200]])
    #                 ^ clipped
def test_20_bit_extra_data():
    """20-bit samples whose 3-byte container carries data beyond 20 bits."""
    # 20-bit, 3 B container, 1 channel, 10 samples, 30 B data chunk
    # with extra data filling container beyond the bit depth
    filename = 'test-8000Hz-le-1ch-10S-20bit-extra.wav'
    rate, data = wavfile.read(datafile(filename), mmap=False)
    assert_equal(rate, 1234)
    assert_(np.issubdtype(data.dtype, np.int32))
    assert_equal(data.shape, (10,))
    # All LSBytes should still be 0, because 3 B container in 4 B dtype
    assert_equal(data & 0xff, 0)
    # But it should load the data beyond 20 bits
    assert_((data & 0xf00).any())
    # Full-scale positive/negative samples, then being halved each time
    assert_equal(data, [+0x7ffff000,       # +full-scale 20-bit
                        -0x7ffff000,       # -full-scale 20-bit
                        +0x7ffff000 >> 1,  # +1/2
                        -0x7ffff000 >> 1,  # -1/2
                        +0x7ffff000 >> 2,  # +1/4
                        -0x7ffff000 >> 2,  # -1/4
                        +0x7ffff000 >> 3,  # +1/8
                        -0x7ffff000 >> 3,  # -1/8
                        +0x7ffff000 >> 4,  # +1/16
                        -0x7ffff000 >> 4,  # -1/16
                        ])
def test_36_bit_odd_size():
    """36-bit samples in a 5-byte container, promoted to int64."""
    # 36-bit, 5 B container, 3 channels, 5 samples, 75 B data chunk + pad
    filename = 'test-8000Hz-le-3ch-5S-36bit.wav'
    rate, data = wavfile.read(datafile(filename), mmap=False)
    assert_equal(rate, 8000)
    assert_(np.issubdtype(data.dtype, np.int64))
    assert_equal(data.shape, (5, 3))
    # 28 LSBits should be 0
    assert_equal(data & 0xfffffff, 0)
    # Hand-made max/min samples under different conventions:
    #  Fixed-point 2**(N-1)  Full-scale 2**(N-1)-1  LSB
    correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_f000_0000, -0x2000_0000],
               [-0x4000_0000_0000_0000, -0x3fff_ffff_f000_0000, -0x1000_0000],
               [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x0000_0000],
               [+0x4000_0000_0000_0000, +0x3fff_ffff_f000_0000, +0x1000_0000],
               [+0x7fff_ffff_f000_0000, +0x7fff_ffff_f000_0000, +0x2000_0000]]
    #                 ^ clipped
    assert_equal(data, correct)
def test_45_bit_even_size():
    """45-bit samples in a 6-byte container, promoted to int64."""
    # 45-bit, 6 B container, 3 channels, 5 samples, 90 B data chunk
    filename = 'test-8000Hz-le-3ch-5S-45bit.wav'
    rate, data = wavfile.read(datafile(filename), mmap=False)
    assert_equal(rate, 8000)
    assert_(np.issubdtype(data.dtype, np.int64))
    assert_equal(data.shape, (5, 3))
    # 19 LSBits should be 0
    assert_equal(data & 0x7ffff, 0)
    # Hand-made max/min samples under different conventions:
    #  Fixed-point 2**(N-1)  Full-scale 2**(N-1)-1  LSB
    correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_fff8_0000, -0x10_0000],
               [-0x4000_0000_0000_0000, -0x3fff_ffff_fff8_0000, -0x08_0000],
               [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x00_0000],
               [+0x4000_0000_0000_0000, +0x3fff_ffff_fff8_0000, +0x08_0000],
               [+0x7fff_ffff_fff8_0000, +0x7fff_ffff_fff8_0000, +0x10_0000]]
    #                 ^ clipped
    assert_equal(data, correct)
def test_53_bit_odd_size():
    """53-bit samples in a 7-byte container, promoted to int64."""
    # 53-bit, 7 B container, 3 channels, 5 samples, 105 B data chunk + pad
    filename = 'test-8000Hz-le-3ch-5S-53bit.wav'
    rate, data = wavfile.read(datafile(filename), mmap=False)
    assert_equal(rate, 8000)
    assert_(np.issubdtype(data.dtype, np.int64))
    assert_equal(data.shape, (5, 3))
    # 11 LSBits should be 0
    assert_equal(data & 0x7ff, 0)
    # Hand-made max/min samples under different conventions:
    #  Fixed-point 2**(N-1)  Full-scale 2**(N-1)-1  LSB
    correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_ffff_f800, -0x1000],
               [-0x4000_0000_0000_0000, -0x3fff_ffff_ffff_f800, -0x0800],
               [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x0000],
               [+0x4000_0000_0000_0000, +0x3fff_ffff_ffff_f800, +0x0800],
               [+0x7fff_ffff_ffff_f800, +0x7fff_ffff_ffff_f800, +0x1000]]
    #                 ^ clipped
    assert_equal(data, correct)
def test_64_bit_even_size():
    """64-bit samples: 8 B container, 3 channels, 5 samples, 120 B data.

    Bug fix: the loop variable ``mmap`` was previously ignored —
    ``wavfile.read`` was always called with ``mmap=False`` — so the
    mmap=True path was never exercised. Pass the loop variable through.
    """
    for mmap in [False, True]:
        filename = 'test-8000Hz-le-3ch-5S-64bit.wav'
        rate, data = wavfile.read(datafile(filename), mmap=mmap)
        assert_equal(rate, 8000)
        assert_(np.issubdtype(data.dtype, np.int64))
        assert_equal(data.shape, (5, 3))
        # Hand-made max/min samples under different conventions:
        #  Fixed-point 2**(N-1)  Full-scale 2**(N-1)-1  LSB
        correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_ffff_ffff, -0x2],
                   [-0x4000_0000_0000_0000, -0x3fff_ffff_ffff_ffff, -0x1],
                   [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x0],
                   [+0x4000_0000_0000_0000, +0x3fff_ffff_ffff_ffff, +0x1],
                   [+0x7fff_ffff_ffff_ffff, +0x7fff_ffff_ffff_ffff, +0x2]]
        #                 ^ clipped
        assert_equal(data, correct)
        del data
def test_unsupported_mmap():
    # Containers whose sample size maps to no numpy dtype cannot be mmap'ed.
    unmappable = {'test-8000Hz-le-3ch-5S-24bit.wav',
                  'test-8000Hz-le-3ch-5S-36bit.wav',
                  'test-8000Hz-le-3ch-5S-45bit.wav',
                  'test-8000Hz-le-3ch-5S-53bit.wav',
                  'test-8000Hz-le-1ch-10S-20bit-extra.wav'}
    for filename in unmappable:
        with raises(ValueError, match="mmap.*not compatible"):
            rate, data = wavfile.read(datafile(filename), mmap=True)
def test_rifx():
    # A RIFX (big-endian) file must decode identically to its RIFF twin.
    pairs = {('test-44100Hz-be-1ch-4bytes.wav',
              'test-44100Hz-le-1ch-4bytes.wav'),
             ('test-8000Hz-be-3ch-5S-24bit.wav',
              'test-8000Hz-le-3ch-5S-24bit.wav')}
    for rifx_name, riff_name in pairs:
        rate_be, data_be = wavfile.read(datafile(rifx_name), mmap=False)
        rate_le, data_le = wavfile.read(datafile(riff_name), mmap=False)
        assert_equal(rate_be, rate_le)
        assert_equal(data_be, data_le)
def test_read_unknown_filetype_fail():
    # A NetCDF file is neither RIFF nor RIFX and must be rejected.
    for use_mmap in (False, True):
        with open(datafile('example_1.nc'), 'rb') as fp:
            with raises(ValueError, match="CDF.*'RIFF' and 'RIFX' supported"):
                wavfile.read(fp, mmap=use_mmap)
def test_read_unknown_riff_form_type():
    """A RIFF container that is not WAVE form (an .ani cursor) must fail."""
    filename = 'Transparent Busy.ani'
    for mmap in [False, True]:
        with open(datafile(filename), 'rb') as fp:
            with raises(ValueError, match='Not a WAV file.*ACON'):
                wavfile.read(fp, mmap=mmap)
def test_read_unknown_wave_format():
    """A WAVE file in an unsupported encoding (mu-law) must fail."""
    filename = 'test-8000Hz-le-1ch-1byte-ulaw.wav'
    for mmap in [False, True]:
        with open(datafile(filename), 'rb') as fp:
            with raises(ValueError, match='Unknown wave file format.*MULAW.*'
                        'Supported formats'):
                wavfile.read(fp, mmap=mmap)
def test_read_early_eof_with_data():
    """EOF inside the 'data' chunk: warn, but keep the samples read so far."""
    filename = 'test-44100Hz-le-1ch-4bytes-early-eof.wav'
    for mmap in [False, True]:
        with open(datafile(filename), 'rb') as fp:
            with warns(wavfile.WavFileWarning, match='Reached EOF'):
                rate, data = wavfile.read(fp, mmap=mmap)
                assert data.size > 0
                assert rate == 44100
                # truncated array must still be writable (gh-12176)
                data[0] = 0
def test_read_early_eof():
    """EOF right after the 'fact' chunk: no data at all, must raise."""
    filename = 'test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav'
    for mmap in [False, True]:
        with open(datafile(filename), 'rb') as fp:
            with raises(ValueError, match="Unexpected end of file."):
                wavfile.read(fp, mmap=mmap)
def test_read_incomplete_chunk():
    """EOF in the middle of the 'fmt ' chunk ID: must raise, no data read."""
    filename = 'test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav'
    for mmap in [False, True]:
        with open(datafile(filename), 'rb') as fp:
            with raises(ValueError, match="Incomplete chunk ID.*b'f'"):
                wavfile.read(fp, mmap=mmap)
def test_read_inconsistent_header():
    """Header whose size fields contradict each other must be rejected."""
    filename = 'test-8000Hz-le-3ch-5S-24bit-inconsistent.wav'
    for mmap in [False, True]:
        with open(datafile(filename), 'rb') as fp:
            with raises(ValueError, match="header is invalid"):
                wavfile.read(fp, mmap=mmap)
# signed 8-bit integer PCM is not allowed
# unsigned > 8-bit integer PCM is not allowed
# 8- or 16-bit float PCM is not expected
# g and q are platform-dependent, so not included
@pytest.mark.parametrize("dt_str", ["<i2", "<i4", "<i8", "<f4", "<f8",
                                    ">i2", ">i4", ">i8", ">f4", ">f8", '|u1'])
@pytest.mark.parametrize("channels", [1, 2, 5])
@pytest.mark.parametrize("rate", [8000, 32000])
@pytest.mark.parametrize("mmap", [False, True])
@pytest.mark.parametrize("realfile", [False, True])
def test_write_roundtrip(realfile, mmap, rate, channels, dt_str, tmpdir):
    """write() then read() must return the same rate and samples."""
    dtype = np.dtype(dt_str)
    target = str(tmpdir.join('temp.wav')) if realfile else BytesIO()
    samples = np.random.rand(100, channels)
    if channels == 1:
        samples = samples[:, 0]
    # floats stay in [-1, 1]; integers are scaled up before truncation
    if dtype.kind == 'f':
        samples = samples.astype(dtype)
    else:
        samples = (samples*128).astype(dtype)
    wavfile.write(target, rate, samples)
    rate_back, samples_back = wavfile.read(target, mmap=mmap)
    assert_equal(rate, rate_back)
    assert_(samples_back.dtype.byteorder in ('<', '=', '|'),
            msg=samples_back.dtype)
    assert_array_equal(samples, samples_back)
    # writability check (gh-12176): a real file maps writable, while a
    # BytesIO-backed mmap is read-only
    if realfile:
        samples_back[0] = 0
    else:
        with pytest.raises(ValueError, match='read-only'):
            samples_back[0] = 0
    if realfile and mmap and IS_PYPY and sys.platform == 'win32':
        # windows cannot remove a dead file held by a mmap but not collected
        # in PyPy; since the filename gets reused in this test, clean this up
        break_cycles()
        break_cycles()
| 15,303
| 35.70024
| 78
|
py
|
scipy
|
scipy-main/scipy/io/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/io/tests/test_idl.py
|
from os import path
import warnings
import numpy as np
from numpy.testing import (assert_equal, assert_array_equal,
assert_, suppress_warnings)
import pytest
from scipy.io import readsav
from scipy.io import _idl
DATA_PATH = path.join(path.dirname(__file__), 'data')
def assert_identical(a, b):
    """Check that ``a`` equals ``b`` in both value and type."""
    assert_equal(a, b)
    if type(b) is not str:
        # compare numpy scalar/array dtypes for everything but plain str
        assert_equal(np.asarray(a).dtype.type, np.asarray(b).dtype.type)
    else:
        assert_equal(type(a), type(b))
def assert_array_identical(a, b):
    """Check that two arrays agree in both contents and dtype."""
    type_a, type_b = a.dtype.type, b.dtype.type
    assert_array_equal(a, b)
    assert_equal(type_a, type_b)
# Define vectorized ID function for pointer arrays: maps id() over every
# element so tests can verify that elements alias the same heap object.
vect_id = np.vectorize(id)
class TestIdict:
    """readsav must populate a user-supplied dictionary in place."""

    def test_idict(self):
        seed = {'a': np.int16(999)}
        seed_id = id(seed)
        out = readsav(path.join(DATA_PATH, 'scalar_byte.sav'),
                      idict=seed, verbose=False)
        # the very same dict object is returned, extended with file contents
        assert_equal(seed_id, id(out))
        assert_('a' in out)
        assert_identical(out['a'], np.int16(999))
        assert_identical(out['i8u'], np.uint8(234))
class TestScalars:
    """Scalar variables must round-trip with the correct value and type."""

    def test_byte(self):
        rec = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), verbose=False)
        assert_identical(rec.i8u, np.uint8(234))

    def test_int16(self):
        rec = readsav(path.join(DATA_PATH, 'scalar_int16.sav'), verbose=False)
        assert_identical(rec.i16s, np.int16(-23456))

    def test_int32(self):
        rec = readsav(path.join(DATA_PATH, 'scalar_int32.sav'), verbose=False)
        assert_identical(rec.i32s, np.int32(-1234567890))

    def test_uint16(self):
        rec = readsav(path.join(DATA_PATH, 'scalar_uint16.sav'), verbose=False)
        assert_identical(rec.i16u, np.uint16(65511))

    def test_uint32(self):
        rec = readsav(path.join(DATA_PATH, 'scalar_uint32.sav'), verbose=False)
        assert_identical(rec.i32u, np.uint32(4294967233))

    def test_int64(self):
        rec = readsav(path.join(DATA_PATH, 'scalar_int64.sav'), verbose=False)
        assert_identical(rec.i64s, np.int64(-9223372036854774567))

    def test_uint64(self):
        rec = readsav(path.join(DATA_PATH, 'scalar_uint64.sav'), verbose=False)
        assert_identical(rec.i64u, np.uint64(18446744073709529285))

    def test_float32(self):
        rec = readsav(path.join(DATA_PATH, 'scalar_float32.sav'), verbose=False)
        assert_identical(rec.f32, np.float32(-3.1234567e+37))

    def test_float64(self):
        rec = readsav(path.join(DATA_PATH, 'scalar_float64.sav'), verbose=False)
        assert_identical(rec.f64, np.float64(-1.1976931348623157e+307))

    def test_complex32(self):
        rec = readsav(path.join(DATA_PATH, 'scalar_complex32.sav'), verbose=False)
        assert_identical(rec.c32, np.complex64(3.124442e13-2.312442e31j))

    def test_complex64(self):
        rec = readsav(path.join(DATA_PATH, 'scalar_complex64.sav'), verbose=False)
        assert_identical(
            rec.c64,
            np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))

    def test_bytes(self):
        rec = readsav(path.join(DATA_PATH, 'scalar_string.sav'), verbose=False)
        assert_identical(
            rec.s,
            np.bytes_("The quick brown fox jumps over the lazy python"))

    # No scalar test data exists yet for the IDL features below.
    def test_structure(self):
        pass

    def test_heap_pointer(self):
        pass

    def test_object_reference(self):
        pass
class TestCompressed(TestScalars):
    """Compressed .sav files must read back like uncompressed ones."""

    def test_compressed(self):
        rec = readsav(path.join(DATA_PATH, 'various_compressed.sav'), verbose=False)
        assert_identical(rec.i8u, np.uint8(234))
        assert_identical(rec.f32, np.float32(-3.1234567e+37))
        assert_identical(
            rec.c64,
            np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
        assert_equal(rec.array5d.shape, (4, 3, 4, 6, 5))
        assert_identical(rec.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
        assert_identical(rec.arrays.b[0],
                         np.array([4., 5., 6., 7.], dtype=np.float32))
        assert_identical(rec.arrays.c[0],
                         np.array([np.complex64(1+2j), np.complex64(7+8j)]))
        assert_identical(rec.arrays.d[0],
                         np.array([b"cheese", b"bacon", b"spam"], dtype=object))
class TestArrayDimensions:
    """Multi-dimensional arrays must come back with the right shape."""

    def _check_shape(self, fname, varname, shape):
        # one file per rank; only the shape is checked here
        rec = readsav(path.join(DATA_PATH, fname), verbose=False)
        assert_equal(getattr(rec, varname).shape, shape)

    def test_1d(self):
        self._check_shape('array_float32_1d.sav', 'array1d', (123, ))

    def test_2d(self):
        self._check_shape('array_float32_2d.sav', 'array2d', (22, 12))

    def test_3d(self):
        self._check_shape('array_float32_3d.sav', 'array3d', (11, 22, 12))

    def test_4d(self):
        self._check_shape('array_float32_4d.sav', 'array4d', (4, 5, 8, 7))

    def test_5d(self):
        self._check_shape('array_float32_5d.sav', 'array5d', (4, 3, 4, 6, 5))

    def test_6d(self):
        self._check_shape('array_float32_6d.sav', 'array6d', (3, 6, 4, 5, 3, 4))

    def test_7d(self):
        self._check_shape('array_float32_7d.sav', 'array7d',
                          (2, 1, 2, 3, 4, 3, 2))

    def test_8d(self):
        self._check_shape('array_float32_8d.sav', 'array8d',
                          (4, 3, 2, 1, 2, 3, 5, 4))
class TestStructures:
    """IDL structures round-trip: scalar fields, replicated structures
    (1-D and 3-D replication), array fields, and structure inheritance."""
    def test_scalars(self):
        # scalar fields come back as 0-d arrays of the matching dtype
        s = readsav(path.join(DATA_PATH, 'struct_scalars.sav'), verbose=False)
        assert_identical(s.scalars.a, np.array(np.int16(1)))
        assert_identical(s.scalars.b, np.array(np.int32(2)))
        assert_identical(s.scalars.c, np.array(np.float32(3.)))
        assert_identical(s.scalars.d, np.array(np.float64(4.)))
        assert_identical(s.scalars.e, np.array([b"spam"], dtype=object))
        assert_identical(s.scalars.f, np.array(np.complex64(-1.+3j)))
    def test_scalars_replicated(self):
        # a structure replicated 5 times: each field becomes a length-5 array
        s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated.sav'), verbose=False)
        assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 5))
        assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 5))
        assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 5))
        assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 5))
        assert_identical(s.scalars_rep.e, np.repeat(b"spam", 5).astype(object))
        assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 5))
    def test_scalars_replicated_3d(self):
        # replication over a (4, 3, 2) grid preserves that shape per field
        s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated_3d.sav'), verbose=False)
        assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 24).reshape(4, 3, 2))
        assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 24).reshape(4, 3, 2))
        assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 24).reshape(4, 3, 2))
        assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 24).reshape(4, 3, 2))
        assert_identical(s.scalars_rep.e, np.repeat(b"spam", 24).reshape(4, 3, 2).astype(object))
        assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 24).reshape(4, 3, 2))
    def test_arrays(self):
        s = readsav(path.join(DATA_PATH, 'struct_arrays.sav'), verbose=False)
        assert_array_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
        assert_array_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
        assert_array_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
        assert_array_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=object))
    def test_arrays_replicated(self):
        s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated.sav'), verbose=False)
        # Check column types
        assert_(s.arrays_rep.a.dtype.type is np.object_)
        assert_(s.arrays_rep.b.dtype.type is np.object_)
        assert_(s.arrays_rep.c.dtype.type is np.object_)
        assert_(s.arrays_rep.d.dtype.type is np.object_)
        # Check column shapes
        assert_equal(s.arrays_rep.a.shape, (5, ))
        assert_equal(s.arrays_rep.b.shape, (5, ))
        assert_equal(s.arrays_rep.c.shape, (5, ))
        assert_equal(s.arrays_rep.d.shape, (5, ))
        # Check values
        for i in range(5):
            assert_array_identical(s.arrays_rep.a[i],
                                   np.array([1, 2, 3], dtype=np.int16))
            assert_array_identical(s.arrays_rep.b[i],
                                   np.array([4., 5., 6., 7.], dtype=np.float32))
            assert_array_identical(s.arrays_rep.c[i],
                                   np.array([np.complex64(1+2j),
                                             np.complex64(7+8j)]))
            assert_array_identical(s.arrays_rep.d[i],
                                   np.array([b"cheese", b"bacon", b"spam"],
                                            dtype=object))
    def test_arrays_replicated_3d(self):
        s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated_3d.sav'), verbose=False)
        # Check column types
        assert_(s.arrays_rep.a.dtype.type is np.object_)
        assert_(s.arrays_rep.b.dtype.type is np.object_)
        assert_(s.arrays_rep.c.dtype.type is np.object_)
        assert_(s.arrays_rep.d.dtype.type is np.object_)
        # Check column shapes
        assert_equal(s.arrays_rep.a.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.b.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.c.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.d.shape, (4, 3, 2))
        # Check values
        for i in range(4):
            for j in range(3):
                for k in range(2):
                    assert_array_identical(s.arrays_rep.a[i, j, k],
                                           np.array([1, 2, 3], dtype=np.int16))
                    assert_array_identical(s.arrays_rep.b[i, j, k],
                                           np.array([4., 5., 6., 7.],
                                                    dtype=np.float32))
                    assert_array_identical(s.arrays_rep.c[i, j, k],
                                           np.array([np.complex64(1+2j),
                                                     np.complex64(7+8j)]))
                    assert_array_identical(s.arrays_rep.d[i, j, k],
                                           np.array([b"cheese", b"bacon", b"spam"],
                                                    dtype=object))
    def test_inheritance(self):
        # inherited fields (x, y, r) and own field (c) are all present
        s = readsav(path.join(DATA_PATH, 'struct_inherit.sav'), verbose=False)
        assert_identical(s.fc.x, np.array([0], dtype=np.int16))
        assert_identical(s.fc.y, np.array([0], dtype=np.int16))
        assert_identical(s.fc.r, np.array([0], dtype=np.int16))
        assert_identical(s.fc.c, np.array([4], dtype=np.int16))
    def test_arrays_corrupt_idl80(self):
        # test byte arrays with missing nbyte information from IDL 8.0 .sav file
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "Not able to verify number of bytes from header")
            s = readsav(path.join(DATA_PATH,'struct_arrays_byte_idl80.sav'),
                        verbose=False)
        assert_identical(s.y.x[0], np.array([55,66], dtype=np.uint8))
class TestPointers:
    """Two pointers to the same heap variable must map to one Python object."""

    def test_pointers(self):
        rec = readsav(path.join(DATA_PATH, 'scalar_heap_pointer.sav'), verbose=False)
        expected = np.complex128(
            1.1987253647623157e+112-5.1987258887729157e+307j)
        assert_identical(rec.c64_pointer1, expected)
        assert_identical(rec.c64_pointer2, expected)
        # both names must reference the identical object
        assert_(rec.c64_pointer1 is rec.c64_pointer2)
class TestPointerArray:
    """Arrays of pointers must dereference to one shared heap value."""

    def _check(self, fname, varname, shape):
        rec = readsav(path.join(DATA_PATH, fname), verbose=False)
        arr = getattr(rec, varname)
        assert_equal(arr.shape, shape)
        assert_(np.all(arr == np.float32(4.)))
        # every element must alias the very same heap object
        first = arr[(0,) * arr.ndim]
        assert_(np.all(vect_id(arr) == id(first)))

    def test_1d(self):
        self._check('array_float32_pointer_1d.sav', 'array1d', (123, ))

    def test_2d(self):
        self._check('array_float32_pointer_2d.sav', 'array2d', (22, 12))

    def test_3d(self):
        self._check('array_float32_pointer_3d.sav', 'array3d', (11, 22, 12))

    def test_4d(self):
        self._check('array_float32_pointer_4d.sav', 'array4d', (4, 5, 8, 7))

    def test_5d(self):
        self._check('array_float32_pointer_5d.sav', 'array5d', (4, 3, 4, 6, 5))

    def test_6d(self):
        self._check('array_float32_pointer_6d.sav', 'array6d',
                    (3, 6, 4, 5, 3, 4))

    def test_7d(self):
        self._check('array_float32_pointer_7d.sav', 'array7d',
                    (2, 1, 2, 3, 4, 3, 2))

    def test_8d(self):
        self._check('array_float32_pointer_8d.sav', 'array8d',
                    (4, 3, 2, 1, 2, 3, 5, 4))
class TestPointerStructures:
    """Structures whose fields are heap pointers: values must match and
    all pointer dereferences must share the same Python object."""
    def test_scalars(self):
        s = readsav(path.join(DATA_PATH, 'struct_pointers.sav'), verbose=False)
        assert_identical(s.pointers.g, np.array(np.float32(4.), dtype=np.object_))
        assert_identical(s.pointers.h, np.array(np.float32(4.), dtype=np.object_))
        # g and h point at the same heap variable
        assert_(id(s.pointers.g[0]) == id(s.pointers.h[0]))
    def test_pointers_replicated(self):
        s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated.sav'), verbose=False)
        assert_identical(s.pointers_rep.g, np.repeat(np.float32(4.), 5).astype(np.object_))
        assert_identical(s.pointers_rep.h, np.repeat(np.float32(4.), 5).astype(np.object_))
        assert_(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h)))
    def test_pointers_replicated_3d(self):
        s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated_3d.sav'), verbose=False)
        s_expect = np.repeat(np.float32(4.), 24).reshape(4, 3, 2).astype(np.object_)
        assert_identical(s.pointers_rep.g, s_expect)
        assert_identical(s.pointers_rep.h, s_expect)
        assert_(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h)))
    def test_arrays(self):
        s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays.sav'), verbose=False)
        assert_array_identical(s.arrays.g[0], np.repeat(np.float32(4.), 2).astype(np.object_))
        assert_array_identical(s.arrays.h[0], np.repeat(np.float32(4.), 3).astype(np.object_))
        # all elements of g alias one object, likewise for h, and g and h
        # alias the same heap variable
        assert_(np.all(vect_id(s.arrays.g[0]) == id(s.arrays.g[0][0])))
        assert_(np.all(vect_id(s.arrays.h[0]) == id(s.arrays.h[0][0])))
        assert_(id(s.arrays.g[0][0]) == id(s.arrays.h[0][0]))
    def test_arrays_replicated(self):
        s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays_replicated.sav'), verbose=False)
        # Check column types
        assert_(s.arrays_rep.g.dtype.type is np.object_)
        assert_(s.arrays_rep.h.dtype.type is np.object_)
        # Check column shapes
        assert_equal(s.arrays_rep.g.shape, (5, ))
        assert_equal(s.arrays_rep.h.shape, (5, ))
        # Check values
        for i in range(5):
            assert_array_identical(s.arrays_rep.g[i], np.repeat(np.float32(4.), 2).astype(np.object_))
            assert_array_identical(s.arrays_rep.h[i], np.repeat(np.float32(4.), 3).astype(np.object_))
            assert_(np.all(vect_id(s.arrays_rep.g[i]) == id(s.arrays_rep.g[0][0])))
            assert_(np.all(vect_id(s.arrays_rep.h[i]) == id(s.arrays_rep.h[0][0])))
    def test_arrays_replicated_3d(self):
        pth = path.join(DATA_PATH, 'struct_pointer_arrays_replicated_3d.sav')
        s = readsav(pth, verbose=False)
        # Check column types
        assert_(s.arrays_rep.g.dtype.type is np.object_)
        assert_(s.arrays_rep.h.dtype.type is np.object_)
        # Check column shapes
        assert_equal(s.arrays_rep.g.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.h.shape, (4, 3, 2))
        # Check values
        for i in range(4):
            for j in range(3):
                for k in range(2):
                    assert_array_identical(s.arrays_rep.g[i, j, k],
                            np.repeat(np.float32(4.), 2).astype(np.object_))
                    assert_array_identical(s.arrays_rep.h[i, j, k],
                            np.repeat(np.float32(4.), 3).astype(np.object_))
                    assert_(np.all(vect_id(s.arrays_rep.g[i, j, k]) == id(s.arrays_rep.g[0, 0, 0][0])))
                    assert_(np.all(vect_id(s.arrays_rep.h[i, j, k]) == id(s.arrays_rep.h[0, 0, 0][0])))
class TestTags:
    """Files written with a description tag must still read correctly."""

    def test_description(self):
        rec = readsav(path.join(DATA_PATH, 'scalar_byte_descr.sav'), verbose=False)
        assert_identical(rec.i8u, np.uint8(234))
def test_null_pointer():
    """Null pointers must read back as None (regression test)."""
    rec = readsav(path.join(DATA_PATH, 'null_pointer.sav'), verbose=False)
    assert_identical(rec.point, None)
    assert_identical(rec.check, np.int16(5))
def test_invalid_pointer():
    """Pointers to a missing heap variable become None, with one warning.

    Regression test for gh-4613.  Real-world files sometimes reference a
    heap variable that does not exist; the reader now degrades gracefully
    instead of failing.  The data file was hand-edited to make the pointer
    reference invalid.
    """
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        rec = readsav(path.join(DATA_PATH, 'invalid_pointer.sav'), verbose=False)
    assert_(len(caught) == 1)
    expected_msg = ("Variable referenced by pointer not found in "
                    "heap: variable will be set to None")
    assert_(str(caught[0].message) == expected_msg)
    assert_identical(rec['a'], np.array([None, None]))
def test_attrdict():
    """AttrDict entries are reachable both as items and as attributes."""
    ad = _idl.AttrDict({'one': 1})
    assert ad.one == 1
    assert ad['one'] == 1
    # missing keys raise the matching error for each access style
    with pytest.raises(KeyError):
        ad['two']
    with pytest.raises(AttributeError, match='has no attribute'):
        ad.two
| 19,680
| 43.426637
| 105
|
py
|
scipy
|
scipy-main/scipy/io/tests/test_mmio.py
|
from tempfile import mkdtemp
import os
import io
import shutil
import textwrap
import numpy as np
from numpy import array, transpose, pi
from numpy.testing import (assert_equal, assert_allclose,
assert_array_equal, assert_array_almost_equal)
import pytest
from pytest import raises as assert_raises
import scipy.sparse
from scipy.io import mminfo, mmread, mmwrite
# (mminfo field value, numpy dtype string) pairs shared by the
# integer round-trip tests below.
parametrize_args = [('integer', 'int'),
                    ('unsigned-integer', 'uint')]
class TestMMIOArray:
    """Round-trip dense ('array' format) matrices through mmwrite/mmread.

    Subclassed by TestMMIOSparseCSR, which overrides check/check_exact and
    most tests for the sparse 'coordinate' format."""
    def setup_method(self):
        self.tmpdir = mkdtemp()
        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
    def teardown_method(self):
        shutil.rmtree(self.tmpdir)
    def check(self, a, info):
        # write, verify header via mminfo, read back, compare approximately
        mmwrite(self.fn, a)
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn)
        assert_array_almost_equal(a, b)
    def check_exact(self, a, info):
        # like check(), but requires bit-exact equality (integer data)
        mmwrite(self.fn, a)
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn)
        assert_equal(a, b)
    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_integer(self, typeval, dtype):
        self.check_exact(array([[1, 2], [3, 4]], dtype=dtype),
                         (2, 2, 4, 'array', typeval, 'general'))
    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_32bit_integer(self, typeval, dtype):
        a = array([[2**31-1, 2**31-2], [2**31-3, 2**31-4]], dtype=dtype)
        self.check_exact(a, (2, 2, 4, 'array', typeval, 'general'))
    def test_64bit_integer(self):
        a = array([[2**31, 2**32], [2**63-2, 2**63-1]], dtype=np.int64)
        # 32-bit platforms cannot represent these values in an intp index
        if (np.intp(0).itemsize < 8):
            assert_raises(OverflowError, mmwrite, self.fn, a)
        else:
            self.check_exact(a, (2, 2, 4, 'array', 'integer', 'general'))
    def test_64bit_unsigned_integer(self):
        a = array([[2**31, 2**32], [2**64-2, 2**64-1]], dtype=np.uint64)
        self.check_exact(a, (2, 2, 4, 'array', 'unsigned-integer', 'general'))
    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_upper_triangle_integer(self, typeval, dtype):
        self.check_exact(array([[0, 1], [0, 0]], dtype=dtype),
                         (2, 2, 4, 'array', typeval, 'general'))
    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_lower_triangle_integer(self, typeval, dtype):
        self.check_exact(array([[0, 0], [1, 0]], dtype=dtype),
                         (2, 2, 4, 'array', typeval, 'general'))
    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_rectangular_integer(self, typeval, dtype):
        self.check_exact(array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
                         (2, 3, 6, 'array', typeval, 'general'))
    def test_simple_rectangular_float(self):
        self.check([[1, 2], [3.5, 4], [5, 6]],
                   (3, 2, 6, 'array', 'real', 'general'))
    def test_simple_float(self):
        self.check([[1, 2], [3, 4.0]],
                   (2, 2, 4, 'array', 'real', 'general'))
    def test_simple_complex(self):
        self.check([[1, 2], [3, 4j]],
                   (2, 2, 4, 'array', 'complex', 'general'))
    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_symmetric_integer(self, typeval, dtype):
        self.check_exact(array([[1, 2], [2, 4]], dtype=dtype),
                         (2, 2, 4, 'array', typeval, 'symmetric'))
    def test_simple_skew_symmetric_integer(self):
        self.check_exact([[0, 2], [-2, 0]],
                         (2, 2, 4, 'array', 'integer', 'skew-symmetric'))
    def test_simple_skew_symmetric_float(self):
        self.check(array([[0, 2], [-2.0, 0.0]], 'f'),
                   (2, 2, 4, 'array', 'real', 'skew-symmetric'))
    def test_simple_hermitian_complex(self):
        self.check([[1, 2+3j], [2-3j, 4]],
                   (2, 2, 4, 'array', 'complex', 'hermitian'))
    def test_random_symmetric_float(self):
        sz = (20, 20)
        a = np.random.random(sz)
        a = a + transpose(a)
        self.check(a, (20, 20, 400, 'array', 'real', 'symmetric'))
    def test_random_rectangular_float(self):
        sz = (20, 15)
        a = np.random.random(sz)
        self.check(a, (20, 15, 300, 'array', 'real', 'general'))
    def test_bad_number_of_array_header_fields(self):
        # the size line '3 3 999' has three fields; 'array' format takes two
        s = """\
            %%MatrixMarket matrix array real general
            3 3 999
            1.0
            2.0
            3.0
            4.0
            5.0
            6.0
            7.0
            8.0
            9.0
            """
        text = textwrap.dedent(s).encode('ascii')
        with pytest.raises(ValueError, match='not of length 2'):
            scipy.io.mmread(io.BytesIO(text))
    def test_gh13634_non_skew_symmetric_int(self):
        # gh-13634: near-skew matrices must not be mislabeled skew-symmetric
        self.check_exact(array([[1, 2], [-2, 99]], dtype=np.int32),
                         (2, 2, 4, 'array', 'integer', 'general'))
    def test_gh13634_non_skew_symmetric_float(self):
        self.check(array([[1, 2], [-2, 99.]], dtype=np.float32),
                   (2, 2, 4, 'array', 'real', 'general'))
class TestMMIOSparseCSR(TestMMIOArray):
    """Round-trip sparse CSR matrices through the 'coordinate' format.

    Inherits the dense test suite and overrides check/check_exact plus the
    tests whose expected mminfo tuples differ for sparse storage."""
    def setup_method(self):
        self.tmpdir = mkdtemp()
        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
    def teardown_method(self):
        shutil.rmtree(self.tmpdir)
    def check(self, a, info):
        # sparse variant: compare via toarray() after the round trip
        mmwrite(self.fn, a)
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn)
        assert_array_almost_equal(a.toarray(), b.toarray())
    def check_exact(self, a, info):
        mmwrite(self.fn, a)
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn)
        assert_equal(a.toarray(), b.toarray())
    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_integer(self, typeval, dtype):
        self.check_exact(scipy.sparse.csr_matrix([[1, 2], [3, 4]], dtype=dtype),
                         (2, 2, 4, 'coordinate', typeval, 'general'))
    def test_32bit_integer(self):
        a = scipy.sparse.csr_matrix(array([[2**31-1, -2**31+2],
                                           [2**31-3, 2**31-4]],
                                          dtype=np.int32))
        self.check_exact(a, (2, 2, 4, 'coordinate', 'integer', 'general'))
    def test_64bit_integer(self):
        a = scipy.sparse.csr_matrix(array([[2**32+1, 2**32+1],
                                           [-2**63+2, 2**63-2]],
                                          dtype=np.int64))
        # values beyond 32 bits overflow the index type on 32-bit platforms
        if (np.intp(0).itemsize < 8):
            assert_raises(OverflowError, mmwrite, self.fn, a)
        else:
            self.check_exact(a, (2, 2, 4, 'coordinate', 'integer', 'general'))
    def test_32bit_unsigned_integer(self):
        a = scipy.sparse.csr_matrix(array([[2**31-1, 2**31-2],
                                           [2**31-3, 2**31-4]],
                                          dtype=np.uint32))
        self.check_exact(a, (2, 2, 4, 'coordinate', 'unsigned-integer', 'general'))
    def test_64bit_unsigned_integer(self):
        a = scipy.sparse.csr_matrix(array([[2**32+1, 2**32+1],
                                           [2**64-2, 2**64-1]],
                                          dtype=np.uint64))
        self.check_exact(a, (2, 2, 4, 'coordinate', 'unsigned-integer', 'general'))
    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_upper_triangle_integer(self, typeval, dtype):
        self.check_exact(scipy.sparse.csr_matrix([[0, 1], [0, 0]], dtype=dtype),
                         (2, 2, 1, 'coordinate', typeval, 'general'))
    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_lower_triangle_integer(self, typeval, dtype):
        self.check_exact(scipy.sparse.csr_matrix([[0, 0], [1, 0]], dtype=dtype),
                         (2, 2, 1, 'coordinate', typeval, 'general'))
    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_rectangular_integer(self, typeval, dtype):
        self.check_exact(scipy.sparse.csr_matrix([[1, 2, 3], [4, 5, 6]], dtype=dtype),
                         (2, 3, 6, 'coordinate', typeval, 'general'))
    def test_simple_rectangular_float(self):
        self.check(scipy.sparse.csr_matrix([[1, 2], [3.5, 4], [5, 6]]),
                   (3, 2, 6, 'coordinate', 'real', 'general'))
    def test_simple_float(self):
        self.check(scipy.sparse.csr_matrix([[1, 2], [3, 4.0]]),
                   (2, 2, 4, 'coordinate', 'real', 'general'))
    def test_simple_complex(self):
        self.check(scipy.sparse.csr_matrix([[1, 2], [3, 4j]]),
                   (2, 2, 4, 'coordinate', 'complex', 'general'))
    @pytest.mark.parametrize('typeval, dtype', parametrize_args)
    def test_simple_symmetric_integer(self, typeval, dtype):
        self.check_exact(scipy.sparse.csr_matrix([[1, 2], [2, 4]], dtype=dtype),
                         (2, 2, 3, 'coordinate', typeval, 'symmetric'))
    def test_simple_skew_symmetric_integer(self):
        self.check_exact(scipy.sparse.csr_matrix([[0, 2], [-2, 0]]),
                         (2, 2, 1, 'coordinate', 'integer', 'skew-symmetric'))
    def test_simple_skew_symmetric_float(self):
        self.check(scipy.sparse.csr_matrix(array([[0, 2], [-2.0, 0]], 'f')),
                   (2, 2, 1, 'coordinate', 'real', 'skew-symmetric'))
    def test_simple_hermitian_complex(self):
        self.check(scipy.sparse.csr_matrix([[1, 2+3j], [2-3j, 4]]),
                   (2, 2, 3, 'coordinate', 'complex', 'hermitian'))
    def test_random_symmetric_float(self):
        sz = (20, 20)
        a = np.random.random(sz)
        a = a + transpose(a)
        a = scipy.sparse.csr_matrix(a)
        self.check(a, (20, 20, 210, 'coordinate', 'real', 'symmetric'))
    def test_random_rectangular_float(self):
        sz = (20, 15)
        a = np.random.random(sz)
        a = scipy.sparse.csr_matrix(a)
        self.check(a, (20, 15, 300, 'coordinate', 'real', 'general'))
    def test_simple_pattern(self):
        # 'pattern' field stores only the nonzero structure, read back as 1s
        a = scipy.sparse.csr_matrix([[0, 1.5], [3.0, 2.5]])
        p = np.zeros_like(a.toarray())
        p[a.toarray() > 0] = 1
        info = (2, 2, 3, 'coordinate', 'pattern', 'general')
        mmwrite(self.fn, a, field='pattern')
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn)
        assert_array_almost_equal(p, b.toarray())
    def test_gh13634_non_skew_symmetric_int(self):
        a = scipy.sparse.csr_matrix([[1, 2], [-2, 99]], dtype=np.int32)
        self.check_exact(a, (2, 2, 4, 'coordinate', 'integer', 'general'))
    def test_gh13634_non_skew_symmetric_float(self):
        a = scipy.sparse.csr_matrix([[1, 2], [-2, 99.]], dtype=np.float32)
        self.check(a, (2, 2, 4, 'coordinate', 'real', 'general'))
# Hand-written Matrix Market payloads with 32-bit, 64-bit, and over-64-bit
# integer values, consumed by TestMMIOReadLargeIntegers below.
_32bit_integer_dense_example = '''\
%%MatrixMarket matrix array integer general
2 2
2147483647
2147483646
2147483647
2147483646
'''
_32bit_integer_sparse_example = '''\
%%MatrixMarket matrix coordinate integer symmetric
2 2 2
1 1 2147483647
2 2 2147483646
'''
_64bit_integer_dense_example = '''\
%%MatrixMarket matrix array integer general
2 2
2147483648
-9223372036854775806
-2147483648
9223372036854775807
'''
_64bit_integer_sparse_general_example = '''\
%%MatrixMarket matrix coordinate integer general
2 2 3
1 1 2147483648
1 2 9223372036854775807
2 2 9223372036854775807
'''
_64bit_integer_sparse_symmetric_example = '''\
%%MatrixMarket matrix coordinate integer symmetric
2 2 3
1 1 2147483648
1 2 -9223372036854775807
2 2 9223372036854775807
'''
_64bit_integer_sparse_skew_example = '''\
%%MatrixMarket matrix coordinate integer skew-symmetric
2 2 3
1 1 2147483648
1 2 -9223372036854775807
2 2 9223372036854775807
'''
# Values below exceed the int64 range, so reading must raise OverflowError.
_over64bit_integer_dense_example = '''\
%%MatrixMarket matrix array integer general
2 2
2147483648
9223372036854775807
2147483648
9223372036854775808
'''
_over64bit_integer_sparse_example = '''\
%%MatrixMarket matrix coordinate integer symmetric
2 2 2
1 1 2147483648
2 2 19223372036854775808
'''
class TestMMIOReadLargeIntegers:
    """Read Matrix Market files whose integers need 32, 64, or more bits."""
    def setup_method(self):
        self.tmpdir = mkdtemp()
        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
    def teardown_method(self):
        shutil.rmtree(self.tmpdir)
    def check_read(self, example, a, info, dense, over32, over64):
        # over32/over64 flag whether the file's values exceed the 32-bit /
        # 64-bit signed range: over32 values overflow on 32-bit platforms,
        # over64 values must raise OverflowError everywhere.
        with open(self.fn, 'w') as f:
            f.write(example)
        assert_equal(mminfo(self.fn), info)
        if (over32 and (np.intp(0).itemsize < 8)) or over64:
            assert_raises(OverflowError, mmread, self.fn)
        else:
            b = mmread(self.fn)
            if not dense:
                b = b.toarray()
            assert_equal(a, b)
    def test_read_32bit_integer_dense(self):
        a = array([[2**31-1, 2**31-1],
                   [2**31-2, 2**31-2]], dtype=np.int64)
        self.check_read(_32bit_integer_dense_example,
                        a,
                        (2, 2, 4, 'array', 'integer', 'general'),
                        dense=True,
                        over32=False,
                        over64=False)
    def test_read_32bit_integer_sparse(self):
        a = array([[2**31-1, 0],
                   [0, 2**31-2]], dtype=np.int64)
        self.check_read(_32bit_integer_sparse_example,
                        a,
                        (2, 2, 2, 'coordinate', 'integer', 'symmetric'),
                        dense=False,
                        over32=False,
                        over64=False)
    def test_read_64bit_integer_dense(self):
        a = array([[2**31, -2**31],
                   [-2**63+2, 2**63-1]], dtype=np.int64)
        self.check_read(_64bit_integer_dense_example,
                        a,
                        (2, 2, 4, 'array', 'integer', 'general'),
                        dense=True,
                        over32=True,
                        over64=False)
    def test_read_64bit_integer_sparse_general(self):
        a = array([[2**31, 2**63-1],
                   [0, 2**63-1]], dtype=np.int64)
        self.check_read(_64bit_integer_sparse_general_example,
                        a,
                        (2, 2, 3, 'coordinate', 'integer', 'general'),
                        dense=False,
                        over32=True,
                        over64=False)
    def test_read_64bit_integer_sparse_symmetric(self):
        a = array([[2**31, -2**63+1],
                   [-2**63+1, 2**63-1]], dtype=np.int64)
        self.check_read(_64bit_integer_sparse_symmetric_example,
                        a,
                        (2, 2, 3, 'coordinate', 'integer', 'symmetric'),
                        dense=False,
                        over32=True,
                        over64=False)
    def test_read_64bit_integer_sparse_skew(self):
        a = array([[2**31, -2**63+1],
                   [2**63-1, 2**63-1]], dtype=np.int64)
        self.check_read(_64bit_integer_sparse_skew_example,
                        a,
                        (2, 2, 3, 'coordinate', 'integer', 'skew-symmetric'),
                        dense=False,
                        over32=True,
                        over64=False)
    def test_read_over64bit_integer_dense(self):
        self.check_read(_over64bit_integer_dense_example,
                        None,
                        (2, 2, 4, 'array', 'integer', 'general'),
                        dense=True,
                        over32=True,
                        over64=True)
    def test_read_over64bit_integer_sparse(self):
        self.check_read(_over64bit_integer_sparse_example,
                        None,
                        (2, 2, 2, 'coordinate', 'integer', 'symmetric'),
                        dense=False,
                        over32=True,
                        over64=True)
# Literal Matrix Market file bodies used as read-only fixtures by the
# coordinate-format tests below.
_general_example = '''\
%%MatrixMarket matrix coordinate real general
%=================================================================================
%
% This ASCII file represents a sparse MxN matrix with L
% nonzeros in the following Matrix Market format:
%
% +----------------------------------------------+
% |%%MatrixMarket matrix coordinate real general | <--- header line
% |% | <--+
% |% comments | |-- 0 or more comment lines
% |% | <--+
% | M N L | <--- rows, columns, entries
% | I1 J1 A(I1, J1) | <--+
% | I2 J2 A(I2, J2) | |
% | I3 J3 A(I3, J3) | |-- L lines
% | . . . | |
% | IL JL A(IL, JL) | <--+
% +----------------------------------------------+
%
% Indices are 1-based, i.e. A(1,1) is the first element.
%
%=================================================================================
5 5 8
1 1 1.000e+00
2 2 1.050e+01
3 3 1.500e-02
1 4 6.000e+00
4 2 2.505e+02
4 4 -2.800e+02
4 5 3.332e+01
5 5 1.200e+01
'''
# Complex hermitian: only one triangle is stored; the reader mirrors it
# with conjugation.
_hermitian_example = '''\
%%MatrixMarket matrix coordinate complex hermitian
5 5 7
1 1 1.0 0
2 2 10.5 0
4 2 250.5 22.22
3 3 1.5e-2 0
4 4 -2.8e2 0
5 5 12. 0
5 4 0 33.32
'''
# Real skew-symmetric: mirrored entries get a sign flip.
_skew_example = '''\
%%MatrixMarket matrix coordinate real skew-symmetric
5 5 7
1 1 1.0
2 2 10.5
4 2 250.5
3 3 1.5e-2
4 4 -2.8e2
5 5 12.
5 4 0
'''
# Real symmetric: mirrored entries are duplicated as-is.
_symmetric_example = '''\
%%MatrixMarket matrix coordinate real symmetric
5 5 7
1 1 1.0
2 2 10.5
4 2 250.5
3 3 1.5e-2
4 4 -2.8e2
5 5 12.
5 4 8
'''
# Pattern field: positions only, no values stored.
_symmetric_pattern_example = '''\
%%MatrixMarket matrix coordinate pattern symmetric
5 5 7
1 1
2 2
4 2
3 3
4 4
5 5
5 4
'''
# example (without comment lines) from Figure 1 in
# https://math.nist.gov/MatrixMarket/reports/MMformat.ps
_empty_lines_example = '''\
%%MatrixMarket MATRIX Coordinate Real General
5 5 8
1 1 1.0
2 2 10.5
3 3 1.5e-2
4 4 -2.8E2
5 5 12.
1 4 6
4 2 250.5
4 5 33.32
'''
class TestMMIOCoordinate:
    """Read/write round-trip tests for Matrix Market coordinate files."""

    def setup_method(self):
        # Fresh temporary directory per test keeps files isolated.
        self.tmpdir = mkdtemp()
        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')

    def teardown_method(self):
        shutil.rmtree(self.tmpdir)

    def check_read(self, example, a, info):
        """Write `example` to disk and verify both mminfo() and mmread().

        Parameters
        ----------
        example : str
            Literal Matrix Market file contents.
        a : array_like
            Expected dense matrix.
        info : tuple
            Expected mminfo() result.
        """
        # Context manager closes the handle even if the write raises;
        # the previous open()/write()/close() sequence could leak it.
        with open(self.fn, 'w') as f:
            f.write(example)
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn).toarray()
        assert_array_almost_equal(a, b)

    def test_read_general(self):
        a = [[1, 0, 0, 6, 0],
             [0, 10.5, 0, 0, 0],
             [0, 0, .015, 0, 0],
             [0, 250.5, 0, -280, 33.32],
             [0, 0, 0, 0, 12]]
        self.check_read(_general_example, a,
                        (5, 5, 8, 'coordinate', 'real', 'general'))

    def test_read_hermitian(self):
        a = [[1, 0, 0, 0, 0],
             [0, 10.5, 0, 250.5 - 22.22j, 0],
             [0, 0, .015, 0, 0],
             [0, 250.5 + 22.22j, 0, -280, -33.32j],
             [0, 0, 0, 33.32j, 12]]
        self.check_read(_hermitian_example, a,
                        (5, 5, 7, 'coordinate', 'complex', 'hermitian'))

    def test_read_skew(self):
        a = [[1, 0, 0, 0, 0],
             [0, 10.5, 0, -250.5, 0],
             [0, 0, .015, 0, 0],
             [0, 250.5, 0, -280, 0],
             [0, 0, 0, 0, 12]]
        self.check_read(_skew_example, a,
                        (5, 5, 7, 'coordinate', 'real', 'skew-symmetric'))

    def test_read_symmetric(self):
        a = [[1, 0, 0, 0, 0],
             [0, 10.5, 0, 250.5, 0],
             [0, 0, .015, 0, 0],
             [0, 250.5, 0, -280, 8],
             [0, 0, 0, 8, 12]]
        self.check_read(_symmetric_example, a,
                        (5, 5, 7, 'coordinate', 'real', 'symmetric'))

    def test_read_symmetric_pattern(self):
        # Pattern files carry no values; entries read back as ones.
        a = [[1, 0, 0, 0, 0],
             [0, 1, 0, 1, 0],
             [0, 0, 1, 0, 0],
             [0, 1, 0, 1, 1],
             [0, 0, 0, 1, 1]]
        self.check_read(_symmetric_pattern_example, a,
                        (5, 5, 7, 'coordinate', 'pattern', 'symmetric'))

    def test_read_empty_lines(self):
        a = [[1, 0, 0, 6, 0],
             [0, 10.5, 0, 0, 0],
             [0, 0, .015, 0, 0],
             [0, 250.5, 0, -280, 33.32],
             [0, 0, 0, 0, 12]]
        self.check_read(_empty_lines_example, a,
                        (5, 5, 8, 'coordinate', 'real', 'general'))

    def test_empty_write_read(self):
        # https://github.com/scipy/scipy/issues/1410 (Trac #883)
        b = scipy.sparse.coo_matrix((10, 10))
        mmwrite(self.fn, b)
        assert_equal(mminfo(self.fn),
                     (10, 10, 0, 'coordinate', 'real', 'symmetric'))
        a = b.toarray()
        b = mmread(self.fn).toarray()
        assert_array_almost_equal(a, b)

    def test_bzip2_py3(self):
        # test if fix for #2152 works
        try:
            # bz2 module isn't always built when building Python.
            import bz2
        except ImportError:
            # Report as skipped instead of silently passing.
            pytest.skip("bz2 module not available")
        I = array([0, 0, 1, 2, 3, 3, 3, 4])
        J = array([0, 3, 1, 2, 1, 3, 4, 4])
        V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
        b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
        mmwrite(self.fn, b)
        fn_bzip2 = "%s.bz2" % self.fn
        # Context managers ensure both handles are closed even on error.
        with open(self.fn, 'rb') as f_in, \
                bz2.BZ2File(fn_bzip2, 'wb') as f_out:
            f_out.write(f_in.read())
        a = mmread(fn_bzip2).toarray()
        assert_array_almost_equal(a, b.toarray())

    def test_gzip_py3(self):
        # test if fix for #2152 works
        try:
            # gzip module can be missing from Python installation
            import gzip
        except ImportError:
            pytest.skip("gzip module not available")
        I = array([0, 0, 1, 2, 3, 3, 3, 4])
        J = array([0, 3, 1, 2, 1, 3, 4, 4])
        V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
        b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
        mmwrite(self.fn, b)
        fn_gzip = "%s.gz" % self.fn
        with open(self.fn, 'rb') as f_in, \
                gzip.open(fn_gzip, 'wb') as f_out:
            f_out.write(f_in.read())
        a = mmread(fn_gzip).toarray()
        assert_array_almost_equal(a, b.toarray())

    def test_real_write_read(self):
        I = array([0, 0, 1, 2, 3, 3, 3, 4])
        J = array([0, 3, 1, 2, 1, 3, 4, 4])
        V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
        b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
        mmwrite(self.fn, b)
        assert_equal(mminfo(self.fn),
                     (5, 5, 8, 'coordinate', 'real', 'general'))
        a = b.toarray()
        b = mmread(self.fn).toarray()
        assert_array_almost_equal(a, b)

    def test_complex_write_read(self):
        I = array([0, 0, 1, 2, 3, 3, 3, 4])
        J = array([0, 3, 1, 2, 1, 3, 4, 4])
        V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j,
                   250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j])
        b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
        mmwrite(self.fn, b)
        assert_equal(mminfo(self.fn),
                     (5, 5, 8, 'coordinate', 'complex', 'general'))
        a = b.toarray()
        b = mmread(self.fn).toarray()
        assert_array_almost_equal(a, b)

    def test_sparse_formats(self, tmp_path):
        # Note: `tmp_path` is a pytest fixture, it handles cleanup
        tmpdir = tmp_path / 'sparse_formats'
        tmpdir.mkdir()
        mats = []
        I = array([0, 0, 1, 2, 3, 3, 3, 4])
        J = array([0, 3, 1, 2, 1, 3, 4, 4])
        V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
        mats.append(scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)))
        V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j,
                   250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j])
        mats.append(scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)))
        # Each sparse container type must round-trip identically.
        for mat in mats:
            expected = mat.toarray()
            for fmt in ['csr', 'csc', 'coo']:
                fname = tmpdir / (fmt + '.mtx')
                mmwrite(fname, mat.asformat(fmt))
                result = mmread(fname).toarray()
                assert_array_almost_equal(result, expected)

    def test_precision(self):
        test_values = [pi] + [10**(i) for i in range(0, -10, -1)]
        test_precisions = range(1, 10)
        for value in test_values:
            for precision in test_precisions:
                # construct sparse matrix with test value at last main diagonal
                n = 10**precision + 1
                A = scipy.sparse.dok_matrix((n, n))
                A[n-1, n-1] = value
                # write matrix with test precision and read again
                mmwrite(self.fn, A, precision=precision)
                A = scipy.io.mmread(self.fn)
                # check for right entries in matrix
                assert_array_equal(A.row, [n-1])
                assert_array_equal(A.col, [n-1])
                assert_allclose(A.data, [float('%%.%dg' % precision % value)])

    def test_bad_number_of_coordinate_header_fields(self):
        # The size line must have exactly three fields; extras are an error.
        s = """\
            %%MatrixMarket matrix coordinate real general
            5 5 8 999
            1 1 1.000e+00
            2 2 1.050e+01
            3 3 1.500e-02
            1 4 6.000e+00
            4 2 2.505e+02
            4 4 -2.800e+02
            4 5 3.332e+01
            5 5 1.200e+01
            """
        text = textwrap.dedent(s).encode('ascii')
        with pytest.raises(ValueError, match='not of length 3'):
            scipy.io.mmread(io.BytesIO(text))
def test_gh11389():
    # gh-11389: complex symmetric files whose size line begins with
    # whitespace must parse without error.
    content = ("%%MatrixMarket matrix coordinate complex symmetric\n"
               " 1 1 1\n"
               "1 1 -2.1846000000000e+02 0.0000000000000e+00")
    mmread(io.StringIO(content))
def test_gh18123(tmp_path):
lines = [" %%MatrixMarket matrix coordinate real general\n",
"5 5 3\n",
"2 3 1.0\n",
"3 4 2.0\n",
"3 5 3.0\n"]
test_file = tmp_path / "test.mtx"
with open(test_file, "w") as f:
f.writelines(lines)
mmread(test_file)
| 26,906
| 33.853627
| 86
|
py
|
scipy
|
scipy-main/scipy/_build_utils/setup.py
|
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration for the scipy._build_utils package."""
    from numpy.distutils.misc_util import Configuration
    cfg = Configuration('_build_utils', parent_package, top_path)
    # Ship the test suite alongside the package.
    cfg.add_data_dir('tests')
    return cfg
# Allow building this subpackage standalone via numpy.distutils.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 350
| 30.909091
| 68
|
py
|
scipy
|
scipy-main/scipy/_build_utils/tempita.py
|
import sys
import os
import argparse
from Cython import Tempita as tempita
# XXX: If this import ever fails (does it really?), vendor either
# cython.tempita or numpy/npy_tempita.
def process_tempita(fromfile, outfile=None):
    """Render a tempita template file and write out the result.

    The template file is expected to end in ``.c.in`` or ``.pyx.in``:
    e.g. processing ``template.c.in`` generates ``template.c``.
    """
    if outfile is None:
        # Distutils build: write next to the input, dropping the ``.in``.
        outfile = os.path.splitext(fromfile)[0]
    template = tempita.Template.from_filename(
        fromfile, encoding=sys.getdefaultencoding())
    rendered = template.substitute()
    with open(outfile, 'w') as fh:
        fh.write(rendered)
def main():
    """Command-line entry point: render one ``.in`` template into --outdir.

    Raises
    ------
    ValueError
        If the input file does not end in ``.in`` or ``--outdir`` is missing.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("infile", type=str,
                        help="Path to the input file")
    parser.add_argument("-o", "--outdir", type=str,
                        help="Path to the output directory")
    parser.add_argument("-i", "--ignore", type=str,
                        help="An ignored input - may be useful to add a "
                             "dependency between custom targets")
    args = parser.parse_args()
    if not args.infile.endswith('.in'):
        raise ValueError(f"Unexpected extension: {args.infile}")
    # Fail with a clear message instead of an opaque TypeError from
    # os.path.join(..., None) when --outdir was not supplied.
    if args.outdir is None:
        raise ValueError("Missing `--outdir` argument to tempita.py")
    outdir_abs = os.path.join(os.getcwd(), args.outdir)
    outfile = os.path.join(outdir_abs,
                           os.path.splitext(os.path.split(args.infile)[1])[0])
    process_tempita(args.infile, outfile)
# Script entry point: render the template named on the command line.
if __name__ == "__main__":
    main()
| 1,672
| 29.981481
| 78
|
py
|
scipy
|
scipy-main/scipy/_build_utils/_fortran.py
|
import re
import os
import sys
from distutils.util import get_platform
import numpy as np
from .system_info import combine_dict
__all__ = ['needs_g77_abi_wrapper', 'get_g77_abi_wrappers',
'gfortran_legacy_flag_hook', 'blas_ilp64_pre_build_hook',
'get_f2py_int64_options', 'generic_pre_build_hook',
'write_file_content', 'ilp64_pre_build_hook']
def get_fcompiler_ilp64_flags():
    """
    Dictionary of compiler flags for switching to 8-byte default integer
    size, keyed by numpy.distutils fcompiler name.
    """
    # Most vendors spell the option `-i8`; the exceptions are listed with
    # their vendor next to them. No support for: g77, hpux. Unknown: vast.
    return {
        'absoft': ['-i8'],                 # Absoft
        'compaq': ['-i8'],                 # Compaq Fortran
        'compaqv': ['/integer_size:64'],   # Compaq Visual Fortran
        'g95': ['-i8'],                    # g95
        'gnu95': ['-fdefault-integer-8'],  # GNU gfortran
        'ibm': ['-qintsize=8'],            # IBM XL Fortran
        'intel': ['-i8'],                  # Intel Fortran Compiler for 32-bit
        'intele': ['-i8'],                 # Intel Fortran Compiler for Itanium
        'intelem': ['-i8'],                # Intel Fortran Compiler for 64-bit
        'intelv': ['-i8'],                 # Intel Visual Fortran for 32-bit
        'intelev': ['-i8'],                # Intel Visual Fortran for Itanium
        'intelvem': ['-i8'],               # Intel Visual Fortran for 64-bit
        'lahey': ['--long'],               # Lahey/Fujitsu Fortran 95
        'mips': ['-i8'],                   # MIPSpro Fortran Compiler
        'nag': ['-i8'],                    # NAGWare Fortran 95 compiler
        'nagfor': ['-i8'],                 # NAG Fortran compiler
        'pathf95': ['-i8'],                # PathScale Fortran compiler
        'pg': ['-i8'],                     # Portland Group Fortran Compiler
        'flang': ['-i8'],                  # Portland Group Fortran LLVM
        'sun': ['-i8'],                    # Sun or Forte Fortran 95 Compiler
    }
def get_fcompiler_macro_include_flags(path):
    """
    Dictionary of compiler flags enabling cpp-style preprocessing with an
    #include search path at `path`, plus options (long/free line length)
    needed so macro expansion does not break fixed-format line limits.
    """
    inc = '-I' + path
    # The Intel and NAG families share identical option lists.
    intel_opts = ['-fpp', inc]
    nag_opts = ['-fpp', inc]
    # No support for: ibm (line length option turns on fixed format).
    # TODO: pg
    return {
        'absoft': ['-W132', '-cpp', inc],
        'gnu95': ['-cpp', '-ffree-line-length-none',
                  '-ffixed-line-length-none', inc],
        'intel': intel_opts,
        'intele': intel_opts,
        'intelem': intel_opts,
        'intelv': intel_opts,
        'intelev': intel_opts,
        'intelvem': intel_opts,
        'lahey': ['-Cpp', '--wide', inc],
        'mips': ['-col120', inc],
        'nag': nag_opts,
        'nagfor': nag_opts,
        'pathf95': ['-ftpp', '-macro-expand', inc],
        'flang': ['-Mpreprocess', '-Mextend', inc],
        'sun': ['-fpp', inc],
    }
def uses_mkl(info):
    """Return True if any library name in `info` contains 'mkl'."""
    # Substring match is all the original regex did.
    return any('mkl' in library for library in info.get('libraries', ''))
def needs_g77_abi_wrapper(info):
    """Returns True if g77 ABI wrapper must be used."""
    # An explicit environment override wins; otherwise the wrapper is
    # needed exactly when linking against MKL.
    override = os.environ.get("SCIPY_USE_G77_ABI_WRAPPER")
    if override is not None:
        return int(override) != 0
    return uses_mkl(info)
def get_g77_abi_wrappers(info):
    """
    Returns file names of source files containing Fortran ABI wrapper
    routines.
    """
    src = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'src')
    if needs_g77_abi_wrapper(info):
        # Real wrappers fixing the g77/gfortran complex-return ABI mismatch.
        names = ['wrap_g77_abi_f.f', 'wrap_g77_abi_c.c']
    else:
        # Pass-through stubs exposing the same symbol names.
        names = ['wrap_dummy_g77_abi.f']
    return [os.path.join(src, name) for name in names]
def gfortran_legacy_flag_hook(cmd, ext):
    """
    Pre-build hook to add the gfortran legacy flag -fallow-argument-mismatch
    """
    from .compiler_helper import try_add_flag
    from scipy._lib import _pep440
    if isinstance(ext, dict):
        # build_clib passes a build-info dict and has only one Fortran
        # compiler, used for both f77 and f90 sources.
        compilers = ((cmd._f_compiler, ext.setdefault('extra_f77_compile_args', [])),
                     (cmd._f_compiler, ext.setdefault('extra_f90_compile_args', [])))
    else:
        # build_ext passes an Extension with separate f77/f90 compilers.
        compilers = ((cmd._f77_compiler, ext.extra_f77_compile_args),
                     (cmd._f90_compiler, ext.extra_f90_compile_args))
    for compiler, args in compilers:
        if compiler is None:
            continue
        # gfortran >= 10 turned argument-mismatch warnings into hard errors;
        # the flag restores lenient behavior needed by legacy Fortran code.
        if (compiler.compiler_type == "gnu95" and
            _pep440.parse(str(compiler.version)) >= _pep440.Version("10")):
            try_add_flag(args, compiler, "-fallow-argument-mismatch")
def _get_build_src_dir():
    """Return the relative distutils ``build/src.<plat>-<X>.<Y>`` directory."""
    major, minor = sys.version_info[:2]
    plat_specifier = f".{get_platform()}-{major}.{minor}"
    return os.path.join('build', 'src' + plat_specifier)
def get_f2py_int64_options():
    """Return f2py options mapping Fortran default integers to 64-bit.

    Writes an ``int64.f2cmap`` file into the build src dir and returns the
    ``--f2cmap`` option referencing it.
    """
    # Find which C integer type is 64-bit on this platform.
    for typecode, name in (('i', 'int'), ('l', 'long'), ('q', 'long_long')):
        if np.dtype(typecode) == np.dtype(np.int64):
            int64_name = name
            break
    else:
        raise RuntimeError("No 64-bit integer type available in f2py!")
    f2cmap_fn = os.path.join(_get_build_src_dir(), 'int64.f2cmap')
    text = (f"{{'integer': {{'': '{int64_name}'}}, "
            f"'logical': {{'': '{int64_name}'}}}}\n")
    write_file_content(f2cmap_fn, text)
    return ['--f2cmap', f2cmap_fn]
def ilp64_pre_build_hook(cmd, ext):
    """
    Pre-build hook for adding Fortran compiler flags that change
    default integer size to 64-bit.
    """
    return generic_pre_build_hook(
        cmd, ext, fcompiler_flags=get_fcompiler_ilp64_flags())
def blas_ilp64_pre_build_hook(blas_info):
    """
    Pre-build hook for adding ILP64 BLAS compilation flags, and
    mangling Fortran source files to rename BLAS/LAPACK symbols when
    there are symbol suffixes.

    Examples
    --------
    ::

        from scipy._build_utils import blas_ilp64_pre_build_hook
        ext = config.add_extension(...)
        ext._pre_build_hook = blas_ilp64_pre_build_hook(blas_info)

    """
    # Named closure binding blas_info; equivalent to the former lambda.
    def hook(cmd, ext):
        return _blas_ilp64_pre_build_hook(cmd, ext, blas_info)
    return hook
def _blas_ilp64_pre_build_hook(cmd, ext, blas_info):
    """Implementation of the ILP64 BLAS pre-build hook.

    Adds ILP64 compiler flags; when the BLAS library mangles symbol names
    (prefix/suffix macros in `blas_info`), also generates #define-based
    rename headers and arranges for Fortran sources to include them.
    """
    # Determine BLAS symbol suffix/prefix, if any
    macros = dict(blas_info.get('define_macros', []))
    prefix = macros.get('BLAS_SYMBOL_PREFIX', '')
    suffix = macros.get('BLAS_SYMBOL_SUFFIX', '')
    if suffix:
        if not suffix.endswith('_'):
            # Symbol suffix has to end with '_' to be Fortran-compatible
            raise RuntimeError("BLAS/LAPACK has incompatible symbol suffix: "
                               "{!r}".format(suffix))
        # Drop the trailing '_'; it is re-added in the #define bodies below.
        suffix = suffix[:-1]
    # When symbol prefix/suffix is present, we have to patch sources
    if prefix or suffix:
        include_dir = os.path.join(_get_build_src_dir(), 'blas64-include')
        fcompiler_flags = combine_dict(get_fcompiler_ilp64_flags(),
                                       get_fcompiler_macro_include_flags(include_dir))
        # Add the include dir for C code
        if isinstance(ext, dict):
            ext.setdefault('include_dirs', [])
            ext['include_dirs'].append(include_dir)
        else:
            ext.include_dirs.append(include_dir)
        # Create name-mapping include files
        include_name_f = 'blas64-prefix-defines.inc'
        include_name_c = 'blas64-prefix-defines.h'
        include_fn_f = os.path.join(include_dir, include_name_f)
        include_fn_c = os.path.join(include_dir, include_name_c)
        text = ""
        for symbol in get_blas_lapack_symbols():
            text += f'#define {symbol} {prefix}{symbol}_{suffix}\n'
            text += f'#define {symbol.upper()} {prefix}{symbol}_{suffix}\n'
            # Code generation may give source codes with mixed-case names
            for j in (1, 2):
                s = symbol[:j].lower() + symbol[j:].upper()
                text += f'#define {s} {prefix}{symbol}_{suffix}\n'
                s = symbol[:j].upper() + symbol[j:].lower()
                text += f'#define {s} {prefix}{symbol}_{suffix}\n'
        write_file_content(include_fn_f, text)
        # The C header additionally maps the underscore-decorated spellings.
        ctext = re.sub(r'^#define (.*) (.*)$', r'#define \1_ \2_', text, flags=re.M)
        write_file_content(include_fn_c, text + "\n" + ctext)
        # Patch sources to include it
        def patch_source(filename, old_text):
            text = f'#include "{include_name_f}"\n'
            text += old_text
            return text
    else:
        # No mangling needed: only the ILP64 flags apply.
        fcompiler_flags = get_fcompiler_ilp64_flags()
        patch_source = None
    return generic_pre_build_hook(cmd, ext,
                                  fcompiler_flags=fcompiler_flags,
                                  patch_source_func=patch_source,
                                  source_fnpart="_blas64")
def generic_pre_build_hook(cmd, ext, fcompiler_flags, patch_source_func=None,
                           source_fnpart=None):
    """
    Pre-build hook for adding compiler flags and patching sources.

    Parameters
    ----------
    cmd : distutils.core.Command
        Hook input. Current distutils command (build_clib or build_ext).
    ext : dict or numpy.distutils.extension.Extension
        Hook input. Configuration information for library (dict, build_clib)
        or extension (numpy.distutils.extension.Extension, build_ext).
    fcompiler_flags : dict
        Dictionary of ``{'compiler_name': ['-flag1', ...]}`` containing
        compiler flags to set.
    patch_source_func : callable, optional
        Function patching sources, see `_generic_patch_sources` below.
    source_fnpart : str, optional
        String to append to the modified file basename before extension.

    Raises
    ------
    RuntimeError
        If an active Fortran compiler has no entry in `fcompiler_flags`.
    """
    # build_clib passes a plain build-info dict; build_ext an Extension.
    is_clib = isinstance(ext, dict)
    if is_clib:
        build_info = ext
        del ext
        # build_clib doesn't have separate f77/f90 compilers
        f77 = cmd._f_compiler
        f90 = cmd._f_compiler
    else:
        f77 = cmd._f77_compiler
        f90 = cmd._f90_compiler
    # Add compiler flags
    if is_clib:
        f77_args = build_info.setdefault('extra_f77_compile_args', [])
        f90_args = build_info.setdefault('extra_f90_compile_args', [])
        compilers = [(f77, f77_args), (f90, f90_args)]
    else:
        compilers = [(f77, ext.extra_f77_compile_args),
                     (f90, ext.extra_f90_compile_args)]
    for compiler, args in compilers:
        if compiler is None:
            continue
        try:
            flags = fcompiler_flags[compiler.compiler_type]
        except KeyError as e:
            raise RuntimeError(
                "Compiler {!r} is not supported in this "
                "configuration.".format(compiler.compiler_type)
            ) from e
        # Only add flags that are not present yet, to avoid duplicates.
        args.extend(flag for flag in flags if flag not in args)
    # Mangle sources
    if patch_source_func is not None:
        # Record the original files as dependencies so rebuilds trigger
        # when they change; the patched copies are what gets compiled.
        if is_clib:
            build_info.setdefault('depends', []).extend(build_info['sources'])
            new_sources = _generic_patch_sources(build_info['sources'], patch_source_func,
                                                 source_fnpart)
            build_info['sources'][:] = new_sources
        else:
            ext.depends.extend(ext.sources)
            new_sources = _generic_patch_sources(ext.sources, patch_source_func,
                                                 source_fnpart)
            ext.sources[:] = new_sources
def _generic_patch_sources(filenames, patch_source_func, source_fnpart, root_dir=None):
    """
    Patch Fortran sources, creating new source files.

    Parameters
    ----------
    filenames : list
        List of Fortran source files to patch.
        Files not ending in ``.f`` or ``.f90`` are left unaltered.
    patch_source_func : callable(filename, old_contents) -> new_contents
        Function to apply to file contents, returning new file contents
        as a string.
    source_fnpart : str
        String to append to the modified file basename before extension.
    root_dir : str, optional
        Source root directory. Default: cwd

    Returns
    -------
    new_filenames : list
        List of names of the newly created patched sources.
    """
    new_filenames = []
    if root_dir is None:
        root_dir = os.getcwd()
    root_dir = os.path.abspath(root_dir)
    src_dir = os.path.join(root_dir, _get_build_src_dir())
    for src in filenames:
        base, ext = os.path.splitext(os.path.basename(src))
        if ext not in ('.f', '.f90'):
            # Non-Fortran files pass through untouched.
            new_filenames.append(src)
            continue
        with open(src) as fsrc:
            text = patch_source_func(src, fsrc.read())
        # Generate useful target directory name under src_dir
        src_path = os.path.abspath(os.path.dirname(src))
        # Mirror the source's relative path under whichever root contains
        # it, preferring src_dir for already-generated files (for/else
        # raises if the source lives outside both roots).
        for basedir in [src_dir, root_dir]:
            if os.path.commonpath([src_path, basedir]) == basedir:
                rel_path = os.path.relpath(src_path, basedir)
                break
        else:
            raise ValueError(f"{src!r} not under {root_dir!r}")
        dst = os.path.join(src_dir, rel_path, base + source_fnpart + ext)
        write_file_content(dst, text)
        new_filenames.append(dst)
    return new_filenames
def write_file_content(filename, content):
    """
    Write content to file, but only if it differs from the current one.

    Skipping identical content preserves the file's mtime, so build tools
    do not needlessly recompile dependents.
    """
    if os.path.isfile(filename):
        with open(filename) as f:
            old_content = f.read()
        if old_content == content:
            return
    dirname = os.path.dirname(filename)
    if dirname:
        # exist_ok avoids the isdir()/makedirs() race when parallel build
        # steps create the same directory; the empty-dirname guard avoids
        # makedirs('') failing for bare filenames in the cwd.
        os.makedirs(dirname, exist_ok=True)
    with open(filename, 'w') as f:
        f.write(content)
def get_blas_lapack_symbols():
    """Return the sorted tuple of BLAS/LAPACK symbol names scipy wraps."""
    # Memoized on the function object: computed once per process.
    cached = getattr(get_blas_lapack_symbols, 'cached', None)
    if cached is not None:
        return cached
    # Obtain symbol list from Cython Blas/Lapack interface
    srcdir = os.path.join(os.path.dirname(__file__), os.pardir, 'linalg')
    symbols = []
    # Get symbols from the generated files
    for fn in ['cython_blas_signatures.txt', 'cython_lapack_signatures.txt']:
        with open(os.path.join(srcdir, fn)) as f:
            for line in f:
                # Match "rettype name(" at the start of a signature line.
                m = re.match(r"^\s*[a-z]+\s+([a-z0-9]+)\(", line)
                if m:
                    symbols.append(m.group(1))
    # Get the rest from the generator script
    # (we cannot import it directly here, so use exec)
    # NOTE: exec is acceptable only because the input is trusted in-repo
    # build source, never external data.
    sig_fn = os.path.join(srcdir, '_cython_signature_generator.py')
    with open(sig_fn) as f:
        code = f.read()
    ns = {'__name__': '<module>'}
    exec(code, ns)
    symbols.extend(ns['blas_exclusions'])
    symbols.extend(ns['lapack_exclusions'])
    get_blas_lapack_symbols.cached = tuple(sorted(set(symbols)))
    return get_blas_lapack_symbols.cached
| 14,937
| 32.493274
| 90
|
py
|
scipy
|
scipy-main/scipy/_build_utils/__init__.py
|
import os
import numpy as np
from ._fortran import *
from .system_info import combine_dict
# Don't use the deprecated NumPy C API. Define this to a fixed version instead of
# NPY_API_VERSION in order not to break compilation for released SciPy versions
# when NumPy introduces a new deprecation. Use in setup.py::
#
# config.add_extension('_name', sources=['source_fname'], **numpy_nodepr_api)
#
# Keyword arguments (C macro definitions) spliced into extension configs.
numpy_nodepr_api = dict(define_macros=[("NPY_NO_DEPRECATED_API",
                                        "NPY_1_9_API_VERSION")])
def uses_blas64():
    """Return True when building against 64-bit (ILP64) BLAS/LAPACK."""
    # Any value other than "0" (or unset) enables ILP64.
    return os.environ.get("NPY_USE_BLAS_ILP64", "0") != "0"
def import_file(folder, module_name):
    """Import a file directly, avoiding importing scipy.

    Parameters
    ----------
    folder : str or os.PathLike
        Directory containing ``<module_name>.py``.
    module_name : str
        Module name (file stem) to load.

    Returns
    -------
    module
        The freshly executed module object.
    """
    # `import importlib` alone does not guarantee the `util` submodule is
    # bound as an attribute; import it explicitly.
    import importlib.util
    import pathlib
    fname = pathlib.Path(folder) / f'{module_name}.py'
    spec = importlib.util.spec_from_file_location(module_name, str(fname))
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
# Expose `scipy._build_utils.test()` backed by pytest, mirroring the
# convention used across scipy subpackages; the class is deleted afterwards
# to keep the public namespace clean.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 1,084
| 29.138889
| 81
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.