repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
scipy
|
scipy-main/scipy/ndimage/tests/test_measurements.py
|
import os.path
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal, assert_equal,
assert_almost_equal, assert_array_equal,
suppress_warnings)
from pytest import raises as assert_raises
import scipy.ndimage as ndimage
from . import types
class Test_measurements_stats:
    """ndimage._measurements._stats() is a utility used by other functions."""

    def test_a(self):
        # Labels forming a small contiguous range [0, 1].
        values = [0, 1, 2, 6]
        lbls = [0, 0, 1, 1]
        idx = [0, 1]
        # The same data is exercised in 1-D and in 2-D layout.
        for shape in ((4,), (2, 2)):
            arr = np.array(values).reshape(shape)
            lbl_arr = np.array(lbls).reshape(shape)
            counts, sums = ndimage._measurements._stats(
                arr, labels=lbl_arr, index=idx)
            assert_array_equal(counts, [2, 2])
            assert_array_equal(sums, [1.0, 8.0])

    def test_b(self):
        # Same data as test_a, but label 9 exceeds len(labels), which
        # exercises a different code path inside _stats().
        values = [0, 1, 2, 6]
        lbls = [0, 0, 9, 9]
        idx = [0, 9]
        for shape in ((4,), (2, 2)):
            arr = np.array(values).reshape(shape)
            lbl_arr = np.array(lbls).reshape(shape)
            counts, sums = ndimage._measurements._stats(
                arr, labels=lbl_arr, index=idx)
            assert_array_equal(counts, [2, 2])
            assert_array_equal(sums, [1.0, 8.0])

    def test_a_centered(self):
        # centered=True additionally returns per-label sums of squared
        # deviations: {0,1} -> 0.5 and {2,6} -> 8.0.
        values = [0, 1, 2, 6]
        lbls = [0, 0, 1, 1]
        idx = [0, 1]
        for shape in ((4,), (2, 2)):
            arr = np.array(values).reshape(shape)
            lbl_arr = np.array(lbls).reshape(shape)
            counts, sums, centers = ndimage._measurements._stats(
                arr, labels=lbl_arr, index=idx, centered=True)
            assert_array_equal(counts, [2, 2])
            assert_array_equal(sums, [1.0, 8.0])
            assert_array_equal(centers, [0.5, 8.0])

    def test_b_centered(self):
        # Out-of-range label combined with centered=True.
        values = [0, 1, 2, 6]
        lbls = [0, 0, 9, 9]
        idx = [0, 9]
        for shape in ((4,), (2, 2)):
            arr = np.array(values).reshape(shape)
            lbl_arr = np.array(lbls).reshape(shape)
            counts, sums, centers = ndimage._measurements._stats(
                arr, labels=lbl_arr, index=idx, centered=True)
            assert_array_equal(counts, [2, 2])
            assert_array_equal(sums, [1.0, 8.0])
            assert_array_equal(centers, [0.5, 8.0])

    def test_nonint_labels(self):
        # Floating-point labels must be accepted as well.
        values = [0, 1, 2, 6]
        lbls = [0.0, 0.0, 9.0, 9.0]
        idx = [0.0, 9.0]
        for shape in ((4,), (2, 2)):
            arr = np.array(values).reshape(shape)
            lbl_arr = np.array(lbls).reshape(shape)
            counts, sums, centers = ndimage._measurements._stats(
                arr, labels=lbl_arr, index=idx, centered=True)
            assert_array_equal(counts, [2, 2])
            assert_array_equal(sums, [1.0, 8.0])
            assert_array_equal(centers, [0.5, 8.0])
class Test_measurements_select:
    """ndimage._measurements._select() is a utility used by other functions."""

    def test_basic(self):
        values = [0, 1, 6, 2]
        scenarios = [
            ([0, 0, 1, 1], [0, 1]),              # "small" integer labels
            ([0, 0, 9, 9], [0, 9]),              # label larger than len(labels)
            ([0.0, 0.0, 7.0, 7.0], [0.0, 7.0]),  # non-integer labels
        ]
        for lbls, idx in scenarios:
            # Without any find_* flag nothing is returned.
            res = ndimage._measurements._select(
                values, labels=lbls, index=idx)
            assert_(len(res) == 0)
            # Per-label maxima only.
            res = ndimage._measurements._select(
                values, labels=lbls, index=idx, find_max=True)
            assert_(len(res) == 1)
            assert_array_equal(res[0], [1, 6])
            # Per-label minima only.
            res = ndimage._measurements._select(
                values, labels=lbls, index=idx, find_min=True)
            assert_(len(res) == 1)
            assert_array_equal(res[0], [0, 2])
            # Minima together with their positions (integer dtype).
            res = ndimage._measurements._select(
                values, labels=lbls, index=idx, find_min=True,
                find_min_positions=True)
            assert_(len(res) == 2)
            assert_array_equal(res[0], [0, 2])
            assert_array_equal(res[1], [0, 3])
            assert_equal(res[1].dtype.kind, 'i')
            # Maxima together with their positions (integer dtype).
            res = ndimage._measurements._select(
                values, labels=lbls, index=idx, find_max=True,
                find_max_positions=True)
            assert_(len(res) == 2)
            assert_array_equal(res[0], [1, 6])
            assert_array_equal(res[1], [1, 2])
            assert_equal(res[1].dtype.kind, 'i')
def test_label01():
    # A 0-d foreground scalar yields exactly one labelled feature.
    labeled, n_features = ndimage.label(np.ones([]))
    assert_array_almost_equal(labeled, 1)
    assert_equal(n_features, 1)


def test_label02():
    # A 0-d background scalar yields no features.
    labeled, n_features = ndimage.label(np.zeros([]))
    assert_array_almost_equal(labeled, 0)
    assert_equal(n_features, 0)


def test_label03():
    # Single foreground element in 1-D.
    labeled, n_features = ndimage.label(np.ones([1]))
    assert_array_almost_equal(labeled, [1])
    assert_equal(n_features, 1)


def test_label04():
    # Single background element in 1-D.
    labeled, n_features = ndimage.label(np.zeros([1]))
    assert_array_almost_equal(labeled, [0])
    assert_equal(n_features, 0)


def test_label05():
    # One connected run spanning the whole 1-D array.
    labeled, n_features = ndimage.label(np.ones([5]))
    assert_array_almost_equal(labeled, [1, 1, 1, 1, 1])
    assert_equal(n_features, 1)


def test_label06():
    # Three separate 1-D runs receive consecutive labels.
    labeled, n_features = ndimage.label(np.array([1, 0, 1, 1, 0, 1]))
    assert_array_almost_equal(labeled, [1, 0, 2, 2, 0, 3])
    assert_equal(n_features, 3)
def test_label07():
    # All-background 2-D input: output stays all zeros, zero features.
    data = np.array([[0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0]])
    out, n = ndimage.label(data)
    assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0],
                                    [0, 0, 0, 0, 0, 0],
                                    [0, 0, 0, 0, 0, 0],
                                    [0, 0, 0, 0, 0, 0],
                                    [0, 0, 0, 0, 0, 0],
                                    [0, 0, 0, 0, 0, 0]])
    assert_equal(n, 0)
def test_label08():
    # Default (cross-shaped) connectivity: the diagonal touches do not
    # merge regions, so four distinct features are found.
    data = np.array([[1, 0, 0, 0, 0, 0],
                     [0, 0, 1, 1, 0, 0],
                     [0, 0, 1, 1, 1, 0],
                     [1, 1, 0, 0, 0, 0],
                     [1, 1, 0, 0, 0, 0],
                     [0, 0, 0, 1, 1, 0]])
    out, n = ndimage.label(data)
    assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0],
                                    [0, 0, 2, 2, 0, 0],
                                    [0, 0, 2, 2, 2, 0],
                                    [3, 3, 0, 0, 0, 0],
                                    [3, 3, 0, 0, 0, 0],
                                    [0, 0, 0, 4, 4, 0]])
    assert_equal(n, 4)
def test_label09():
    # Full (8-connected) structuring element: diagonally touching blobs
    # merge, reducing the same input to three features.
    data = np.array([[1, 0, 0, 0, 0, 0],
                     [0, 0, 1, 1, 0, 0],
                     [0, 0, 1, 1, 1, 0],
                     [1, 1, 0, 0, 0, 0],
                     [1, 1, 0, 0, 0, 0],
                     [0, 0, 0, 1, 1, 0]])
    struct = ndimage.generate_binary_structure(2, 2)
    out, n = ndimage.label(data, struct)
    assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0],
                                    [0, 0, 2, 2, 0, 0],
                                    [0, 0, 2, 2, 2, 0],
                                    [2, 2, 0, 0, 0, 0],
                                    [2, 2, 0, 0, 0, 0],
                                    [0, 0, 0, 3, 3, 0]])
    assert_equal(n, 3)
def test_label10():
    # With 8-connectivity the whole U-shaped blob is a single feature.
    data = np.array([[0, 0, 0, 0, 0, 0],
                     [0, 1, 1, 0, 1, 0],
                     [0, 1, 1, 1, 1, 0],
                     [0, 0, 0, 0, 0, 0]])
    struct = ndimage.generate_binary_structure(2, 2)
    out, n = ndimage.label(data, struct)
    assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0],
                                    [0, 1, 1, 0, 1, 0],
                                    [0, 1, 1, 1, 1, 0],
                                    [0, 0, 0, 0, 0, 0]])
    assert_equal(n, 1)
def test_label11():
    # Same 4-feature pattern as test_label08, repeated for every dtype in
    # the module-level `types` list.
    for type in types:
        data = np.array([[1, 0, 0, 0, 0, 0],
                         [0, 0, 1, 1, 0, 0],
                         [0, 0, 1, 1, 1, 0],
                         [1, 1, 0, 0, 0, 0],
                         [1, 1, 0, 0, 0, 0],
                         [0, 0, 0, 1, 1, 0]], type)
        out, n = ndimage.label(data)
        expected = [[1, 0, 0, 0, 0, 0],
                    [0, 0, 2, 2, 0, 0],
                    [0, 0, 2, 2, 2, 0],
                    [3, 3, 0, 0, 0, 0],
                    [3, 3, 0, 0, 0, 0],
                    [0, 0, 0, 4, 4, 0]]
        assert_array_almost_equal(out, expected)
        assert_equal(n, 4)
def test_label11_inplace():
    # In-place variant: labels are written back into the input array via
    # output=data, and label() then returns only the feature count.
    for type in types:
        data = np.array([[1, 0, 0, 0, 0, 0],
                         [0, 0, 1, 1, 0, 0],
                         [0, 0, 1, 1, 1, 0],
                         [1, 1, 0, 0, 0, 0],
                         [1, 1, 0, 0, 0, 0],
                         [0, 0, 0, 1, 1, 0]], type)
        n = ndimage.label(data, output=data)
        expected = [[1, 0, 0, 0, 0, 0],
                    [0, 0, 2, 2, 0, 0],
                    [0, 0, 2, 2, 2, 0],
                    [3, 3, 0, 0, 0, 0],
                    [3, 3, 0, 0, 0, 0],
                    [0, 0, 0, 4, 4, 0]]
        assert_array_almost_equal(data, expected)
        assert_equal(n, 4)
def test_label12():
    # A single connected region that wraps around background; all dtypes.
    for type in types:
        data = np.array([[0, 0, 0, 0, 1, 1],
                         [0, 0, 0, 0, 0, 1],
                         [0, 0, 1, 0, 1, 1],
                         [0, 0, 1, 1, 1, 1],
                         [0, 0, 0, 1, 1, 0]], type)
        out, n = ndimage.label(data)
        expected = [[0, 0, 0, 0, 1, 1],
                    [0, 0, 0, 0, 0, 1],
                    [0, 0, 1, 0, 1, 1],
                    [0, 0, 1, 1, 1, 1],
                    [0, 0, 0, 1, 1, 0]]
        assert_array_almost_equal(out, expected)
        assert_equal(n, 1)
def test_label13():
    # A comb-shaped region joined along the bottom row is one feature.
    for type in types:
        data = np.array([[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1],
                         [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],
                         [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
                        type)
        out, n = ndimage.label(data)
        expected = [[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1],
                    [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],
                    [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        assert_array_almost_equal(out, expected)
        assert_equal(n, 1)
def test_label_output_typed():
    # A preallocated output array of any supported dtype may be supplied;
    # label() then returns only the feature count.
    data = np.ones([5])
    for t in types:
        output = np.zeros([5], dtype=t)
        n = ndimage.label(data, output=output)
        assert_array_almost_equal(output, 1)
        assert_equal(n, 1)
def test_label_output_dtype():
    # Passing a dtype as `output` requests that dtype for the result array.
    data = np.ones([5])
    for t in types:
        output, n = ndimage.label(data, output=t)
        assert_array_almost_equal(output, 1)
        assert output.dtype == t
def test_label_output_wrong_size():
    # A preallocated output with a mismatched shape must be rejected.
    data = np.ones([5])
    for t in types:
        output = np.zeros([10], t)
        assert_raises((RuntimeError, ValueError),
                      ndimage.label, data, output=output)
def test_label_structuring_elements():
    # Compare ndimage.label against stored fixtures: every input image is
    # labelled with every structuring element, and the flattened results
    # file holds one expected 7x7 labelling per (image, strel) pair.
    here = os.path.dirname(__file__)
    data = np.loadtxt(os.path.join(here, "data", "label_inputs.txt"))
    strels = np.loadtxt(os.path.join(here, "data", "label_strels.txt"))
    results = np.loadtxt(os.path.join(here, "data", "label_results.txt"))
    data = data.reshape((-1, 7, 7))
    strels = strels.reshape((-1, 3, 3))
    results = results.reshape((-1, 7, 7))
    expected = iter(results)
    for image in data:
        for strel in strels:
            assert_equal(ndimage.label(image, strel)[0], next(expected))
def test_ticket_742():
    # Regression test for Trac ticket #742: labelling a large thresholded
    # random volume and running find_objects over it shouldn't crash.
    def SE(img, thresh=.7, size=4):
        mask = img > thresh
        rank = len(mask.shape)
        la, co = ndimage.label(mask,
                               ndimage.generate_binary_structure(rank, rank))
        _ = ndimage.find_objects(la)
    if np.dtype(np.intp) != np.dtype('i'):
        # Only run where intp differs from 'i' (i.e. 64-bit platforms).
        shape = (3, 1240, 1240)
        a = np.random.rand(np.prod(shape)).reshape(shape)
        # shouldn't crash
        SE(a)
def test_gh_issue_3025():
    """Github issue #3025 - improper merging of labels"""
    # Two large slabs joined only through a thin diagonal bridge must be
    # labelled as a single feature under 8-connectivity.
    img = np.zeros((60, 320))
    img[:, :257] = 1
    img[:, 260:] = 1
    for row, col in ((36, 257), (35, 258), (35, 259)):
        img[row, col] = 1
    assert ndimage.label(img, np.ones((3, 3)))[1] == 1
def test_label_default_dtype():
    # By default label() produces a platform-native integer label array.
    mask = np.random.rand(10, 10) > 0.5
    labeled, _ = ndimage.label(mask)
    assert_(labeled.dtype in (np.int32, np.int64))
    # find_objects must accept the default dtype without raising.
    ndimage.find_objects(labeled)
def test_find_objects01():
    # 0-d foreground: a single object with an empty slice tuple.
    out = ndimage.find_objects(np.ones([], dtype=int))
    assert_(out == [()])


def test_find_objects02():
    # 0-d background: no objects at all.
    out = ndimage.find_objects(np.zeros([], dtype=int))
    assert_(out == [])


def test_find_objects03():
    # One labelled element spans the whole 1-D extent.
    out = ndimage.find_objects(np.ones([1], dtype=int))
    assert_equal(out, [(slice(0, 1, None),)])


def test_find_objects04():
    # One background element: nothing found.
    out = ndimage.find_objects(np.zeros([1], dtype=int))
    assert_equal(out, [])


def test_find_objects05():
    # A fully-labelled 1-D array yields one full-extent slice.
    out = ndimage.find_objects(np.ones([5], dtype=int))
    assert_equal(out, [(slice(0, 5, None),)])


def test_find_objects06():
    # Three labels produce one bounding slice tuple per label.
    out = ndimage.find_objects(np.array([1, 0, 2, 2, 0, 3]))
    assert_equal(out, [(slice(0, 1, None),),
                       (slice(2, 4, None),),
                       (slice(5, 6, None),)])
def test_find_objects07():
    # All-background 2-D input: no objects.
    data = np.array([[0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0]])
    out = ndimage.find_objects(data)
    assert_equal(out, [])
def test_find_objects08():
    # Four labels: one 2-D bounding box (row slice, column slice) each.
    data = np.array([[1, 0, 0, 0, 0, 0],
                     [0, 0, 2, 2, 0, 0],
                     [0, 0, 2, 2, 2, 0],
                     [3, 3, 0, 0, 0, 0],
                     [3, 3, 0, 0, 0, 0],
                     [0, 0, 0, 4, 4, 0]])
    out = ndimage.find_objects(data)
    assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)),
                       (slice(1, 3, None), slice(2, 5, None)),
                       (slice(3, 5, None), slice(0, 2, None)),
                       (slice(5, 6, None), slice(3, 5, None))])
def test_find_objects09():
    # Label 3 is absent from the input, so its entry in the result is None.
    data = np.array([[1, 0, 0, 0, 0, 0],
                     [0, 0, 2, 2, 0, 0],
                     [0, 0, 2, 2, 2, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 4, 4, 0]])
    out = ndimage.find_objects(data)
    assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)),
                       (slice(1, 3, None), slice(2, 5, None)),
                       None,
                       (slice(5, 6, None), slice(3, 5, None))])
def test_value_indices01():
    "Test dictionary keys and entries"
    data = np.array([[1, 0, 0, 0, 0, 0],
                     [0, 0, 2, 2, 0, 0],
                     [0, 0, 2, 2, 2, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 4, 4, 0]])
    # ignore_value=0 drops the background, leaving only values 1, 2 and 4.
    vi = ndimage.value_indices(data, ignore_value=0)
    true_keys = [1, 2, 4]
    assert_equal(list(vi.keys()), true_keys)
    # Each entry must match np.where for that value.  (The original test
    # called value_indices a second time with identical arguments before
    # this comparison; the redundant call was removed.)
    truevi = {k: np.where(data == k) for k in true_keys}
    assert_equal(vi, truevi)
def test_value_indices02():
    "Test input checking"
    # Floating-point input must be rejected with a clear error message.
    bad = np.zeros((5, 4), dtype=np.float32)
    msg = "Parameter 'arr' must be an integer array"
    with assert_raises(ValueError, match=msg):
        ndimage.value_indices(bad)


def test_value_indices03():
    "Test different input array shapes, from 1-D to 4-D"
    for shape in [(36,), (18, 2), (3, 3, 4), (3, 3, 2, 2)]:
        arr = np.array((12*[1]+12*[2]+12*[3]), dtype=np.int32).reshape(shape)
        expected_keys = np.unique(arr)
        vi = ndimage.value_indices(arr)
        assert_equal(list(vi.keys()), list(expected_keys))
        # Entries must agree with np.where regardless of dimensionality.
        for key in expected_keys:
            assert_equal(vi[key], np.where(arr == key))
# The sum tests iterate over every dtype in the module-level `types` list.
def test_sum01():
    # Empty 1-D input sums to 0.
    for type in types:
        input = np.array([], type)
        output = ndimage.sum(input)
        assert_equal(output, 0.0)
def test_sum02():
    # Empty 2-D input (zero rows) sums to 0.
    for type in types:
        input = np.zeros([0, 4], type)
        output = ndimage.sum(input)
        assert_equal(output, 0.0)
def test_sum03():
    # 0-d scalar input.
    for type in types:
        input = np.ones([], type)
        output = ndimage.sum(input)
        assert_almost_equal(output, 1.0)
def test_sum04():
    for type in types:
        input = np.array([1, 2], type)
        output = ndimage.sum(input)
        assert_almost_equal(output, 3.0)
def test_sum05():
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.sum(input)
        assert_almost_equal(output, 10.0)
def test_sum06():
    # Empty input with matching empty labels.
    labels = np.array([], bool)
    for type in types:
        input = np.array([], type)
        output = ndimage.sum(input, labels=labels)
        assert_equal(output, 0.0)
def test_sum07():
    labels = np.ones([0, 4], bool)
    for type in types:
        input = np.zeros([0, 4], type)
        output = ndimage.sum(input, labels=labels)
        assert_equal(output, 0.0)
def test_sum08():
    # Boolean labels broadcast along rows select only the first column.
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([1, 2], type)
        output = ndimage.sum(input, labels=labels)
        assert_equal(output, 1.0)
def test_sum09():
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.sum(input, labels=labels)
        assert_almost_equal(output, 4.0)
def test_sum10():
    # Boolean input: True elements count as 1 each.
    labels = np.array([1, 0], bool)
    input = np.array([[1, 2], [3, 4]], bool)
    output = ndimage.sum(input, labels=labels)
    assert_almost_equal(output, 2.0)
def test_sum11():
    # A scalar index selects the sum for that single label.
    labels = np.array([1, 2], np.int8)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.sum(input, labels=labels,
                             index=2)
        assert_almost_equal(output, 6.0)
def test_sum12():
    # A sequence index yields one sum per requested label; label 8 is
    # absent and contributes 0.
    labels = np.array([[1, 2], [2, 4]], np.int8)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.sum(input, labels=labels, index=[4, 8, 2])
        assert_array_almost_equal(output, [4.0, 0.0, 5.0])
def test_sum_labels():
    # sum_labels is the documented alias of sum; both must agree exactly.
    labels = np.array([[1, 2], [2, 4]], np.int8)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output_sum = ndimage.sum(input, labels=labels, index=[4, 8, 2])
        output_labels = ndimage.sum_labels(
            input, labels=labels, index=[4, 8, 2])
        assert (output_sum == output_labels).all()
        assert_array_almost_equal(output_labels, [4.0, 0.0, 5.0])
def test_mean01():
    # Boolean labels select the first column: mean of {1, 3} is 2.
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.mean(input, labels=labels)
        assert_almost_equal(output, 2.0)
def test_mean02():
    # Boolean input: all selected elements are True (== 1).
    labels = np.array([1, 0], bool)
    input = np.array([[1, 2], [3, 4]], bool)
    output = ndimage.mean(input, labels=labels)
    assert_almost_equal(output, 1.0)
def test_mean03():
    # Scalar index 2 selects the second column: mean of {2, 4} is 3.
    labels = np.array([1, 2])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.mean(input, labels=labels,
                              index=2)
        assert_almost_equal(output, 3.0)
def test_mean04():
    # Label 8 is absent: its mean is 0/0 and must come back as NaN
    # (division warnings are suppressed via errstate).
    labels = np.array([[1, 2], [2, 4]], np.int8)
    with np.errstate(all='ignore'):
        for type in types:
            input = np.array([[1, 2], [3, 4]], type)
            output = ndimage.mean(input, labels=labels,
                                  index=[4, 8, 2])
            assert_array_almost_equal(output[[0, 2]], [4.0, 2.5])
            assert_(np.isnan(output[1]))
def test_minimum01():
    # Boolean labels select the first column: min of {1, 3} is 1.
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.minimum(input, labels=labels)
        assert_almost_equal(output, 1.0)
def test_minimum02():
    # Boolean input: every element is True, so the minimum is 1.
    labels = np.array([1, 0], bool)
    input = np.array([[2, 2], [2, 4]], bool)
    output = ndimage.minimum(input, labels=labels)
    assert_almost_equal(output, 1.0)
def test_minimum03():
    # Scalar index 2 selects the second column: min of {2, 4} is 2.
    labels = np.array([1, 2])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.minimum(input, labels=labels,
                                 index=2)
        assert_almost_equal(output, 2.0)
def test_minimum04():
    # Sequence index; the absent label 8 yields 0.
    labels = np.array([[1, 2], [2, 3]])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.minimum(input, labels=labels,
                                 index=[2, 3, 8])
        assert_array_almost_equal(output, [2.0, 4.0, 0.0])
def test_maximum01():
    # Boolean labels select the first column: max of {1, 3} is 3.
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.maximum(input, labels=labels)
        assert_almost_equal(output, 3.0)
def test_maximum02():
    # Boolean input: every element is True, so the maximum is 1.
    labels = np.array([1, 0], bool)
    input = np.array([[2, 2], [2, 4]], bool)
    output = ndimage.maximum(input, labels=labels)
    assert_almost_equal(output, 1.0)
def test_maximum03():
    # Scalar index 2 selects the second column: max of {2, 4} is 4.
    labels = np.array([1, 2])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.maximum(input, labels=labels,
                                 index=2)
        assert_almost_equal(output, 4.0)
def test_maximum04():
    # Sequence index; the absent label 8 yields 0.
    labels = np.array([[1, 2], [2, 3]])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.maximum(input, labels=labels,
                                 index=[2, 3, 8])
        assert_array_almost_equal(output, [3.0, 4.0, 0.0])
def test_maximum05():
    # Regression test for ticket #501 (Trac): all-negative input.
    arr = np.array([-3, -2, -1])
    assert_equal(ndimage.maximum(arr), -1)
def test_median01():
    # Per-label medians: label 1 -> {1,2,5,3}, label 2 -> {1,4,7},
    # label 3 -> {9,3}.
    values = np.array([[1, 2, 0, 1],
                       [5, 3, 0, 4],
                       [0, 0, 0, 7],
                       [9, 3, 0, 0]])
    regions = np.array([[1, 1, 0, 2],
                        [1, 1, 0, 2],
                        [0, 0, 0, 2],
                        [3, 3, 0, 0]])
    out = ndimage.median(values, labels=regions, index=[1, 2, 3])
    assert_array_almost_equal(out, [2.5, 4.0, 6.0])


def test_median02():
    # No labels: the median is taken over every element, zeros included.
    values = np.array([[1, 2, 0, 1],
                       [5, 3, 0, 4],
                       [0, 0, 0, 7],
                       [9, 3, 0, 0]])
    assert_almost_equal(ndimage.median(values), 1.0)


def test_median03():
    # Labels without an index: median over all non-zero-labelled elements.
    values = np.array([[1, 2, 0, 1],
                       [5, 3, 0, 4],
                       [0, 0, 0, 7],
                       [9, 3, 0, 0]])
    regions = np.array([[1, 1, 0, 2],
                        [1, 1, 0, 2],
                        [0, 0, 0, 2],
                        [3, 3, 0, 0]])
    assert_almost_equal(ndimage.median(values, labels=regions), 3.0)


def test_median_gh12836_bool():
    # test boolean addition fix on example from gh-12836
    bool_input = np.asarray([1, 1], dtype=bool)
    out = ndimage.median(bool_input, labels=np.ones((2,)), index=[1])
    assert_array_almost_equal(out, [1.0])


def test_median_no_int_overflow():
    # test integer overflow fix on example from gh-12836
    small_ints = np.asarray([65, 70], dtype=np.int8)
    out = ndimage.median(small_ints, labels=np.ones((2,)), index=[1])
    assert_array_almost_equal(out, [67.5])
def test_variance01():
    # Variance of an empty array is NaN; the empty-slice warning is
    # silenced so the test stays quiet.
    with np.errstate(all='ignore'):
        for type in types:
            input = np.array([], type)
            with suppress_warnings() as sup:
                sup.filter(RuntimeWarning, "Mean of empty slice")
                output = ndimage.variance(input)
            assert_(np.isnan(output))
def test_variance02():
    # A single element has zero variance.
    for type in types:
        input = np.array([1], type)
        output = ndimage.variance(input)
        assert_almost_equal(output, 0.0)
def test_variance03():
    # Population variance of {1, 3} is 1.
    for type in types:
        input = np.array([1, 3], type)
        output = ndimage.variance(input)
        assert_almost_equal(output, 1.0)
def test_variance04():
    # Boolean input {True, False}: population variance 0.25.
    input = np.array([1, 0], bool)
    output = ndimage.variance(input)
    assert_almost_equal(output, 0.25)
def test_variance05():
    # Scalar index restricts the computation to label 2 -> {1, 3}.
    labels = [2, 2, 3]
    for type in types:
        input = np.array([1, 3, 8], type)
        output = ndimage.variance(input, labels, 2)
        assert_almost_equal(output, 1.0)
def test_variance06():
    # One variance per label; the single-element label 4 gives 0.
    labels = [2, 2, 3, 3, 4]
    with np.errstate(all='ignore'):
        for type in types:
            input = np.array([1, 3, 8, 10, 8], type)
            output = ndimage.variance(input, labels, [2, 3, 4])
            assert_array_almost_equal(output, [1.0, 1.0, 0.0])
def test_standard_deviation01():
    # Standard deviation of an empty array is NaN.
    with np.errstate(all='ignore'):
        for type in types:
            input = np.array([], type)
            with suppress_warnings() as sup:
                sup.filter(RuntimeWarning, "Mean of empty slice")
                output = ndimage.standard_deviation(input)
            assert_(np.isnan(output))
def test_standard_deviation02():
    for type in types:
        input = np.array([1], type)
        output = ndimage.standard_deviation(input)
        assert_almost_equal(output, 0.0)
def test_standard_deviation03():
    for type in types:
        input = np.array([1, 3], type)
        output = ndimage.standard_deviation(input)
        assert_almost_equal(output, np.sqrt(1.0))
def test_standard_deviation04():
    input = np.array([1, 0], bool)
    output = ndimage.standard_deviation(input)
    assert_almost_equal(output, 0.5)
def test_standard_deviation05():
    labels = [2, 2, 3]
    for type in types:
        input = np.array([1, 3, 8], type)
        output = ndimage.standard_deviation(input, labels, 2)
        assert_almost_equal(output, 1.0)
def test_standard_deviation06():
    labels = [2, 2, 3, 3, 4]
    with np.errstate(all='ignore'):
        for type in types:
            input = np.array([1, 3, 8, 10, 8], type)
            output = ndimage.standard_deviation(input, labels, [2, 3, 4])
            assert_array_almost_equal(output, [1.0, 1.0, 0.0])
def test_standard_deviation07():
    # A single tiny value must give exactly zero, not a rounding artefact.
    labels = [1]
    with np.errstate(all='ignore'):
        for type in types:
            input = np.array([-0.00619519], type)
            output = ndimage.standard_deviation(input, labels, [1])
            assert_array_almost_equal(output, [0])
def test_minimum_position01():
    # Boolean labels select the first column: min of {1, 3} sits at (0, 0).
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.minimum_position(input, labels=labels)
        assert_equal(output, (0, 0))
def test_minimum_position02():
    # Unlabelled: the global minimum 0 sits at (1, 2).
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 0, 2],
                          [1, 5, 1, 1]], type)
        output = ndimage.minimum_position(input)
        assert_equal(output, (1, 2))
def test_minimum_position03():
    # Boolean view of the same data: the only False element is at (1, 2).
    input = np.array([[5, 4, 2, 5],
                      [3, 7, 0, 2],
                      [1, 5, 1, 1]], bool)
    output = ndimage.minimum_position(input)
    assert_equal(output, (1, 2))
def test_minimum_position04():
    # All-True boolean input: the first position wins.
    input = np.array([[5, 4, 2, 5],
                      [3, 7, 1, 2],
                      [1, 5, 1, 1]], bool)
    output = ndimage.minimum_position(input)
    assert_equal(output, (0, 0))
def test_minimum_position05():
    # Column label 0 excludes the third column and its global minimum.
    labels = [1, 2, 0, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 0, 2],
                          [1, 5, 2, 3]], type)
        output = ndimage.minimum_position(input, labels)
        assert_equal(output, (2, 0))
def test_minimum_position06():
    # Scalar index 2 restricts the search to the second column.
    labels = [1, 2, 3, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 0, 2],
                          [1, 5, 1, 1]], type)
        output = ndimage.minimum_position(input, labels, 2)
        assert_equal(output, (0, 1))
def test_minimum_position07():
    # Sequence index: one position per requested label.
    labels = [1, 2, 3, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 0, 2],
                          [1, 5, 1, 1]], type)
        output = ndimage.minimum_position(input, labels,
                                          [2, 3])
        assert_equal(output[0], (0, 1))
        assert_equal(output[1], (1, 2))
def test_maximum_position01():
    # Boolean labels select the first column: max of {1, 3} sits at (1, 0).
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.maximum_position(input,
                                          labels=labels)
        assert_equal(output, (1, 0))
def test_maximum_position02():
    # Unlabelled: the global maximum 8 sits at (1, 2).
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 8, 2],
                          [1, 5, 1, 1]], type)
        output = ndimage.maximum_position(input)
        assert_equal(output, (1, 2))
def test_maximum_position03():
    # All-True boolean input: the first position wins.
    input = np.array([[5, 4, 2, 5],
                      [3, 7, 8, 2],
                      [1, 5, 1, 1]], bool)
    output = ndimage.maximum_position(input)
    assert_equal(output, (0, 0))
def test_maximum_position04():
    # Column label 0 excludes the third column (which holds the global max).
    labels = [1, 2, 0, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 8, 2],
                          [1, 5, 1, 1]], type)
        output = ndimage.maximum_position(input, labels)
        assert_equal(output, (1, 1))
def test_maximum_position05():
    # Scalar index 1 restricts the search to the first column.
    labels = [1, 2, 0, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 8, 2],
                          [1, 5, 1, 1]], type)
        output = ndimage.maximum_position(input, labels, 1)
        assert_equal(output, (0, 0))
def test_maximum_position06():
    # Sequence index: one position per requested label.
    labels = [1, 2, 0, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 8, 2],
                          [1, 5, 1, 1]], type)
        output = ndimage.maximum_position(input, labels,
                                          [1, 2])
        assert_equal(output[0], (0, 0))
        assert_equal(output[1], (1, 1))
def test_maximum_position07():
    # Test float labels
    labels = np.array([1.0, 2.5, 0.0, 4.5])
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 8, 2],
                          [1, 5, 1, 1]], type)
        output = ndimage.maximum_position(input, labels,
                                          [1.0, 4.5])
        assert_equal(output[0], (0, 0))
        assert_equal(output[1], (0, 3))
# The extrema tests verify that ndimage.extrema() returns exactly the same
# four results as the individual minimum/maximum/position functions.
def test_extrema01():
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output1 = ndimage.extrema(input, labels=labels)
        output2 = ndimage.minimum(input, labels=labels)
        output3 = ndimage.maximum(input, labels=labels)
        output4 = ndimage.minimum_position(input,
                                           labels=labels)
        output5 = ndimage.maximum_position(input,
                                           labels=labels)
        assert_equal(output1, (output2, output3, output4, output5))
def test_extrema02():
    # Same comparison with a scalar index.
    labels = np.array([1, 2])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output1 = ndimage.extrema(input, labels=labels,
                                  index=2)
        output2 = ndimage.minimum(input, labels=labels,
                                  index=2)
        output3 = ndimage.maximum(input, labels=labels,
                                  index=2)
        output4 = ndimage.minimum_position(input,
                                           labels=labels, index=2)
        output5 = ndimage.maximum_position(input,
                                           labels=labels, index=2)
        assert_equal(output1, (output2, output3, output4, output5))
def test_extrema03():
    # Sequence index including an absent label (8).
    labels = np.array([[1, 2], [2, 3]])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output1 = ndimage.extrema(input, labels=labels,
                                  index=[2, 3, 8])
        output2 = ndimage.minimum(input, labels=labels,
                                  index=[2, 3, 8])
        output3 = ndimage.maximum(input, labels=labels,
                                  index=[2, 3, 8])
        output4 = ndimage.minimum_position(input,
                                           labels=labels, index=[2, 3, 8])
        output5 = ndimage.maximum_position(input,
                                           labels=labels, index=[2, 3, 8])
        assert_array_almost_equal(output1[0], output2)
        assert_array_almost_equal(output1[1], output3)
        assert_array_almost_equal(output1[2], output4)
        assert_array_almost_equal(output1[3], output5)
def test_extrema04():
    # Positional (non-keyword) labels and index arguments.
    labels = [1, 2, 0, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                          [3, 7, 8, 2],
                          [1, 5, 1, 1]], type)
        output1 = ndimage.extrema(input, labels, [1, 2])
        output2 = ndimage.minimum(input, labels, [1, 2])
        output3 = ndimage.maximum(input, labels, [1, 2])
        output4 = ndimage.minimum_position(input, labels,
                                           [1, 2])
        output5 = ndimage.maximum_position(input, labels,
                                           [1, 2])
        assert_array_almost_equal(output1[0], output2)
        assert_array_almost_equal(output1[1], output3)
        assert_array_almost_equal(output1[2], output4)
        assert_array_almost_equal(output1[3], output5)
# Centre-of-mass of a single unit mass equals that element's coordinates.
def test_center_of_mass01():
    expected = [0.0, 0.0]
    for type in types:
        input = np.array([[1, 0], [0, 0]], type)
        output = ndimage.center_of_mass(input)
        assert_array_almost_equal(output, expected)
def test_center_of_mass02():
    expected = [1, 0]
    for type in types:
        input = np.array([[0, 0], [1, 0]], type)
        output = ndimage.center_of_mass(input)
        assert_array_almost_equal(output, expected)
def test_center_of_mass03():
    expected = [0, 1]
    for type in types:
        input = np.array([[0, 1], [0, 0]], type)
        output = ndimage.center_of_mass(input)
        assert_array_almost_equal(output, expected)
def test_center_of_mass04():
    expected = [1, 1]
    for type in types:
        input = np.array([[0, 0], [0, 1]], type)
        output = ndimage.center_of_mass(input)
        assert_array_almost_equal(output, expected)
def test_center_of_mass05():
    # Uniform mass: the centre is the middle of the array.
    expected = [0.5, 0.5]
    for type in types:
        input = np.array([[1, 1], [1, 1]], type)
        output = ndimage.center_of_mass(input)
        assert_array_almost_equal(output, expected)
def test_center_of_mass06():
    # Boolean input: every element is True, so the centre is the middle.
    mask = np.array([[1, 2], [3, 1]], bool)
    assert_array_almost_equal(ndimage.center_of_mass(mask), [0.5, 0.5])


def test_center_of_mass07():
    # Labels restrict the computation to the first column.
    mask = np.array([[1, 2], [3, 1]], bool)
    out = ndimage.center_of_mass(mask, [1, 0])
    assert_array_almost_equal(out, [0.5, 0.0])


def test_center_of_mass08():
    # A scalar index selects only label 2 (the second column).
    mask = np.array([[5, 2], [3, 1]], bool)
    out = ndimage.center_of_mass(mask, [1, 2], 2)
    assert_array_almost_equal(out, [0.5, 1.0])


def test_center_of_mass09():
    # A sequence index returns one centre per requested label.
    mask = np.array([[1, 2], [1, 1]], bool)
    out = ndimage.center_of_mass(mask, [1, 2], [1, 2])
    assert_array_almost_equal(out, [(0.5, 0.0), (0.5, 1.0)])
def test_histogram01():
    # Ten unit-width bins over [0, 10): one sample lands in each.
    samples = np.arange(10)
    assert_array_almost_equal(ndimage.histogram(samples, 0, 10, 10),
                              np.ones(10))


def test_histogram02():
    # Only samples carrying label 1 are counted.
    lbls = [1, 1, 1, 1, 2, 2, 2, 2]
    samples = np.array([1, 1, 3, 4, 3, 3, 3, 3])
    out = ndimage.histogram(samples, 0, 4, 5, lbls, 1)
    assert_array_almost_equal(out, [0, 2, 0, 1, 1])


def test_histogram03():
    # A sequence index yields one histogram per requested label.
    lbls = [1, 0, 1, 1, 2, 2, 2, 2]
    samples = np.array([1, 1, 3, 4, 3, 5, 3, 3])
    out = ndimage.histogram(samples, 0, 4, 5, lbls, (1, 2))
    assert_array_almost_equal(out[0], [0, 1, 0, 1, 1])
    assert_array_almost_equal(out[1], [0, 0, 0, 3, 0])
def test_stat_funcs_2d():
    # Smoke-test the labelled statistics functions on one 2-D example:
    # label 1 covers {5, 6, 8, 9}, label 2 covers {3, 5}.
    a = np.array([[5, 6, 0, 0, 0], [8, 9, 0, 0, 0], [0, 0, 0, 3, 5]])
    lbl = np.array([[1, 1, 0, 0, 0], [1, 1, 0, 0, 0], [0, 0, 0, 2, 2]])
    mean = ndimage.mean(a, labels=lbl, index=[1, 2])
    assert_array_equal(mean, [7.0, 4.0])
    var = ndimage.variance(a, labels=lbl, index=[1, 2])
    assert_array_equal(var, [2.5, 1.0])
    std = ndimage.standard_deviation(a, labels=lbl, index=[1, 2])
    assert_array_almost_equal(std, np.sqrt([2.5, 1.0]))
    med = ndimage.median(a, labels=lbl, index=[1, 2])
    assert_array_equal(med, [7.0, 4.0])
    # Renamed from `min`/`max` so the builtins are not shadowed.
    min_val = ndimage.minimum(a, labels=lbl, index=[1, 2])
    assert_array_equal(min_val, [5, 3])
    max_val = ndimage.maximum(a, labels=lbl, index=[1, 2])
    assert_array_equal(max_val, [9, 5])
class TestWatershedIft:
def test_watershed_ift01(self):
data = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], np.int8)
out = ndimage.watershed_ift(data, markers, structure=[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift02(self):
data = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], np.int8)
out = ndimage.watershed_ift(data, markers)
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, 1, 1, 1, -1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, -1, 1, 1, 1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift03(self):
data = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
markers = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 2, 0, 3, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1]], np.int8)
out = ndimage.watershed_ift(data, markers)
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, 2, -1, 3, -1, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, -1, 2, -1, 3, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift04(self):
data = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
markers = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 2, 0, 3, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1]],
np.int8)
out = ndimage.watershed_ift(data, markers,
structure=[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
    def test_watershed_ift05(self):
        # Mirror of test_watershed_ift04 with the two marker labels
        # swapped (3 on the left, 2 on the right): verifies the result
        # follows the marker positions, not the label values.
        data = np.array([[0, 0, 0, 0, 0, 0, 0],
                         [0, 1, 1, 1, 1, 1, 0],
                         [0, 1, 0, 1, 0, 1, 0],
                         [0, 1, 0, 1, 0, 1, 0],
                         [0, 1, 0, 1, 0, 1, 0],
                         [0, 1, 1, 1, 1, 1, 0],
                         [0, 0, 0, 0, 0, 0, 0]], np.uint8)
        markers = np.array([[0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 3, 0, 2, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, -1]],
                           np.int8)
        out = ndimage.watershed_ift(data, markers,
                                    structure=[[1, 1, 1],
                                               [1, 1, 1],
                                               [1, 1, 1]])
        expected = [[-1, -1, -1, -1, -1, -1, -1],
                    [-1, 3, 3, 2, 2, 2, -1],
                    [-1, 3, 3, 2, 2, 2, -1],
                    [-1, 3, 3, 2, 2, 2, -1],
                    [-1, 3, 3, 2, 2, 2, -1],
                    [-1, 3, 3, 2, 2, 2, -1],
                    [-1, -1, -1, -1, -1, -1, -1]]
        assert_array_almost_equal(out, expected)
    def test_watershed_ift06(self):
        # One object marker (1) inside the open-topped ridge pattern plus
        # a background seed (-1) at the corner, with a full 3x3 structure:
        # the object floods the upper four rows, the background the rest.
        data = np.array([[0, 1, 0, 0, 0, 1, 0],
                         [0, 1, 0, 0, 0, 1, 0],
                         [0, 1, 0, 0, 0, 1, 0],
                         [0, 1, 1, 1, 1, 1, 0],
                         [0, 0, 0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0, 0, 0]], np.uint8)
        markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], np.int8)
        out = ndimage.watershed_ift(data, markers,
                                    structure=[[1, 1, 1],
                                               [1, 1, 1],
                                               [1, 1, 1]])
        expected = [[-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, -1, -1, -1, -1, -1, -1],
                    [-1, -1, -1, -1, -1, -1, -1]]
        assert_array_almost_equal(out, expected)
    def test_watershed_ift07(self):
        # Same problem as test_watershed_ift06, but both the input and the
        # pre-allocated result arrays are transposed (non-contiguous)
        # views, and the result is written in place via the `output=`
        # argument — exercises strided-array handling.
        shape = (7, 6)
        data = np.zeros(shape, dtype=np.uint8)
        data = data.transpose()
        data[...] = np.array([[0, 1, 0, 0, 0, 1, 0],
                              [0, 1, 0, 0, 0, 1, 0],
                              [0, 1, 0, 0, 0, 1, 0],
                              [0, 1, 1, 1, 1, 1, 0],
                              [0, 0, 0, 0, 0, 0, 0],
                              [0, 0, 0, 0, 0, 0, 0]], np.uint8)
        markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], np.int8)
        out = np.zeros(shape, dtype=np.int16)
        out = out.transpose()
        ndimage.watershed_ift(data, markers,
                              structure=[[1, 1, 1],
                                         [1, 1, 1],
                                         [1, 1, 1]],
                              output=out)
        expected = [[-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, 1, 1, 1, 1, 1, -1],
                    [-1, -1, -1, -1, -1, -1, -1],
                    [-1, -1, -1, -1, -1, -1, -1]]
        assert_array_almost_equal(out, expected)
def test_watershed_ift08(self):
# Test cost larger than uint8. See gh-10069.
data = np.array([[256, 0],
[0, 0]], np.uint16)
markers = np.array([[1, 0],
[0, 0]], np.int8)
out = ndimage.watershed_ift(data, markers)
expected = [[1, 1],
[1, 1]]
assert_array_almost_equal(out, expected)
| 47,782
| 33.302225
| 79
|
py
|
scipy
|
scipy-main/scipy/ndimage/tests/test_filters.py
|
''' Some tests for filters '''
import functools
import itertools
import math
import numpy
from numpy.testing import (assert_equal, assert_allclose,
assert_array_almost_equal,
assert_array_equal, assert_almost_equal,
suppress_warnings, assert_)
import pytest
from pytest import raises as assert_raises
from scipy import ndimage
from scipy.ndimage._filters import _gaussian_kernel1d
from . import types, float_types, complex_types
def sumsq(a, b):
    """Root of the summed squared differences between arrays *a* and *b*."""
    diff = a - b
    return math.sqrt((diff * diff).sum())
def _complex_correlate(array, kernel, real_dtype, convolve=False,
mode="reflect", cval=0, ):
"""Utility to perform a reference complex-valued convolutions.
When convolve==False, correlation is performed instead
"""
array = numpy.asarray(array)
kernel = numpy.asarray(kernel)
complex_array = array.dtype.kind == 'c'
complex_kernel = kernel.dtype.kind == 'c'
if array.ndim == 1:
func = ndimage.convolve1d if convolve else ndimage.correlate1d
else:
func = ndimage.convolve if convolve else ndimage.correlate
if not convolve:
kernel = kernel.conj()
if complex_array and complex_kernel:
# use: real(cval) for array.real component
# imag(cval) for array.imag component
output = (
func(array.real, kernel.real, output=real_dtype,
mode=mode, cval=numpy.real(cval)) -
func(array.imag, kernel.imag, output=real_dtype,
mode=mode, cval=numpy.imag(cval)) +
1j * func(array.imag, kernel.real, output=real_dtype,
mode=mode, cval=numpy.imag(cval)) +
1j * func(array.real, kernel.imag, output=real_dtype,
mode=mode, cval=numpy.real(cval))
)
elif complex_array:
output = (
func(array.real, kernel, output=real_dtype, mode=mode,
cval=numpy.real(cval)) +
1j * func(array.imag, kernel, output=real_dtype, mode=mode,
cval=numpy.imag(cval))
)
elif complex_kernel:
# real array so cval is real too
output = (
func(array, kernel.real, output=real_dtype, mode=mode, cval=cval) +
1j * func(array, kernel.imag, output=real_dtype, mode=mode,
cval=cval)
)
return output
def _cases_axes_tuple_length_mismatch():
# Generate combinations of filter function, valid kwargs, and
# keyword-value pairs for which the value will become with mismatched
# (invalid) size
filter_func = ndimage.gaussian_filter
kwargs = dict(radius=3, mode='constant', sigma=1.0, order=0)
for key, val in kwargs.items():
yield filter_func, kwargs, key, val
filter_funcs = [ndimage.uniform_filter, ndimage.minimum_filter,
ndimage.maximum_filter]
kwargs = dict(size=3, mode='constant', origin=0)
for filter_func in filter_funcs:
for key, val in kwargs.items():
yield filter_func, kwargs, key, val
class TestNdimageFilters:
    def _validate_complex(self, array, kernel, type2, mode='reflect', cval=0):
        """Validate complex correlate/convolve against the reference impl.

        Checks output dtype handling (requested dtype, pre-allocated
        array), the promotion warning for a real requested dtype, and the
        RuntimeError for a pre-allocated real output array.  ``type2`` is
        the requested (complex) output dtype.
        """
        # utility for validating complex-valued correlations
        real_dtype = numpy.asarray([], dtype=type2).real.dtype
        expected = _complex_correlate(
            array, kernel, real_dtype, convolve=False, mode=mode, cval=cval
        )
        # 1-D inputs go through the *1d variants; N-D through the generic ones
        if array.ndim == 1:
            correlate = functools.partial(ndimage.correlate1d, axis=-1,
                                          mode=mode, cval=cval)
            convolve = functools.partial(ndimage.convolve1d, axis=-1,
                                         mode=mode, cval=cval)
        else:
            correlate = functools.partial(ndimage.correlate, mode=mode,
                                          cval=cval)
            convolve = functools.partial(ndimage.convolve, mode=mode,
                                         cval=cval)
        # test correlate output dtype
        output = correlate(array, kernel, output=type2)
        assert_array_almost_equal(expected, output)
        assert_equal(output.dtype.type, type2)
        # test correlate with pre-allocated output
        output = numpy.zeros_like(array, dtype=type2)
        correlate(array, kernel, output=output)
        assert_array_almost_equal(expected, output)
        # test convolve output dtype
        output = convolve(array, kernel, output=type2)
        expected = _complex_correlate(
            array, kernel, real_dtype, convolve=True, mode=mode, cval=cval,
        )
        assert_array_almost_equal(expected, output)
        assert_equal(output.dtype.type, type2)
        # convolve with pre-allocated output
        convolve(array, kernel, output=output)
        assert_array_almost_equal(expected, output)
        assert_equal(output.dtype.type, type2)
        # warns if the output is not a complex dtype
        with pytest.warns(UserWarning,
                          match="promoting specified output dtype to complex"):
            correlate(array, kernel, output=real_dtype)
        with pytest.warns(UserWarning,
                          match="promoting specified output dtype to complex"):
            convolve(array, kernel, output=real_dtype)
        # raises if output array is provided, but is not complex-valued
        output_real = numpy.zeros_like(array, dtype=real_dtype)
        with assert_raises(RuntimeError):
            correlate(array, kernel, output=output_real)
        with assert_raises(RuntimeError):
            convolve(array, kernel, output=output_real)
def test_correlate01(self):
array = numpy.array([1, 2])
weights = numpy.array([2])
expected = [2, 4]
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, expected)
def test_correlate01_overlap(self):
array = numpy.arange(256).reshape(16, 16)
weights = numpy.array([2])
expected = 2 * array
ndimage.correlate1d(array, weights, output=array)
assert_array_almost_equal(array, expected)
def test_correlate02(self):
array = numpy.array([1, 2, 3])
kernel = numpy.array([1])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.correlate1d(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.convolve1d(array, kernel)
assert_array_almost_equal(array, output)
    def test_correlate03(self):
        # Length-1 input with a length-2 kernel: the default boundary
        # extension duplicates the single sample, so the weighted sum is 2
        # for every variant.
        array = numpy.array([1])
        weights = numpy.array([1, 1])
        expected = [2]
        output = ndimage.correlate(array, weights)
        assert_array_almost_equal(output, expected)
        output = ndimage.convolve(array, weights)
        assert_array_almost_equal(output, expected)
        output = ndimage.correlate1d(array, weights)
        assert_array_almost_equal(output, expected)
        output = ndimage.convolve1d(array, weights)
        assert_array_almost_equal(output, expected)
def test_correlate04(self):
array = numpy.array([1, 2])
tcor = [2, 3]
tcov = [3, 4]
weights = numpy.array([1, 1])
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, tcov)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, tcov)
    def test_correlate05(self):
        # Odd-length input with an even-length [1, 1] kernel: correlation
        # (tcor) and convolution (tcov) differ because the kernel overhang
        # falls on opposite sides.
        array = numpy.array([1, 2, 3])
        tcor = [2, 3, 5]
        tcov = [3, 5, 6]
        kernel = numpy.array([1, 1])
        output = ndimage.correlate(array, kernel)
        assert_array_almost_equal(tcor, output)
        output = ndimage.convolve(array, kernel)
        assert_array_almost_equal(tcov, output)
        output = ndimage.correlate1d(array, kernel)
        assert_array_almost_equal(tcor, output)
        output = ndimage.convolve1d(array, kernel)
        assert_array_almost_equal(tcov, output)
    def test_correlate06(self):
        # Asymmetric kernel [1, 2, 3]: convolution flips the kernel
        # relative to correlation, hence the different expected outputs.
        array = numpy.array([1, 2, 3])
        tcor = [9, 14, 17]
        tcov = [7, 10, 15]
        weights = numpy.array([1, 2, 3])
        output = ndimage.correlate(array, weights)
        assert_array_almost_equal(output, tcor)
        output = ndimage.convolve(array, weights)
        assert_array_almost_equal(output, tcov)
        output = ndimage.correlate1d(array, weights)
        assert_array_almost_equal(output, tcor)
        output = ndimage.convolve1d(array, weights)
        assert_array_almost_equal(output, tcov)
def test_correlate07(self):
array = numpy.array([1, 2, 3])
expected = [5, 8, 11]
weights = numpy.array([1, 2, 1])
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, expected)
def test_correlate08(self):
array = numpy.array([1, 2, 3])
tcor = [1, 2, 5]
tcov = [3, 6, 7]
weights = numpy.array([1, 2, -1])
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, tcov)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, tcov)
    def test_correlate09(self):
        # Edge case: an empty input passes through all four variants and
        # comes back empty rather than raising.
        array = []
        kernel = numpy.array([1, 1])
        output = ndimage.correlate(array, kernel)
        assert_array_almost_equal(array, output)
        output = ndimage.convolve(array, kernel)
        assert_array_almost_equal(array, output)
        output = ndimage.correlate1d(array, kernel)
        assert_array_almost_equal(array, output)
        output = ndimage.convolve1d(array, kernel)
        assert_array_almost_equal(array, output)
def test_correlate10(self):
array = [[]]
kernel = numpy.array([[1, 1]])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal(array, output)
def test_correlate11(self):
array = numpy.array([[1, 2, 3],
[4, 5, 6]])
kernel = numpy.array([[1, 1],
[1, 1]])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal([[12, 16, 18], [18, 22, 24]], output)
def test_correlate12(self):
array = numpy.array([[1, 2, 3],
[4, 5, 6]])
kernel = numpy.array([[1, 0],
[0, 1]])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
@pytest.mark.parametrize('dtype_array', types)
@pytest.mark.parametrize('dtype_kernel', types)
def test_correlate13(self, dtype_array, dtype_kernel):
kernel = numpy.array([[1, 0],
[0, 1]])
array = numpy.array([[1, 2, 3],
[4, 5, 6]], dtype_array)
output = ndimage.correlate(array, kernel, output=dtype_kernel)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
assert_equal(output.dtype.type, dtype_kernel)
output = ndimage.convolve(array, kernel,
output=dtype_kernel)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
assert_equal(output.dtype.type, dtype_kernel)
@pytest.mark.parametrize('dtype_array', types)
@pytest.mark.parametrize('dtype_output', types)
def test_correlate14(self, dtype_array, dtype_output):
kernel = numpy.array([[1, 0],
[0, 1]])
array = numpy.array([[1, 2, 3],
[4, 5, 6]], dtype_array)
output = numpy.zeros(array.shape, dtype_output)
ndimage.correlate(array, kernel, output=output)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
assert_equal(output.dtype.type, dtype_output)
ndimage.convolve(array, kernel, output=output)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
assert_equal(output.dtype.type, dtype_output)
@pytest.mark.parametrize('dtype_array', types)
def test_correlate15(self, dtype_array):
kernel = numpy.array([[1, 0],
[0, 1]])
array = numpy.array([[1, 2, 3],
[4, 5, 6]], dtype_array)
output = ndimage.correlate(array, kernel, output=numpy.float32)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
assert_equal(output.dtype.type, numpy.float32)
output = ndimage.convolve(array, kernel, output=numpy.float32)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
assert_equal(output.dtype.type, numpy.float32)
@pytest.mark.parametrize('dtype_array', types)
def test_correlate16(self, dtype_array):
kernel = numpy.array([[0.5, 0],
[0, 0.5]])
array = numpy.array([[1, 2, 3], [4, 5, 6]], dtype_array)
output = ndimage.correlate(array, kernel, output=numpy.float32)
assert_array_almost_equal([[1, 1.5, 2.5], [2.5, 3, 4]], output)
assert_equal(output.dtype.type, numpy.float32)
output = ndimage.convolve(array, kernel, output=numpy.float32)
assert_array_almost_equal([[3, 4, 4.5], [4.5, 5.5, 6]], output)
assert_equal(output.dtype.type, numpy.float32)
def test_correlate17(self):
array = numpy.array([1, 2, 3])
tcor = [3, 5, 6]
tcov = [2, 3, 5]
kernel = numpy.array([1, 1])
output = ndimage.correlate(array, kernel, origin=-1)
assert_array_almost_equal(tcor, output)
output = ndimage.convolve(array, kernel, origin=-1)
assert_array_almost_equal(tcov, output)
output = ndimage.correlate1d(array, kernel, origin=-1)
assert_array_almost_equal(tcor, output)
output = ndimage.convolve1d(array, kernel, origin=-1)
assert_array_almost_equal(tcov, output)
@pytest.mark.parametrize('dtype_array', types)
def test_correlate18(self, dtype_array):
kernel = numpy.array([[1, 0],
[0, 1]])
array = numpy.array([[1, 2, 3],
[4, 5, 6]], dtype_array)
output = ndimage.correlate(array, kernel,
output=numpy.float32,
mode='nearest', origin=-1)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
assert_equal(output.dtype.type, numpy.float32)
output = ndimage.convolve(array, kernel,
output=numpy.float32,
mode='nearest', origin=-1)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
assert_equal(output.dtype.type, numpy.float32)
def test_correlate_mode_sequence(self):
kernel = numpy.ones((2, 2))
array = numpy.ones((3, 3), float)
with assert_raises(RuntimeError):
ndimage.correlate(array, kernel, mode=['nearest', 'reflect'])
with assert_raises(RuntimeError):
ndimage.convolve(array, kernel, mode=['nearest', 'reflect'])
@pytest.mark.parametrize('dtype_array', types)
def test_correlate19(self, dtype_array):
kernel = numpy.array([[1, 0],
[0, 1]])
array = numpy.array([[1, 2, 3],
[4, 5, 6]], dtype_array)
output = ndimage.correlate(array, kernel,
output=numpy.float32,
mode='nearest', origin=[-1, 0])
assert_array_almost_equal([[5, 6, 8], [8, 9, 11]], output)
assert_equal(output.dtype.type, numpy.float32)
output = ndimage.convolve(array, kernel,
output=numpy.float32,
mode='nearest', origin=[-1, 0])
assert_array_almost_equal([[3, 5, 6], [6, 8, 9]], output)
assert_equal(output.dtype.type, numpy.float32)
@pytest.mark.parametrize('dtype_array', types)
@pytest.mark.parametrize('dtype_output', types)
def test_correlate20(self, dtype_array, dtype_output):
weights = numpy.array([1, 2, 1])
expected = [[5, 10, 15], [7, 14, 21]]
array = numpy.array([[1, 2, 3],
[2, 4, 6]], dtype_array)
output = numpy.zeros((2, 3), dtype_output)
ndimage.correlate1d(array, weights, axis=0, output=output)
assert_array_almost_equal(output, expected)
ndimage.convolve1d(array, weights, axis=0, output=output)
assert_array_almost_equal(output, expected)
def test_correlate21(self):
array = numpy.array([[1, 2, 3],
[2, 4, 6]])
expected = [[5, 10, 15], [7, 14, 21]]
weights = numpy.array([1, 2, 1])
output = ndimage.correlate1d(array, weights, axis=0)
assert_array_almost_equal(output, expected)
output = ndimage.convolve1d(array, weights, axis=0)
assert_array_almost_equal(output, expected)
@pytest.mark.parametrize('dtype_array', types)
@pytest.mark.parametrize('dtype_output', types)
def test_correlate22(self, dtype_array, dtype_output):
weights = numpy.array([1, 2, 1])
expected = [[6, 12, 18], [6, 12, 18]]
array = numpy.array([[1, 2, 3],
[2, 4, 6]], dtype_array)
output = numpy.zeros((2, 3), dtype_output)
ndimage.correlate1d(array, weights, axis=0,
mode='wrap', output=output)
assert_array_almost_equal(output, expected)
ndimage.convolve1d(array, weights, axis=0,
mode='wrap', output=output)
assert_array_almost_equal(output, expected)
@pytest.mark.parametrize('dtype_array', types)
@pytest.mark.parametrize('dtype_output', types)
def test_correlate23(self, dtype_array, dtype_output):
weights = numpy.array([1, 2, 1])
expected = [[5, 10, 15], [7, 14, 21]]
array = numpy.array([[1, 2, 3],
[2, 4, 6]], dtype_array)
output = numpy.zeros((2, 3), dtype_output)
ndimage.correlate1d(array, weights, axis=0,
mode='nearest', output=output)
assert_array_almost_equal(output, expected)
ndimage.convolve1d(array, weights, axis=0,
mode='nearest', output=output)
assert_array_almost_equal(output, expected)
@pytest.mark.parametrize('dtype_array', types)
@pytest.mark.parametrize('dtype_output', types)
def test_correlate24(self, dtype_array, dtype_output):
weights = numpy.array([1, 2, 1])
tcor = [[7, 14, 21], [8, 16, 24]]
tcov = [[4, 8, 12], [5, 10, 15]]
array = numpy.array([[1, 2, 3],
[2, 4, 6]], dtype_array)
output = numpy.zeros((2, 3), dtype_output)
ndimage.correlate1d(array, weights, axis=0,
mode='nearest', output=output, origin=-1)
assert_array_almost_equal(output, tcor)
ndimage.convolve1d(array, weights, axis=0,
mode='nearest', output=output, origin=-1)
assert_array_almost_equal(output, tcov)
@pytest.mark.parametrize('dtype_array', types)
@pytest.mark.parametrize('dtype_output', types)
def test_correlate25(self, dtype_array, dtype_output):
weights = numpy.array([1, 2, 1])
tcor = [[4, 8, 12], [5, 10, 15]]
tcov = [[7, 14, 21], [8, 16, 24]]
array = numpy.array([[1, 2, 3],
[2, 4, 6]], dtype_array)
output = numpy.zeros((2, 3), dtype_output)
ndimage.correlate1d(array, weights, axis=0,
mode='nearest', output=output, origin=1)
assert_array_almost_equal(output, tcor)
ndimage.convolve1d(array, weights, axis=0,
mode='nearest', output=output, origin=1)
assert_array_almost_equal(output, tcov)
def test_correlate26(self):
# test fix for gh-11661 (mirror extension of a length 1 signal)
y = ndimage.convolve1d(numpy.ones(1), numpy.ones(5), mode='mirror')
assert_array_equal(y, numpy.array(5.))
y = ndimage.correlate1d(numpy.ones(1), numpy.ones(5), mode='mirror')
assert_array_equal(y, numpy.array(5.))
@pytest.mark.parametrize('dtype_kernel', complex_types)
@pytest.mark.parametrize('dtype_input', types)
@pytest.mark.parametrize('dtype_output', complex_types)
def test_correlate_complex_kernel(self, dtype_input, dtype_kernel,
dtype_output):
kernel = numpy.array([[1, 0],
[0, 1 + 1j]], dtype_kernel)
array = numpy.array([[1, 2, 3],
[4, 5, 6]], dtype_input)
self._validate_complex(array, kernel, dtype_output)
@pytest.mark.parametrize('dtype_kernel', complex_types)
@pytest.mark.parametrize('dtype_input', types)
@pytest.mark.parametrize('dtype_output', complex_types)
@pytest.mark.parametrize('mode', ['grid-constant', 'constant'])
def test_correlate_complex_kernel_cval(self, dtype_input, dtype_kernel,
dtype_output, mode):
# test use of non-zero cval with complex inputs
# also verifies that mode 'grid-constant' does not segfault
kernel = numpy.array([[1, 0],
[0, 1 + 1j]], dtype_kernel)
array = numpy.array([[1, 2, 3],
[4, 5, 6]], dtype_input)
self._validate_complex(array, kernel, dtype_output, mode=mode,
cval=5.0)
@pytest.mark.parametrize('dtype_kernel', complex_types)
@pytest.mark.parametrize('dtype_input', types)
def test_correlate_complex_kernel_invalid_cval(self, dtype_input,
dtype_kernel):
# cannot give complex cval with a real image
kernel = numpy.array([[1, 0],
[0, 1 + 1j]], dtype_kernel)
array = numpy.array([[1, 2, 3],
[4, 5, 6]], dtype_input)
for func in [ndimage.convolve, ndimage.correlate, ndimage.convolve1d,
ndimage.correlate1d]:
with pytest.raises(ValueError):
func(array, kernel, mode='constant', cval=5.0 + 1.0j,
output=numpy.complex64)
@pytest.mark.parametrize('dtype_kernel', complex_types)
@pytest.mark.parametrize('dtype_input', types)
@pytest.mark.parametrize('dtype_output', complex_types)
def test_correlate1d_complex_kernel(self, dtype_input, dtype_kernel,
dtype_output):
kernel = numpy.array([1, 1 + 1j], dtype_kernel)
array = numpy.array([1, 2, 3, 4, 5, 6], dtype_input)
self._validate_complex(array, kernel, dtype_output)
@pytest.mark.parametrize('dtype_kernel', complex_types)
@pytest.mark.parametrize('dtype_input', types)
@pytest.mark.parametrize('dtype_output', complex_types)
def test_correlate1d_complex_kernel_cval(self, dtype_input, dtype_kernel,
dtype_output):
kernel = numpy.array([1, 1 + 1j], dtype_kernel)
array = numpy.array([1, 2, 3, 4, 5, 6], dtype_input)
self._validate_complex(array, kernel, dtype_output, mode='constant',
cval=5.0)
@pytest.mark.parametrize('dtype_kernel', types)
@pytest.mark.parametrize('dtype_input', complex_types)
@pytest.mark.parametrize('dtype_output', complex_types)
def test_correlate_complex_input(self, dtype_input, dtype_kernel,
dtype_output):
kernel = numpy.array([[1, 0],
[0, 1]], dtype_kernel)
array = numpy.array([[1, 2j, 3],
[1 + 4j, 5, 6j]], dtype_input)
self._validate_complex(array, kernel, dtype_output)
@pytest.mark.parametrize('dtype_kernel', types)
@pytest.mark.parametrize('dtype_input', complex_types)
@pytest.mark.parametrize('dtype_output', complex_types)
def test_correlate1d_complex_input(self, dtype_input, dtype_kernel,
dtype_output):
kernel = numpy.array([1, 0, 1], dtype_kernel)
array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype_input)
self._validate_complex(array, kernel, dtype_output)
@pytest.mark.parametrize('dtype_kernel', types)
@pytest.mark.parametrize('dtype_input', complex_types)
@pytest.mark.parametrize('dtype_output', complex_types)
def test_correlate1d_complex_input_cval(self, dtype_input, dtype_kernel,
dtype_output):
kernel = numpy.array([1, 0, 1], dtype_kernel)
array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype_input)
self._validate_complex(array, kernel, dtype_output, mode='constant',
cval=5 - 3j)
@pytest.mark.parametrize('dtype', complex_types)
@pytest.mark.parametrize('dtype_output', complex_types)
def test_correlate_complex_input_and_kernel(self, dtype, dtype_output):
kernel = numpy.array([[1, 0],
[0, 1 + 1j]], dtype)
array = numpy.array([[1, 2j, 3],
[1 + 4j, 5, 6j]], dtype)
self._validate_complex(array, kernel, dtype_output)
@pytest.mark.parametrize('dtype', complex_types)
@pytest.mark.parametrize('dtype_output', complex_types)
def test_correlate_complex_input_and_kernel_cval(self, dtype,
dtype_output):
kernel = numpy.array([[1, 0],
[0, 1 + 1j]], dtype)
array = numpy.array([[1, 2, 3],
[4, 5, 6]], dtype)
self._validate_complex(array, kernel, dtype_output, mode='constant',
cval=5.0 + 2.0j)
@pytest.mark.parametrize('dtype', complex_types)
@pytest.mark.parametrize('dtype_output', complex_types)
def test_correlate1d_complex_input_and_kernel(self, dtype, dtype_output):
kernel = numpy.array([1, 1 + 1j], dtype)
array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype)
self._validate_complex(array, kernel, dtype_output)
@pytest.mark.parametrize('dtype', complex_types)
@pytest.mark.parametrize('dtype_output', complex_types)
def test_correlate1d_complex_input_and_kernel_cval(self, dtype,
dtype_output):
kernel = numpy.array([1, 1 + 1j], dtype)
array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype)
self._validate_complex(array, kernel, dtype_output, mode='constant',
cval=5.0 + 2.0j)
def test_gauss01(self):
input = numpy.array([[1, 2, 3],
[2, 4, 6]], numpy.float32)
output = ndimage.gaussian_filter(input, 0)
assert_array_almost_equal(output, input)
def test_gauss02(self):
input = numpy.array([[1, 2, 3],
[2, 4, 6]], numpy.float32)
output = ndimage.gaussian_filter(input, 1.0)
assert_equal(input.dtype, output.dtype)
assert_equal(input.shape, output.shape)
def test_gauss03(self):
# single precision data
input = numpy.arange(100 * 100).astype(numpy.float32)
input.shape = (100, 100)
output = ndimage.gaussian_filter(input, [1.0, 1.0])
assert_equal(input.dtype, output.dtype)
assert_equal(input.shape, output.shape)
# input.sum() is 49995000.0. With single precision floats, we can't
# expect more than 8 digits of accuracy, so use decimal=0 in this test.
assert_almost_equal(output.sum(dtype='d'), input.sum(dtype='d'),
decimal=0)
assert_(sumsq(input, output) > 1.0)
def test_gauss04(self):
input = numpy.arange(100 * 100).astype(numpy.float32)
input.shape = (100, 100)
otype = numpy.float64
output = ndimage.gaussian_filter(input, [1.0, 1.0], output=otype)
assert_equal(output.dtype.type, numpy.float64)
assert_equal(input.shape, output.shape)
assert_(sumsq(input, output) > 1.0)
def test_gauss05(self):
input = numpy.arange(100 * 100).astype(numpy.float32)
input.shape = (100, 100)
otype = numpy.float64
output = ndimage.gaussian_filter(input, [1.0, 1.0],
order=1, output=otype)
assert_equal(output.dtype.type, numpy.float64)
assert_equal(input.shape, output.shape)
assert_(sumsq(input, output) > 1.0)
def test_gauss06(self):
input = numpy.arange(100 * 100).astype(numpy.float32)
input.shape = (100, 100)
otype = numpy.float64
output1 = ndimage.gaussian_filter(input, [1.0, 1.0], output=otype)
output2 = ndimage.gaussian_filter(input, 1.0, output=otype)
assert_array_almost_equal(output1, output2)
    def test_gauss_memory_overlap(self):
        # Filtering with output=input (full memory overlap) must give the
        # same result as filtering into a fresh array.
        input = numpy.arange(100 * 100).astype(numpy.float32)
        input.shape = (100, 100)
        output1 = ndimage.gaussian_filter(input, 1.0)
        ndimage.gaussian_filter(input, 1.0, output=input)
        assert_array_almost_equal(output1, input)
@pytest.mark.parametrize(('filter_func', 'extra_args', 'size0', 'size'),
[(ndimage.gaussian_filter, (), 0, 1.0),
(ndimage.uniform_filter, (), 1, 3),
(ndimage.minimum_filter, (), 1, 3),
(ndimage.maximum_filter, (), 1, 3),
(ndimage.median_filter, (), 1, 3),
(ndimage.rank_filter, (1,), 1, 3),
(ndimage.percentile_filter, (40,), 1, 3)])
@pytest.mark.parametrize(
'axes',
tuple(itertools.combinations(range(-3, 3), 1))
+ tuple(itertools.combinations(range(-3, 3), 2))
+ ((0, 1, 2),))
def test_filter_axes(self, filter_func, extra_args, size0, size, axes):
# Note: `size` is called `sigma` in `gaussian_filter`
array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
axes = numpy.array(axes)
if len(set(axes % array.ndim)) != len(axes):
# parametrized cases with duplicate axes raise an error
with pytest.raises(ValueError, match="axes must be unique"):
filter_func(array, *extra_args, size, axes=axes)
return
output = filter_func(array, *extra_args, size, axes=axes)
# result should be equivalent to sigma=0.0/size=1 on unfiltered axes
all_sizes = (size if ax in (axes % array.ndim) else size0
for ax in range(array.ndim))
expected = filter_func(array, *extra_args, all_sizes)
assert_allclose(output, expected)
kwargs_gauss = dict(radius=[4, 2, 3], order=[0, 1, 2],
mode=['reflect', 'nearest', 'constant'])
kwargs_other = dict(origin=(-1, 0, 1),
mode=['reflect', 'nearest', 'constant'])
kwargs_rank = dict(origin=(-1, 0, 1))
@pytest.mark.parametrize("filter_func, size0, size, kwargs",
[(ndimage.gaussian_filter, 0, 1.0, kwargs_gauss),
(ndimage.uniform_filter, 1, 3, kwargs_other),
(ndimage.maximum_filter, 1, 3, kwargs_other),
(ndimage.minimum_filter, 1, 3, kwargs_other),
(ndimage.median_filter, 1, 3, kwargs_rank),
(ndimage.rank_filter, 1, 3, kwargs_rank),
(ndimage.percentile_filter, 1, 3, kwargs_rank)])
@pytest.mark.parametrize('axes', itertools.combinations(range(-3, 3), 2))
def test_filter_axes_kwargs(self, filter_func, size0, size, kwargs, axes):
array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
kwargs = {key: numpy.array(val) for key, val in kwargs.items()}
axes = numpy.array(axes)
n_axes = axes.size
if filter_func == ndimage.rank_filter:
args = (2,) # (rank,)
elif filter_func == ndimage.percentile_filter:
args = (30,) # (percentile,)
else:
args = ()
# form kwargs that specify only the axes in `axes`
reduced_kwargs = {key: val[axes] for key, val in kwargs.items()}
if len(set(axes % array.ndim)) != len(axes):
# parametrized cases with duplicate axes raise an error
with pytest.raises(ValueError, match="axes must be unique"):
filter_func(array, *args, [size]*n_axes, axes=axes,
**reduced_kwargs)
return
output = filter_func(array, *args, [size]*n_axes, axes=axes,
**reduced_kwargs)
# result should be equivalent to sigma=0.0/size=1 on unfiltered axes
size_3d = numpy.full(array.ndim, fill_value=size0)
size_3d[axes] = size
if 'origin' in kwargs:
# origin should be zero on the axis that has size 0
origin = numpy.array([0, 0, 0])
origin[axes] = reduced_kwargs['origin']
kwargs['origin'] = origin
expected = filter_func(array, *args, size_3d, **kwargs)
assert_allclose(output, expected)
@pytest.mark.parametrize(
'filter_func, args',
[(ndimage.gaussian_filter, (1.0,)), # args = (sigma,)
(ndimage.uniform_filter, (3,)), # args = (size,)
(ndimage.minimum_filter, (3,)), # args = (size,)
(ndimage.maximum_filter, (3,)), # args = (size,)
(ndimage.median_filter, (3,)), # args = (size,)
(ndimage.rank_filter, (2, 3)), # args = (rank, size)
(ndimage.percentile_filter, (30, 3))]) # args = (percentile, size)
@pytest.mark.parametrize(
'axes', [(1.5,), (0, 1, 2, 3), (3,), (-4,)]
)
def test_filter_invalid_axes(self, filter_func, args, axes):
array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
if any(isinstance(ax, float) for ax in axes):
error_class = TypeError
match = "cannot be interpreted as an integer"
else:
error_class = ValueError
match = "out of range"
with pytest.raises(error_class, match=match):
filter_func(array, *args, axes=axes)
@pytest.mark.parametrize(
'filter_func, kwargs',
[(ndimage.minimum_filter, {}),
(ndimage.maximum_filter, {}),
(ndimage.median_filter, {}),
(ndimage.rank_filter, dict(rank=3)),
(ndimage.percentile_filter, dict(percentile=30))])
@pytest.mark.parametrize(
'axes', [(0, ), (1, 2), (0, 1, 2)]
)
@pytest.mark.parametrize('separable_footprint', [False, True])
def test_filter_invalid_footprint_ndim(self, filter_func, kwargs, axes,
separable_footprint):
array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
# create a footprint with one too many dimensions
footprint = numpy.ones((3,) * (len(axes) + 1))
if not separable_footprint:
footprint[(0,) * footprint.ndim] = 0
if (filter_func in [ndimage.minimum_filter, ndimage.maximum_filter]
and separable_footprint):
match = "sequence argument must have length equal to input rank"
else:
match = "footprint array has incorrect shape"
with pytest.raises(RuntimeError, match=match):
filter_func(array, **kwargs, footprint=footprint, axes=axes)
@pytest.mark.parametrize('n_mismatch', [1, 3])
@pytest.mark.parametrize('filter_func, kwargs, key, val',
_cases_axes_tuple_length_mismatch())
def test_filter_tuple_length_mismatch(self, n_mismatch, filter_func,
kwargs, key, val):
# Test for the intended RuntimeError when a kwargs has an invalid size
array = numpy.arange(6 * 8 * 12, dtype=numpy.float64).reshape(6, 8, 12)
kwargs = dict(**kwargs, axes=(0, 1))
kwargs[key] = (val,) * n_mismatch
err_msg = "sequence argument must have length equal to input rank"
with pytest.raises(RuntimeError, match=err_msg):
filter_func(array, **kwargs)
@pytest.mark.parametrize('dtype', types + complex_types)
def test_prewitt01(self, dtype):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1)
output = ndimage.prewitt(array, 0)
assert_array_almost_equal(t, output)
@pytest.mark.parametrize('dtype', types + complex_types)
def test_prewitt02(self, dtype):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1)
output = numpy.zeros(array.shape, dtype)
ndimage.prewitt(array, 0, output)
assert_array_almost_equal(t, output)
@pytest.mark.parametrize('dtype', types + complex_types)
def test_prewitt03(self, dtype):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1)
t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 0)
output = ndimage.prewitt(array, 1)
assert_array_almost_equal(t, output)
@pytest.mark.parametrize('dtype', types + complex_types)
def test_prewitt04(self, dtype):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype)
t = ndimage.prewitt(array, -1)
output = ndimage.prewitt(array, 1)
assert_array_almost_equal(t, output)
@pytest.mark.parametrize('dtype', types + complex_types)
def test_sobel01(sel, dtype):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1)
output = ndimage.sobel(array, 0)
assert_array_almost_equal(t, output)
@pytest.mark.parametrize('dtype', types + complex_types)
def test_sobel02(self, dtype):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1)
output = numpy.zeros(array.shape, dtype)
ndimage.sobel(array, 0, output)
assert_array_almost_equal(t, output)
@pytest.mark.parametrize('dtype', types + complex_types)
def test_sobel03(self, dtype):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1)
t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 0)
output = numpy.zeros(array.shape, dtype)
output = ndimage.sobel(array, 1)
assert_array_almost_equal(t, output)
@pytest.mark.parametrize('dtype', types + complex_types)
def test_sobel04(self, dtype):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype)
t = ndimage.sobel(array, -1)
output = ndimage.sobel(array, 1)
assert_array_almost_equal(t, output)
@pytest.mark.parametrize('dtype',
[numpy.int32, numpy.float32, numpy.float64,
numpy.complex64, numpy.complex128])
def test_laplace01(self, dtype):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype) * 100
tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0)
tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1)
output = ndimage.laplace(array)
assert_array_almost_equal(tmp1 + tmp2, output)
@pytest.mark.parametrize('dtype',
[numpy.int32, numpy.float32, numpy.float64,
numpy.complex64, numpy.complex128])
def test_laplace02(self, dtype):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype) * 100
tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0)
tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1)
output = numpy.zeros(array.shape, dtype)
ndimage.laplace(array, output=output)
assert_array_almost_equal(tmp1 + tmp2, output)
@pytest.mark.parametrize('dtype',
[numpy.int32, numpy.float32, numpy.float64,
numpy.complex64, numpy.complex128])
def test_gaussian_laplace01(self, dtype):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype) * 100
tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0])
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2])
output = ndimage.gaussian_laplace(array, 1.0)
assert_array_almost_equal(tmp1 + tmp2, output)
@pytest.mark.parametrize('dtype',
[numpy.int32, numpy.float32, numpy.float64,
numpy.complex64, numpy.complex128])
def test_gaussian_laplace02(self, dtype):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype) * 100
tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0])
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2])
output = numpy.zeros(array.shape, dtype)
ndimage.gaussian_laplace(array, 1.0, output)
assert_array_almost_equal(tmp1 + tmp2, output)
@pytest.mark.parametrize('dtype', types + complex_types)
def test_generic_laplace01(self, dtype):
def derivative2(input, axis, output, mode, cval, a, b):
sigma = [a, b / 2.0]
input = numpy.asarray(input)
order = [0] * input.ndim
order[axis] = 2
return ndimage.gaussian_filter(input, sigma, order,
output, mode, cval)
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype)
output = numpy.zeros(array.shape, dtype)
tmp = ndimage.generic_laplace(array, derivative2,
extra_arguments=(1.0,),
extra_keywords={'b': 2.0})
ndimage.gaussian_laplace(array, 1.0, output)
assert_array_almost_equal(tmp, output)
@pytest.mark.parametrize('dtype',
[numpy.int32, numpy.float32, numpy.float64,
numpy.complex64, numpy.complex128])
def test_gaussian_gradient_magnitude01(self, dtype):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype) * 100
tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0])
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1])
output = ndimage.gaussian_gradient_magnitude(array, 1.0)
expected = tmp1 * tmp1 + tmp2 * tmp2
expected = numpy.sqrt(expected).astype(dtype)
assert_array_almost_equal(expected, output)
@pytest.mark.parametrize('dtype',
[numpy.int32, numpy.float32, numpy.float64,
numpy.complex64, numpy.complex128])
def test_gaussian_gradient_magnitude02(self, dtype):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype) * 100
tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0])
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1])
output = numpy.zeros(array.shape, dtype)
ndimage.gaussian_gradient_magnitude(array, 1.0, output)
expected = tmp1 * tmp1 + tmp2 * tmp2
expected = numpy.sqrt(expected).astype(dtype)
assert_array_almost_equal(expected, output)
def test_generic_gradient_magnitude01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], numpy.float64)
def derivative(input, axis, output, mode, cval, a, b):
sigma = [a, b / 2.0]
input = numpy.asarray(input)
order = [0] * input.ndim
order[axis] = 1
return ndimage.gaussian_filter(input, sigma, order,
output, mode, cval)
tmp1 = ndimage.gaussian_gradient_magnitude(array, 1.0)
tmp2 = ndimage.generic_gradient_magnitude(
array, derivative, extra_arguments=(1.0,),
extra_keywords={'b': 2.0})
assert_array_almost_equal(tmp1, tmp2)
def test_uniform01(self):
array = numpy.array([2, 4, 6])
size = 2
output = ndimage.uniform_filter1d(array, size, origin=-1)
assert_array_almost_equal([3, 5, 6], output)
def test_uniform01_complex(self):
array = numpy.array([2 + 1j, 4 + 2j, 6 + 3j], dtype=numpy.complex128)
size = 2
output = ndimage.uniform_filter1d(array, size, origin=-1)
assert_array_almost_equal([3, 5, 6], output.real)
assert_array_almost_equal([1.5, 2.5, 3], output.imag)
def test_uniform02(self):
array = numpy.array([1, 2, 3])
filter_shape = [0]
output = ndimage.uniform_filter(array, filter_shape)
assert_array_almost_equal(array, output)
def test_uniform03(self):
array = numpy.array([1, 2, 3])
filter_shape = [1]
output = ndimage.uniform_filter(array, filter_shape)
assert_array_almost_equal(array, output)
def test_uniform04(self):
array = numpy.array([2, 4, 6])
filter_shape = [2]
output = ndimage.uniform_filter(array, filter_shape)
assert_array_almost_equal([2, 3, 5], output)
def test_uniform05(self):
array = []
filter_shape = [1]
output = ndimage.uniform_filter(array, filter_shape)
assert_array_almost_equal([], output)
@pytest.mark.parametrize('dtype_array', types)
@pytest.mark.parametrize('dtype_output', types)
def test_uniform06(self, dtype_array, dtype_output):
filter_shape = [2, 2]
array = numpy.array([[4, 8, 12],
[16, 20, 24]], dtype_array)
output = ndimage.uniform_filter(
array, filter_shape, output=dtype_output)
assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
assert_equal(output.dtype.type, dtype_output)
@pytest.mark.parametrize('dtype_array', complex_types)
@pytest.mark.parametrize('dtype_output', complex_types)
def test_uniform06_complex(self, dtype_array, dtype_output):
filter_shape = [2, 2]
array = numpy.array([[4, 8 + 5j, 12],
[16, 20, 24]], dtype_array)
output = ndimage.uniform_filter(
array, filter_shape, output=dtype_output)
assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output.real)
assert_equal(output.dtype.type, dtype_output)
def test_minimum_filter01(self):
array = numpy.array([1, 2, 3, 4, 5])
filter_shape = numpy.array([2])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([1, 1, 2, 3, 4], output)
def test_minimum_filter02(self):
array = numpy.array([1, 2, 3, 4, 5])
filter_shape = numpy.array([3])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([1, 1, 2, 3, 4], output)
def test_minimum_filter03(self):
array = numpy.array([3, 2, 5, 1, 4])
filter_shape = numpy.array([2])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([3, 2, 2, 1, 1], output)
def test_minimum_filter04(self):
array = numpy.array([3, 2, 5, 1, 4])
filter_shape = numpy.array([3])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([2, 2, 1, 1, 1], output)
def test_minimum_filter05(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
filter_shape = numpy.array([2, 3])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 2, 1, 1, 1],
[5, 3, 3, 1, 1]], output)
def test_minimum_filter05_overlap(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
filter_shape = numpy.array([2, 3])
ndimage.minimum_filter(array, filter_shape, output=array)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 2, 1, 1, 1],
[5, 3, 3, 1, 1]], array)
def test_minimum_filter06(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 1, 1], [1, 1, 1]]
output = ndimage.minimum_filter(array, footprint=footprint)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 2, 1, 1, 1],
[5, 3, 3, 1, 1]], output)
# separable footprint should allow mode sequence
output2 = ndimage.minimum_filter(array, footprint=footprint,
mode=['reflect', 'reflect'])
assert_array_almost_equal(output2, output)
def test_minimum_filter07(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.minimum_filter(array, footprint=footprint)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1]], output)
with assert_raises(RuntimeError):
ndimage.minimum_filter(array, footprint=footprint,
mode=['reflect', 'constant'])
def test_minimum_filter08(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.minimum_filter(array, footprint=footprint, origin=-1)
assert_array_almost_equal([[3, 1, 3, 1, 1],
[5, 3, 3, 1, 1],
[3, 3, 1, 1, 1]], output)
def test_minimum_filter09(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.minimum_filter(array, footprint=footprint,
origin=[-1, 0])
assert_array_almost_equal([[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1],
[5, 3, 3, 1, 1]], output)
def test_maximum_filter01(self):
array = numpy.array([1, 2, 3, 4, 5])
filter_shape = numpy.array([2])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([1, 2, 3, 4, 5], output)
def test_maximum_filter02(self):
array = numpy.array([1, 2, 3, 4, 5])
filter_shape = numpy.array([3])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([2, 3, 4, 5, 5], output)
def test_maximum_filter03(self):
array = numpy.array([3, 2, 5, 1, 4])
filter_shape = numpy.array([2])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([3, 3, 5, 5, 4], output)
def test_maximum_filter04(self):
array = numpy.array([3, 2, 5, 1, 4])
filter_shape = numpy.array([3])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([3, 5, 5, 5, 4], output)
def test_maximum_filter05(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
filter_shape = numpy.array([2, 3])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([[3, 5, 5, 5, 4],
[7, 9, 9, 9, 5],
[8, 9, 9, 9, 7]], output)
def test_maximum_filter06(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 1, 1], [1, 1, 1]]
output = ndimage.maximum_filter(array, footprint=footprint)
assert_array_almost_equal([[3, 5, 5, 5, 4],
[7, 9, 9, 9, 5],
[8, 9, 9, 9, 7]], output)
# separable footprint should allow mode sequence
output2 = ndimage.maximum_filter(array, footprint=footprint,
mode=['reflect', 'reflect'])
assert_array_almost_equal(output2, output)
def test_maximum_filter07(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.maximum_filter(array, footprint=footprint)
assert_array_almost_equal([[3, 5, 5, 5, 4],
[7, 7, 9, 9, 5],
[7, 9, 8, 9, 7]], output)
# non-separable footprint should not allow mode sequence
with assert_raises(RuntimeError):
ndimage.maximum_filter(array, footprint=footprint,
mode=['reflect', 'reflect'])
def test_maximum_filter08(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.maximum_filter(array, footprint=footprint, origin=-1)
assert_array_almost_equal([[7, 9, 9, 5, 5],
[9, 8, 9, 7, 5],
[8, 8, 7, 7, 7]], output)
def test_maximum_filter09(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.maximum_filter(array, footprint=footprint,
origin=[-1, 0])
assert_array_almost_equal([[7, 7, 9, 9, 5],
[7, 9, 8, 9, 7],
[8, 8, 8, 7, 7]], output)
@pytest.mark.parametrize(
'axes', tuple(itertools.combinations(range(-3, 3), 2))
)
@pytest.mark.parametrize(
'filter_func, kwargs',
[(ndimage.minimum_filter, {}),
(ndimage.maximum_filter, {}),
(ndimage.median_filter, {}),
(ndimage.rank_filter, dict(rank=3)),
(ndimage.percentile_filter, dict(percentile=60))]
)
def test_minmax_nonseparable_axes(self, filter_func, axes, kwargs):
array = numpy.arange(6 * 8 * 12, dtype=numpy.float32).reshape(6, 8, 12)
# use 2D triangular footprint because it is non-separable
footprint = numpy.tri(5)
axes = numpy.array(axes)
if len(set(axes % array.ndim)) != len(axes):
# parametrized cases with duplicate axes raise an error
with pytest.raises(ValueError):
filter_func(array, footprint=footprint, axes=axes, **kwargs)
return
output = filter_func(array, footprint=footprint, axes=axes, **kwargs)
missing_axis = tuple(set(range(3)) - set(axes % array.ndim))[0]
footprint_3d = numpy.expand_dims(footprint, missing_axis)
expected = filter_func(array, footprint=footprint_3d, **kwargs)
assert_allclose(output, expected)
def test_rank01(self):
array = numpy.array([1, 2, 3, 4, 5])
output = ndimage.rank_filter(array, 1, size=2)
assert_array_almost_equal(array, output)
output = ndimage.percentile_filter(array, 100, size=2)
assert_array_almost_equal(array, output)
output = ndimage.median_filter(array, 2)
assert_array_almost_equal(array, output)
def test_rank02(self):
array = numpy.array([1, 2, 3, 4, 5])
output = ndimage.rank_filter(array, 1, size=[3])
assert_array_almost_equal(array, output)
output = ndimage.percentile_filter(array, 50, size=3)
assert_array_almost_equal(array, output)
output = ndimage.median_filter(array, (3,))
assert_array_almost_equal(array, output)
def test_rank03(self):
array = numpy.array([3, 2, 5, 1, 4])
output = ndimage.rank_filter(array, 1, size=[2])
assert_array_almost_equal([3, 3, 5, 5, 4], output)
output = ndimage.percentile_filter(array, 100, size=2)
assert_array_almost_equal([3, 3, 5, 5, 4], output)
def test_rank04(self):
array = numpy.array([3, 2, 5, 1, 4])
expected = [3, 3, 2, 4, 4]
output = ndimage.rank_filter(array, 1, size=3)
assert_array_almost_equal(expected, output)
output = ndimage.percentile_filter(array, 50, size=3)
assert_array_almost_equal(expected, output)
output = ndimage.median_filter(array, size=3)
assert_array_almost_equal(expected, output)
def test_rank05(self):
array = numpy.array([3, 2, 5, 1, 4])
expected = [3, 3, 2, 4, 4]
output = ndimage.rank_filter(array, -2, size=3)
assert_array_almost_equal(expected, output)
def test_rank06(self):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]])
expected = [[2, 2, 1, 1, 1],
[3, 3, 2, 1, 1],
[5, 5, 3, 3, 1]]
output = ndimage.rank_filter(array, 1, size=[2, 3])
assert_array_almost_equal(expected, output)
output = ndimage.percentile_filter(array, 17, size=(2, 3))
assert_array_almost_equal(expected, output)
def test_rank06_overlap(self):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]])
array_copy = array.copy()
expected = [[2, 2, 1, 1, 1],
[3, 3, 2, 1, 1],
[5, 5, 3, 3, 1]]
ndimage.rank_filter(array, 1, size=[2, 3], output=array)
assert_array_almost_equal(expected, array)
ndimage.percentile_filter(array_copy, 17, size=(2, 3),
output=array_copy)
assert_array_almost_equal(expected, array_copy)
def test_rank07(self):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]])
expected = [[3, 5, 5, 5, 4],
[5, 5, 7, 5, 4],
[6, 8, 8, 7, 5]]
output = ndimage.rank_filter(array, -2, size=[2, 3])
assert_array_almost_equal(expected, output)
def test_rank08(self):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]])
expected = [[3, 3, 2, 4, 4],
[5, 5, 5, 4, 4],
[5, 6, 7, 5, 5]]
output = ndimage.percentile_filter(array, 50.0, size=(2, 3))
assert_array_almost_equal(expected, output)
output = ndimage.rank_filter(array, 3, size=(2, 3))
assert_array_almost_equal(expected, output)
output = ndimage.median_filter(array, size=(2, 3))
assert_array_almost_equal(expected, output)
# non-separable: does not allow mode sequence
with assert_raises(RuntimeError):
ndimage.percentile_filter(array, 50.0, size=(2, 3),
mode=['reflect', 'constant'])
with assert_raises(RuntimeError):
ndimage.rank_filter(array, 3, size=(2, 3), mode=['reflect']*2)
with assert_raises(RuntimeError):
ndimage.median_filter(array, size=(2, 3), mode=['reflect']*2)
@pytest.mark.parametrize('dtype', types)
def test_rank09(self, dtype):
expected = [[3, 3, 2, 4, 4],
[3, 5, 2, 5, 1],
[5, 5, 8, 3, 5]]
footprint = [[1, 0, 1], [0, 1, 0]]
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype)
output = ndimage.rank_filter(array, 1, footprint=footprint)
assert_array_almost_equal(expected, output)
output = ndimage.percentile_filter(array, 35, footprint=footprint)
assert_array_almost_equal(expected, output)
def test_rank10(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
expected = [[2, 2, 1, 1, 1],
[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1]]
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.rank_filter(array, 0, footprint=footprint)
assert_array_almost_equal(expected, output)
output = ndimage.percentile_filter(array, 0.0, footprint=footprint)
assert_array_almost_equal(expected, output)
def test_rank11(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
expected = [[3, 5, 5, 5, 4],
[7, 7, 9, 9, 5],
[7, 9, 8, 9, 7]]
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.rank_filter(array, -1, footprint=footprint)
assert_array_almost_equal(expected, output)
output = ndimage.percentile_filter(array, 100.0, footprint=footprint)
assert_array_almost_equal(expected, output)
@pytest.mark.parametrize('dtype', types)
def test_rank12(self, dtype):
expected = [[3, 3, 2, 4, 4],
[3, 5, 2, 5, 1],
[5, 5, 8, 3, 5]]
footprint = [[1, 0, 1], [0, 1, 0]]
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype)
output = ndimage.rank_filter(array, 1, footprint=footprint)
assert_array_almost_equal(expected, output)
output = ndimage.percentile_filter(array, 50.0,
footprint=footprint)
assert_array_almost_equal(expected, output)
output = ndimage.median_filter(array, footprint=footprint)
assert_array_almost_equal(expected, output)
@pytest.mark.parametrize('dtype', types)
def test_rank13(self, dtype):
expected = [[5, 2, 5, 1, 1],
[5, 8, 3, 5, 5],
[6, 6, 5, 5, 5]]
footprint = [[1, 0, 1], [0, 1, 0]]
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype)
output = ndimage.rank_filter(array, 1, footprint=footprint,
origin=-1)
assert_array_almost_equal(expected, output)
@pytest.mark.parametrize('dtype', types)
def test_rank14(self, dtype):
expected = [[3, 5, 2, 5, 1],
[5, 5, 8, 3, 5],
[5, 6, 6, 5, 5]]
footprint = [[1, 0, 1], [0, 1, 0]]
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype)
output = ndimage.rank_filter(array, 1, footprint=footprint,
origin=[-1, 0])
assert_array_almost_equal(expected, output)
@pytest.mark.parametrize('dtype', types)
def test_rank15(self, dtype):
expected = [[2, 3, 1, 4, 1],
[5, 3, 7, 1, 1],
[5, 5, 3, 3, 3]]
footprint = [[1, 0, 1], [0, 1, 0]]
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], dtype)
output = ndimage.rank_filter(array, 0, footprint=footprint,
origin=[-1, 0])
assert_array_almost_equal(expected, output)
@pytest.mark.parametrize('dtype', types)
def test_generic_filter1d01(self, dtype):
weights = numpy.array([1.1, 2.2, 3.3])
def _filter_func(input, output, fltr, total):
fltr = fltr / total
for ii in range(input.shape[0] - 2):
output[ii] = input[ii] * fltr[0]
output[ii] += input[ii + 1] * fltr[1]
output[ii] += input[ii + 2] * fltr[2]
a = numpy.arange(12, dtype=dtype)
a.shape = (3, 4)
r1 = ndimage.correlate1d(a, weights / weights.sum(), 0, origin=-1)
r2 = ndimage.generic_filter1d(
a, _filter_func, 3, axis=0, origin=-1,
extra_arguments=(weights,),
extra_keywords={'total': weights.sum()})
assert_array_almost_equal(r1, r2)
@pytest.mark.parametrize('dtype', types)
def test_generic_filter01(self, dtype):
filter_ = numpy.array([[1.0, 2.0], [3.0, 4.0]])
footprint = numpy.array([[1, 0], [0, 1]])
cf = numpy.array([1., 4.])
def _filter_func(buffer, weights, total=1.0):
weights = cf / total
return (buffer * weights).sum()
a = numpy.arange(12, dtype=dtype)
a.shape = (3, 4)
r1 = ndimage.correlate(a, filter_ * footprint)
if dtype in float_types:
r1 /= 5
else:
r1 //= 5
r2 = ndimage.generic_filter(
a, _filter_func, footprint=footprint, extra_arguments=(cf,),
extra_keywords={'total': cf.sum()})
assert_array_almost_equal(r1, r2)
# generic_filter doesn't allow mode sequence
with assert_raises(RuntimeError):
r2 = ndimage.generic_filter(
a, _filter_func, mode=['reflect', 'reflect'],
footprint=footprint, extra_arguments=(cf,),
extra_keywords={'total': cf.sum()})
@pytest.mark.parametrize(
'mode, expected_value',
[('nearest', [1, 1, 2]),
('wrap', [3, 1, 2]),
('reflect', [1, 1, 2]),
('mirror', [2, 1, 2]),
('constant', [0, 1, 2])]
)
def test_extend01(self, mode, expected_value):
array = numpy.array([1, 2, 3])
weights = numpy.array([1, 0])
output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
assert_array_equal(output, expected_value)
@pytest.mark.parametrize(
'mode, expected_value',
[('nearest', [1, 1, 1]),
('wrap', [3, 1, 2]),
('reflect', [3, 3, 2]),
('mirror', [1, 2, 3]),
('constant', [0, 0, 0])]
)
def test_extend02(self, mode, expected_value):
array = numpy.array([1, 2, 3])
weights = numpy.array([1, 0, 0, 0, 0, 0, 0, 0])
output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
assert_array_equal(output, expected_value)
@pytest.mark.parametrize(
'mode, expected_value',
[('nearest', [2, 3, 3]),
('wrap', [2, 3, 1]),
('reflect', [2, 3, 3]),
('mirror', [2, 3, 2]),
('constant', [2, 3, 0])]
)
def test_extend03(self, mode, expected_value):
array = numpy.array([1, 2, 3])
weights = numpy.array([0, 0, 1])
output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
assert_array_equal(output, expected_value)
@pytest.mark.parametrize(
'mode, expected_value',
[('nearest', [3, 3, 3]),
('wrap', [2, 3, 1]),
('reflect', [2, 1, 1]),
('mirror', [1, 2, 3]),
('constant', [0, 0, 0])]
)
def test_extend04(self, mode, expected_value):
array = numpy.array([1, 2, 3])
weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
assert_array_equal(output, expected_value)
@pytest.mark.parametrize(
'mode, expected_value',
[('nearest', [[1, 1, 2], [1, 1, 2], [4, 4, 5]]),
('wrap', [[9, 7, 8], [3, 1, 2], [6, 4, 5]]),
('reflect', [[1, 1, 2], [1, 1, 2], [4, 4, 5]]),
('mirror', [[5, 4, 5], [2, 1, 2], [5, 4, 5]]),
('constant', [[0, 0, 0], [0, 1, 2], [0, 4, 5]])]
)
def test_extend05(self, mode, expected_value):
array = numpy.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
weights = numpy.array([[1, 0], [0, 0]])
output = ndimage.correlate(array, weights, mode=mode, cval=0)
assert_array_equal(output, expected_value)
@pytest.mark.parametrize(
'mode, expected_value',
[('nearest', [[5, 6, 6], [8, 9, 9], [8, 9, 9]]),
('wrap', [[5, 6, 4], [8, 9, 7], [2, 3, 1]]),
('reflect', [[5, 6, 6], [8, 9, 9], [8, 9, 9]]),
('mirror', [[5, 6, 5], [8, 9, 8], [5, 6, 5]]),
('constant', [[5, 6, 0], [8, 9, 0], [0, 0, 0]])]
)
def test_extend06(self, mode, expected_value):
array = numpy.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
weights = numpy.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]])
output = ndimage.correlate(array, weights, mode=mode, cval=0)
assert_array_equal(output, expected_value)
@pytest.mark.parametrize(
'mode, expected_value',
[('nearest', [3, 3, 3]),
('wrap', [2, 3, 1]),
('reflect', [2, 1, 1]),
('mirror', [1, 2, 3]),
('constant', [0, 0, 0])]
)
def test_extend07(self, mode, expected_value):
array = numpy.array([1, 2, 3])
weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
output = ndimage.correlate(array, weights, mode=mode, cval=0)
assert_array_equal(output, expected_value)
@pytest.mark.parametrize(
'mode, expected_value',
[('nearest', [[3], [3], [3]]),
('wrap', [[2], [3], [1]]),
('reflect', [[2], [1], [1]]),
('mirror', [[1], [2], [3]]),
('constant', [[0], [0], [0]])]
)
def test_extend08(self, mode, expected_value):
array = numpy.array([[1], [2], [3]])
weights = numpy.array([[0], [0], [0], [0], [0], [0], [0], [0], [1]])
output = ndimage.correlate(array, weights, mode=mode, cval=0)
assert_array_equal(output, expected_value)
@pytest.mark.parametrize(
'mode, expected_value',
[('nearest', [3, 3, 3]),
('wrap', [2, 3, 1]),
('reflect', [2, 1, 1]),
('mirror', [1, 2, 3]),
('constant', [0, 0, 0])]
)
def test_extend09(self, mode, expected_value):
array = numpy.array([1, 2, 3])
weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
output = ndimage.correlate(array, weights, mode=mode, cval=0)
assert_array_equal(output, expected_value)
@pytest.mark.parametrize(
'mode, expected_value',
[('nearest', [[3], [3], [3]]),
('wrap', [[2], [3], [1]]),
('reflect', [[2], [1], [1]]),
('mirror', [[1], [2], [3]]),
('constant', [[0], [0], [0]])]
)
def test_extend10(self, mode, expected_value):
array = numpy.array([[1], [2], [3]])
weights = numpy.array([[0], [0], [0], [0], [0], [0], [0], [0], [1]])
output = ndimage.correlate(array, weights, mode=mode, cval=0)
assert_array_equal(output, expected_value)
def test_ticket_701():
    # Regression test for ticket 701: a scalar ``size`` must behave like
    # the equivalent per-axis tuple in generic_filter.
    arr = numpy.arange(4).reshape((2, 2))

    def minimum(buffer):
        return numpy.min(buffer)

    res_tuple = ndimage.generic_filter(arr, minimum, size=(1, 1))
    # Before the fix, the scalar-size call below raised an error.
    res_scalar = ndimage.generic_filter(arr, minimum, size=1)
    assert_equal(res_tuple, res_scalar)
def test_gh_5430():
    # Regression test for gh-5430: _normalize_sequence must accept NumPy
    # integer scalars.  Before the fix, whether int32 or int64 failed
    # depended on the platform's C long width; a builtin int always worked.
    for sigma in (numpy.int32(1), numpy.int64(1), 1):
        out = ndimage._ni_support._normalize_sequence(sigma, 1)
        assert_equal(out, [sigma])
    # A list of the right length passes through unchanged (worked before).
    sigma = [1, 1]
    out = ndimage._ni_support._normalize_sequence(sigma, 2)
    assert_equal(out, sigma)
    # The original reporter's example now runs to completion.
    x = numpy.random.normal(size=(256, 256))
    perlin = numpy.zeros_like(x)
    for i in 2 ** numpy.arange(6):
        perlin += ndimage.gaussian_filter(x, i, mode="wrap") * i ** 2
    # gh-4106: rank 0 with a NumPy integer scalar must also work.
    x = numpy.int64(21)
    ndimage._ni_support._normalize_sequence(x, 0)
def test_gaussian_kernel1d():
    # Check _gaussian_kernel1d against the analytic Gaussian and its first
    # three derivatives, evaluated on the integer grid [-radius, radius].
    radius = 10
    sigma = 2
    var = sigma * sigma
    x = numpy.arange(-radius, radius + 1, dtype=numpy.double)
    phi = numpy.exp(-0.5 * x * x / var)
    phi /= phi.sum()
    assert_allclose(phi, _gaussian_kernel1d(sigma, 0, radius))
    assert_allclose(-phi * x / var, _gaussian_kernel1d(sigma, 1, radius))
    assert_allclose(phi * (x * x / var - 1) / var,
                    _gaussian_kernel1d(sigma, 2, radius))
    assert_allclose(phi * (3 - x * x / var) * x / (var * var),
                    _gaussian_kernel1d(sigma, 3, radius))
def test_orders_gauss():
    # Valid derivative orders are accepted; negative orders must raise.
    arr = numpy.zeros((1,))
    for order in (0, 3):
        assert_equal(0, ndimage.gaussian_filter(arr, 1, order=order))
        assert_equal(0, ndimage.gaussian_filter1d(arr, 1, axis=-1,
                                                  order=order))
    assert_raises(ValueError, ndimage.gaussian_filter, arr, 1, -1)
    assert_raises(ValueError, ndimage.gaussian_filter1d, arr, 1, -1, -1)
def test_valid_origins():
"""Regression test for #1311."""
def func(x):
return numpy.mean(x)
data = numpy.array([1, 2, 3, 4, 5], dtype=numpy.float64)
assert_raises(ValueError, ndimage.generic_filter, data, func, size=3,
origin=2)
assert_raises(ValueError, ndimage.generic_filter1d, data, func,
filter_size=3, origin=2)
assert_raises(ValueError, ndimage.percentile_filter, data, 0.2, size=3,
origin=2)
for filter in [ndimage.uniform_filter, ndimage.minimum_filter,
ndimage.maximum_filter, ndimage.maximum_filter1d,
ndimage.median_filter, ndimage.minimum_filter1d]:
# This should work, since for size == 3, the valid range for origin is
# -1 to 1.
list(filter(data, 3, origin=-1))
list(filter(data, 3, origin=1))
# Just check this raises an error instead of silently accepting or
# segfaulting.
assert_raises(ValueError, filter, data, 3, origin=2)
def test_bad_convolve_and_correlate_origins():
    """Regression test for gh-822: invalid origins must raise ValueError."""
    # Before gh-822 was fixed, some of these calls generated segfaults or
    # other crashes depending on the system.
    bad_calls = [
        (ndimage.correlate1d, ([0, 1, 2, 3, 4, 5], [1, 1, 2, 0]),
         dict(origin=2)),
        (ndimage.correlate, ([0, 1, 2, 3, 4, 5], [0, 1, 2]),
         dict(origin=[2])),
        (ndimage.correlate, (numpy.ones((3, 5)), numpy.ones((2, 2))),
         dict(origin=[0, 1])),
        (ndimage.convolve1d, (numpy.arange(10), numpy.ones(3)),
         dict(origin=-2)),
        (ndimage.convolve, (numpy.arange(10), numpy.ones(3)),
         dict(origin=[-2])),
        (ndimage.convolve, (numpy.ones((3, 5)), numpy.ones((2, 2))),
         dict(origin=[0, -2])),
    ]
    for func, args, kwargs in bad_calls:
        assert_raises(ValueError, func, *args, **kwargs)
def test_multiple_modes():
    """Filters accepting per-dimension modes must match a single repeated
    mode when every entry of the sequence is that same mode."""
    arr = numpy.array([[1., 0., 0.],
                       [1., 1., 0.],
                       [0., 0., 0.]])
    single_mode = 'reflect'
    per_axis_modes = ['reflect', 'reflect']
    # (filter function, positional args after the input, keyword args)
    cases = [
        (ndimage.gaussian_filter, (1,), {}),
        (ndimage.prewitt, (), {}),
        (ndimage.sobel, (), {}),
        (ndimage.laplace, (), {}),
        (ndimage.gaussian_laplace, (1,), {}),
        (ndimage.maximum_filter, (), {'size': 5}),
        (ndimage.minimum_filter, (), {'size': 5}),
        (ndimage.gaussian_gradient_magnitude, (1,), {}),
        (ndimage.uniform_filter, (5,), {}),
    ]
    for func, args, kwargs in cases:
        assert_equal(func(arr, *args, mode=single_mode, **kwargs),
                     func(arr, *args, mode=per_axis_modes, **kwargs))
def test_multiple_modes_sequentially():
    """A filter given one mode per dimension must equal applying the
    corresponding 1-D filter along each axis with that axis's mode."""
    arr = numpy.array([[1., 0., 0.],
                       [1., 1., 0.],
                       [0., 0., 0.]])
    modes = ['reflect', 'wrap']
    # (1-D filter, n-D filter, shared keyword arguments)
    pairs = [
        (ndimage.gaussian_filter1d, ndimage.gaussian_filter, {'sigma': 1}),
        (ndimage.uniform_filter1d, ndimage.uniform_filter, {'size': 5}),
        (ndimage.maximum_filter1d, ndimage.maximum_filter, {'size': 5}),
        (ndimage.minimum_filter1d, ndimage.minimum_filter, {'size': 5}),
    ]
    for filter_1d, filter_nd, kwargs in pairs:
        expected = filter_1d(arr, axis=0, mode=modes[0], **kwargs)
        expected = filter_1d(expected, axis=1, mode=modes[1], **kwargs)
        assert_equal(expected, filter_nd(arr, mode=modes, **kwargs))
def test_multiple_modes_prewitt():
    """Prewitt filter with per-axis modes matches precomputed values."""
    # Test prewitt filter for multiple extrapolation modes
    arr = numpy.array([[1., 0., 0.],
                       [1., 1., 0.],
                       [0., 0., 0.]])
    expected = numpy.array([[1., -3., 2.],
                            [1., -2., 1.],
                            [1., -1., 0.]])
    modes = ['reflect', 'wrap']
    assert_equal(expected,
                 ndimage.prewitt(arr, mode=modes))
def test_multiple_modes_sobel():
    """Sobel filter with per-axis modes matches precomputed values."""
    # Test sobel filter for multiple extrapolation modes
    arr = numpy.array([[1., 0., 0.],
                       [1., 1., 0.],
                       [0., 0., 0.]])
    expected = numpy.array([[1., -4., 3.],
                            [2., -3., 1.],
                            [1., -1., 0.]])
    modes = ['reflect', 'wrap']
    assert_equal(expected,
                 ndimage.sobel(arr, mode=modes))
def test_multiple_modes_laplace():
    """Laplace filter with per-axis modes matches precomputed values."""
    # Test laplace filter for multiple extrapolation modes
    arr = numpy.array([[1., 0., 0.],
                       [1., 1., 0.],
                       [0., 0., 0.]])
    expected = numpy.array([[-2., 2., 1.],
                            [-2., -3., 2.],
                            [1., 1., 0.]])
    modes = ['reflect', 'wrap']
    assert_equal(expected,
                 ndimage.laplace(arr, mode=modes))
def test_multiple_modes_gaussian_laplace():
    """gaussian_laplace with per-axis modes matches precomputed values."""
    # Test gaussian_laplace filter for multiple extrapolation modes
    arr = numpy.array([[1., 0., 0.],
                       [1., 1., 0.],
                       [0., 0., 0.]])
    expected = numpy.array([[-0.28438687, 0.01559809, 0.19773499],
                            [-0.36630503, -0.20069774, 0.07483620],
                            [0.15849176, 0.18495566, 0.21934094]])
    modes = ['reflect', 'wrap']
    assert_almost_equal(expected,
                        ndimage.gaussian_laplace(arr, 1, mode=modes))
def test_multiple_modes_gaussian_gradient_magnitude():
    """gaussian_gradient_magnitude with per-axis modes matches
    precomputed values."""
    # Test gaussian_gradient_magnitude filter for multiple
    # extrapolation modes
    arr = numpy.array([[1., 0., 0.],
                       [1., 1., 0.],
                       [0., 0., 0.]])
    expected = numpy.array([[0.04928965, 0.09745625, 0.06405368],
                            [0.23056905, 0.14025305, 0.04550846],
                            [0.19894369, 0.14950060, 0.06796850]])
    modes = ['reflect', 'wrap']
    calculated = ndimage.gaussian_gradient_magnitude(arr, 1, mode=modes)
    assert_almost_equal(expected, calculated)
def test_multiple_modes_uniform():
    """uniform_filter with per-axis modes matches precomputed values."""
    # Test uniform filter for multiple extrapolation modes
    arr = numpy.array([[1., 0., 0.],
                       [1., 1., 0.],
                       [0., 0., 0.]])
    expected = numpy.array([[0.32, 0.40, 0.48],
                            [0.20, 0.28, 0.32],
                            [0.28, 0.32, 0.40]])
    modes = ['reflect', 'wrap']
    assert_almost_equal(expected,
                        ndimage.uniform_filter(arr, 5, mode=modes))
def test_gaussian_truncate():
    """Gaussian filters can be truncated at different widths.

    These tests only check that the result has the expected number of
    nonzero elements; the kernel half-width is int(truncate * sigma + 0.5).
    """
    # Test that Gaussian filters can be truncated at different widths.
    # These tests only check that the result has the expected number
    # of nonzero elements.
    arr = numpy.zeros((100, 100), float)
    arr[50, 50] = 1
    num_nonzeros_2 = (ndimage.gaussian_filter(arr, 5, truncate=2) > 0).sum()
    assert_equal(num_nonzeros_2, 21**2)
    num_nonzeros_5 = (ndimage.gaussian_filter(arr, 5, truncate=5) > 0).sum()
    assert_equal(num_nonzeros_5, 51**2)
    # Test truncate when sigma is a sequence.
    f = ndimage.gaussian_filter(arr, [0.5, 2.5], truncate=3.5)
    fpos = f > 0
    n0 = fpos.any(axis=0).sum()
    # n0 should be 2*int(2.5*3.5 + 0.5) + 1
    assert_equal(n0, 19)
    n1 = fpos.any(axis=1).sum()
    # n1 should be 2*int(0.5*3.5 + 0.5) + 1
    assert_equal(n1, 5)
    # Test gaussian_filter1d.
    x = numpy.zeros(51)
    x[25] = 1
    f = ndimage.gaussian_filter1d(x, sigma=2, truncate=3.5)
    n = (f > 0).sum()
    assert_equal(n, 15)
    # Test gaussian_laplace
    y = ndimage.gaussian_laplace(x, sigma=2, truncate=3.5)
    nonzero_indices = numpy.nonzero(y != 0)[0]
    n = numpy.ptp(nonzero_indices) + 1
    assert_equal(n, 15)
    # Test gaussian_gradient_magnitude
    y = ndimage.gaussian_gradient_magnitude(x, sigma=2, truncate=3.5)
    nonzero_indices = numpy.nonzero(y != 0)[0]
    n = numpy.ptp(nonzero_indices) + 1
    assert_equal(n, 15)
def test_gaussian_radius():
    """Gaussian filters given ``radius`` must reproduce the result of the
    corresponding ``truncate`` value, where radius = int(truncate*sigma + 0.5).
    """
    # 1-D impulse: truncate=1.5, sigma=2 -> radius 3.
    impulse = numpy.zeros(7)
    impulse[3] = 1
    assert_equal(
        ndimage.gaussian_filter1d(impulse, sigma=2, truncate=1.5),
        ndimage.gaussian_filter1d(impulse, sigma=2, radius=3))
    # Scalar sigma: truncate=3.5, sigma=0.5 -> radius 2.
    image = numpy.zeros((9, 9))
    image[4, 4] = 1
    assert_equal(
        ndimage.gaussian_filter(image, sigma=0.5, truncate=3.5),
        ndimage.gaussian_filter(image, sigma=0.5, radius=2))
    # Sequence sigma: radii computed per axis.
    image = numpy.zeros((50, 50))
    image[25, 25] = 1
    assert_equal(
        ndimage.gaussian_filter(image, sigma=[0.5, 2.5], truncate=3.5),
        ndimage.gaussian_filter(image, sigma=[0.5, 2.5], radius=[2, 9]))
def test_gaussian_radius_invalid():
    """radius must be a nonnegative integer; anything else raises."""
    for bad_radius in (-1, 1.1):
        with assert_raises(ValueError):
            ndimage.gaussian_filter1d(numpy.zeros(8), sigma=1,
                                      radius=bad_radius)
class TestThreading:
    """Filters must produce identical results whether the four invocations
    run serially or concurrently from separate threads."""

    def check_func_thread(self, n, fun, args, out):
        # Launch n concurrent invocations, each writing into its own slot.
        from threading import Thread
        workers = [Thread(target=fun, args=args, kwargs={'output': out[x]})
                   for x in range(n)]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()

    def check_func_serial(self, n, fun, args, out):
        # Reference: the same n invocations, one after another.
        for i in range(n):
            fun(*args, output=out[i])

    def _serial_vs_thread(self, fun, args, shape):
        # Run four invocations both ways and require identical results.
        serial_out = numpy.empty(shape)
        thread_out = numpy.empty_like(serial_out)
        self.check_func_serial(4, fun, args, serial_out)
        self.check_func_thread(4, fun, args, thread_out)
        assert_array_equal(serial_out, thread_out)

    def test_correlate1d(self):
        d = numpy.random.randn(5000)
        self._serial_vs_thread(ndimage.correlate1d, (d, numpy.arange(5)),
                               (4, d.size))

    def test_correlate(self):
        d = numpy.random.randn(500, 500)
        k = numpy.random.randn(10, 10)
        self._serial_vs_thread(ndimage.correlate, (d, k),
                               [4] + list(d.shape))

    def test_median_filter(self):
        d = numpy.random.randn(500, 500)
        self._serial_vs_thread(ndimage.median_filter, (d, 3),
                               [4] + list(d.shape))

    def test_uniform_filter1d(self):
        d = numpy.random.randn(5000)
        self._serial_vs_thread(ndimage.uniform_filter1d, (d, 5),
                               (4, d.size))

    def test_minmax_filter(self):
        d = numpy.random.randn(500, 500)
        shape = [4] + list(d.shape)
        self._serial_vs_thread(ndimage.maximum_filter, (d, 3), shape)
        self._serial_vs_thread(ndimage.minimum_filter, (d, 3), shape)
def test_minmaximum_filter1d():
    """Size-1 min/max filters are identity; size-5 results are checked
    against literal expectations for each boundary mode."""
    # Regression gh-3898
    in_ = numpy.arange(10)
    out = ndimage.minimum_filter1d(in_, 1)
    assert_equal(in_, out)
    out = ndimage.maximum_filter1d(in_, 1)
    assert_equal(in_, out)
    # Test reflect
    out = ndimage.minimum_filter1d(in_, 5, mode='reflect')
    assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
    out = ndimage.maximum_filter1d(in_, 5, mode='reflect')
    assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
    # Test constant
    out = ndimage.minimum_filter1d(in_, 5, mode='constant', cval=-1)
    assert_equal([-1, -1, 0, 1, 2, 3, 4, 5, -1, -1], out)
    out = ndimage.maximum_filter1d(in_, 5, mode='constant', cval=10)
    assert_equal([10, 10, 4, 5, 6, 7, 8, 9, 10, 10], out)
    # Test nearest
    out = ndimage.minimum_filter1d(in_, 5, mode='nearest')
    assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
    out = ndimage.maximum_filter1d(in_, 5, mode='nearest')
    assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
    # Test wrap
    out = ndimage.minimum_filter1d(in_, 5, mode='wrap')
    assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 0, 0], out)
    out = ndimage.maximum_filter1d(in_, 5, mode='wrap')
    assert_equal([9, 9, 4, 5, 6, 7, 8, 9, 9, 9], out)
def test_uniform_filter1d_roundoff_errors():
    """Regression test for gh-6930: running-sum roundoff in uniform_filter1d."""
    signal = numpy.repeat([0, 1, 0], [9, 9, 9])
    for width in range(3, 10):
        filtered = ndimage.uniform_filter1d(signal, width)
        assert_equal(filtered.sum(), 10 - width)
def test_footprint_all_zeros():
    """Regression test for gh-6876: an all-zero footprint used to segfault."""
    image = numpy.random.randint(0, 100, (100, 100))
    empty_footprint = numpy.zeros((3, 3), bool)
    with assert_raises(ValueError):
        ndimage.maximum_filter(image, footprint=empty_footprint)
def test_gaussian_filter():
    """Regression test for gh-8207: float16 input raises RuntimeError."""
    half_precision = numpy.array([1], dtype=numpy.float16)
    with assert_raises(RuntimeError):
        ndimage.gaussian_filter(half_precision, 1.0)
def test_rank_filter_noninteger_rank():
    """Regression test for gh-9388: a non-integer rank raises TypeError."""
    data = numpy.random.random((10, 20, 30))
    footprint = numpy.ones((1, 1, 10), dtype=bool)
    with assert_raises(TypeError):
        ndimage.rank_filter(data, 0.5, footprint=footprint)
def test_size_footprint_both_set():
    """Passing both size and footprint emits a UserWarning that size is
    ignored; the call itself must still succeed."""
    with suppress_warnings() as sup:
        sup.filter(UserWarning,
                   "ignoring size because footprint is set")
        data = numpy.random.random((10, 20, 30))
        footprint = numpy.ones((1, 1, 10), dtype=bool)
        ndimage.rank_filter(data, 5, size=2, footprint=footprint)
def test_byte_order_median():
    """Regression test for #413: median_filter does not handle bytes orders."""
    native = numpy.arange(9, dtype='<f4').reshape(3, 3)
    swapped = numpy.arange(9, dtype='>f4').reshape(3, 3)
    # Filtering a byte-swapped copy must give the native-order result.
    assert_array_almost_equal(ndimage.median_filter(native, (3, 3)),
                              ndimage.median_filter(swapped, (3, 3)))
| 93,324
| 41.614155
| 79
|
py
|
scipy
|
scipy-main/scipy/ndimage/tests/test_splines.py
|
"""Tests for spline filtering."""
import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from scipy import ndimage
def get_spline_knot_values(order):
    """Knot values to the right of a B-spline's center, per spline order."""
    return {0: [1],
            1: [1],
            2: [6, 1],
            3: [4, 1],
            4: [230, 76, 1],
            5: [66, 26, 1]}[order]
def make_spline_knot_matrix(n, order, mode='mirror'):
    """Matrix to invert to find the spline coefficients.

    Builds the banded symmetric matrix of B-spline knot values for an
    n-sample signal, with boundary rows adjusted for the given extension
    mode ('mirror', 'reflect', or 'grid-wrap').
    """
    knot_values = get_spline_knot_values(order)
    # Symmetric banded interior: knot_values[d] fills the d-th sub- and
    # super-diagonal.
    matrix = np.zeros((n, n))
    for diag, knot_value in enumerate(knot_values):
        indices = np.arange(diag, n)
        if diag == 0:
            matrix[indices, indices] = knot_value
        else:
            matrix[indices, indices - diag] = knot_value
            matrix[indices - diag, indices] = knot_value
    # Normalization so each full row sums to 1 after division.
    knot_values_sum = knot_values[0] + 2 * sum(knot_values[1:])
    # (start, step) choose which columns receive the folded-back
    # contributions of out-of-range knots for each boundary mode.
    if mode == 'mirror':
        start, step = 1, 1
    elif mode == 'reflect':
        start, step = 0, 1
    elif mode == 'grid-wrap':
        start, step = -1, -1
    else:
        raise ValueError(f'unsupported mode {mode}')
    # Fold the out-of-range knot values back into the first/last rows,
    # mirrored symmetrically at both ends.
    for row in range(len(knot_values) - 1):
        for idx, knot_value in enumerate(knot_values[row + 1:]):
            matrix[row, start + step*idx] += knot_value
            matrix[-row - 1, -start - 1 - step*idx] += knot_value
    return matrix / knot_values_sum
@pytest.mark.parametrize('order', [0, 1, 2, 3, 4, 5])
@pytest.mark.parametrize('mode', ['mirror', 'grid-wrap', 'reflect'])
def test_spline_filter_vs_matrix_solution(order, mode):
    """spline_filter1d must invert the knot matrix: filtering the identity
    and multiplying by the matrix recovers the identity."""
    n = 100
    identity = np.eye(n, dtype=float)
    filtered_axis0 = ndimage.spline_filter1d(identity, axis=0, order=order,
                                             mode=mode)
    filtered_axis1 = ndimage.spline_filter1d(identity, axis=1, order=order,
                                             mode=mode)
    matrix = make_spline_knot_matrix(n, order, mode=mode)
    assert_almost_equal(identity, np.dot(filtered_axis0, matrix))
    assert_almost_equal(identity, np.dot(filtered_axis1, matrix.T))
| 2,199
| 32.333333
| 76
|
py
|
scipy
|
scipy-main/scipy/ndimage/tests/test_interpolation.py
|
import sys
import numpy
from numpy.testing import (assert_, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
suppress_warnings)
import pytest
from pytest import raises as assert_raises
import scipy.ndimage as ndimage
from . import types
# Tolerance used by tests that compare floating-point results.
eps = 1e-12
# Maps each scipy.ndimage boundary mode to the numpy.pad mode that extends
# an array the same way; used to build reference results via explicit
# padding (see e.g. test_boundary_spline_accuracy).
ndimage_to_numpy_mode = {
    'mirror': 'reflect',
    'reflect': 'symmetric',
    'grid-mirror': 'symmetric',
    'grid-wrap': 'wrap',
    'nearest': 'edge',
    'grid-constant': 'constant',
}
class TestNdimageInterpolation:
    @pytest.mark.parametrize(
        'mode, expected_value',
        [('nearest', [1.5, 2.5, 3.5, 4, 4, 4, 4]),
         ('wrap', [1.5, 2.5, 3.5, 1.5, 2.5, 3.5, 1.5]),
         ('grid-wrap', [1.5, 2.5, 3.5, 2.5, 1.5, 2.5, 3.5]),
         ('mirror', [1.5, 2.5, 3.5, 3.5, 2.5, 1.5, 1.5]),
         ('reflect', [1.5, 2.5, 3.5, 4, 3.5, 2.5, 1.5]),
         ('constant', [1.5, 2.5, 3.5, -1, -1, -1, -1]),
         ('grid-constant', [1.5, 2.5, 3.5, 1.5, -1, -1, -1])]
    )
    def test_boundaries(self, mode, expected_value):
        """Half-pixel shift past the input's end exercises each boundary
        mode with linear (order=1) interpolation."""
        def shift(x):
            return (x[0] + 0.5,)
        data = numpy.array([1, 2, 3, 4.])
        assert_array_equal(
            expected_value,
            ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
                                        output_shape=(7,), order=1))
    @pytest.mark.parametrize(
        'mode, expected_value',
        [('nearest', [1, 1, 2, 3]),
         ('wrap', [3, 1, 2, 3]),
         ('grid-wrap', [4, 1, 2, 3]),
         ('mirror', [2, 1, 2, 3]),
         ('reflect', [1, 1, 2, 3]),
         ('constant', [-1, 1, 2, 3]),
         ('grid-constant', [-1, 1, 2, 3])]
    )
    def test_boundaries2(self, mode, expected_value):
        """Shift of -0.9 before the input's start exercises each boundary
        mode with the default spline order."""
        def shift(x):
            return (x[0] - 0.9,)
        data = numpy.array([1, 2, 3, 4])
        assert_array_equal(
            expected_value,
            ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
                                        output_shape=(4,)))
    @pytest.mark.parametrize('mode', ['mirror', 'reflect', 'grid-mirror',
                                      'grid-wrap', 'grid-constant',
                                      'nearest'])
    @pytest.mark.parametrize('order', range(6))
    def test_boundary_spline_accuracy(self, mode, order):
        """Tests based on examples from gh-2640"""
        data = numpy.arange(-6, 7, dtype=float)
        x = numpy.linspace(-8, 15, num=1000)
        y = ndimage.map_coordinates(data, [x], order=order, mode=mode)
        # compute expected value using explicit padding via numpy.pad
        npad = 32
        pad_mode = ndimage_to_numpy_mode.get(mode)
        padded = numpy.pad(data, npad, mode=pad_mode)
        expected = ndimage.map_coordinates(padded, [npad + x], order=order,
                                           mode=mode)
        # grid-constant gets a looser tolerance than the other modes.
        atol = 1e-5 if mode == 'grid-constant' else 1e-12
        assert_allclose(y, expected, rtol=1e-7, atol=atol)
    @pytest.mark.parametrize('order', range(2, 6))
    @pytest.mark.parametrize('dtype', types)
    def test_spline01(self, dtype, order):
        """spline_filter of a 0-d array of ones is unchanged."""
        data = numpy.ones([], dtype)
        out = ndimage.spline_filter(data, order=order)
        assert_array_almost_equal(out, 1)
    @pytest.mark.parametrize('order', range(2, 6))
    @pytest.mark.parametrize('dtype', types)
    def test_spline02(self, dtype, order):
        """spline_filter of a length-1 array is unchanged."""
        data = numpy.array([1], dtype)
        out = ndimage.spline_filter(data, order=order)
        assert_array_almost_equal(out, [1])
    @pytest.mark.parametrize('order', range(2, 6))
    @pytest.mark.parametrize('dtype', types)
    def test_spline03(self, dtype, order):
        """spline_filter with an explicit output dtype is unchanged."""
        data = numpy.ones([], dtype)
        out = ndimage.spline_filter(data, order, output=dtype)
        assert_array_almost_equal(out, 1)
    @pytest.mark.parametrize('order', range(2, 6))
    @pytest.mark.parametrize('dtype', types)
    def test_spline04(self, dtype, order):
        """spline_filter of a constant 1-D array is unchanged."""
        data = numpy.ones([4], dtype)
        out = ndimage.spline_filter(data, order)
        assert_array_almost_equal(out, [1, 1, 1, 1])
    @pytest.mark.parametrize('order', range(2, 6))
    @pytest.mark.parametrize('dtype', types)
    def test_spline05(self, dtype, order):
        """spline_filter of a constant 2-D array is unchanged."""
        data = numpy.ones([4, 4], dtype)
        out = ndimage.spline_filter(data, order=order)
        assert_array_almost_equal(out, [[1, 1, 1, 1],
                                        [1, 1, 1, 1],
                                        [1, 1, 1, 1],
                                        [1, 1, 1, 1]])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform01(self, order):
        """Identity mapping preserves a single-element array."""
        data = numpy.array([1])
        def mapping(x):
            return x
        out = ndimage.geometric_transform(data, mapping, data.shape,
                                          order=order)
        assert_array_almost_equal(out, [1])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform02(self, order):
        """Identity mapping preserves a 1-D array of ones."""
        data = numpy.ones([4])
        def mapping(x):
            return x
        out = ndimage.geometric_transform(data, mapping, data.shape,
                                          order=order)
        assert_array_almost_equal(out, [1, 1, 1, 1])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform03(self, order):
        """Shift-by-one mapping zero-fills the first element."""
        data = numpy.ones([4])
        def mapping(x):
            return (x[0] - 1,)
        out = ndimage.geometric_transform(data, mapping, data.shape,
                                          order=order)
        assert_array_almost_equal(out, [0, 1, 1, 1])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform04(self, order):
        """Shift-by-one mapping moves distinct values right by one."""
        data = numpy.array([4, 1, 3, 2])
        def mapping(x):
            return (x[0] - 1,)
        out = ndimage.geometric_transform(data, mapping, data.shape,
                                          order=order)
        assert_array_almost_equal(out, [0, 4, 1, 3])
    @pytest.mark.parametrize('order', range(0, 6))
    @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
    def test_geometric_transform05(self, order, dtype):
        """Column shift on a 2-D array, for real and complex dtypes."""
        data = numpy.array([[1, 1, 1, 1],
                            [1, 1, 1, 1],
                            [1, 1, 1, 1]], dtype=dtype)
        expected = numpy.array([[0, 1, 1, 1],
                                [0, 1, 1, 1],
                                [0, 1, 1, 1]], dtype=dtype)
        if data.dtype.kind == 'c':
            # Give complex inputs a nonzero imaginary part.
            data -= 1j * data
            expected -= 1j * expected
        def mapping(x):
            return (x[0], x[1] - 1)
        out = ndimage.geometric_transform(data, mapping, data.shape,
                                          order=order)
        assert_array_almost_equal(out, expected)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform06(self, order):
        """Column-shift mapping moves columns right by one."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        def mapping(x):
            return (x[0], x[1] - 1)
        out = ndimage.geometric_transform(data, mapping, data.shape,
                                          order=order)
        assert_array_almost_equal(out, [[0, 4, 1, 3],
                                        [0, 7, 6, 8],
                                        [0, 3, 5, 3]])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform07(self, order):
        """Row-shift mapping moves rows down by one."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        def mapping(x):
            return (x[0] - 1, x[1])
        out = ndimage.geometric_transform(data, mapping, data.shape,
                                          order=order)
        assert_array_almost_equal(out, [[0, 0, 0, 0],
                                        [4, 1, 3, 2],
                                        [7, 6, 8, 5]])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform08(self, order):
        """Diagonal shift moves both rows and columns by one."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        def mapping(x):
            return (x[0] - 1, x[1] - 1)
        out = ndimage.geometric_transform(data, mapping, data.shape,
                                          order=order)
        assert_array_almost_equal(out, [[0, 0, 0, 0],
                                        [0, 4, 1, 3],
                                        [0, 7, 6, 8]])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform10(self, order):
        """Pre-filtered input with prefilter=False matches transform08."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        def mapping(x):
            return (x[0] - 1, x[1] - 1)
        if (order > 1):
            # Orders > 1 require an explicit spline prefilter pass.
            filtered = ndimage.spline_filter(data, order=order)
        else:
            filtered = data
        out = ndimage.geometric_transform(filtered, mapping, data.shape,
                                          order=order, prefilter=False)
        assert_array_almost_equal(out, [[0, 0, 0, 0],
                                        [0, 4, 1, 3],
                                        [0, 7, 6, 8]])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform13(self, order):
        """Output larger than input: mapping x//2 repeats each sample."""
        data = numpy.ones([2], numpy.float64)
        def mapping(x):
            return (x[0] // 2,)
        out = ndimage.geometric_transform(data, mapping, [4], order=order)
        assert_array_almost_equal(out, [1, 1, 1, 1])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform14(self, order):
        """Mapping 2*x downsamples by taking every other element."""
        data = [1, 5, 2, 6, 3, 7, 4, 4]
        def mapping(x):
            return (2 * x[0],)
        out = ndimage.geometric_transform(data, mapping, [4], order=order)
        assert_array_almost_equal(out, [1, 2, 3, 4])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform15(self, order):
        """Mapping x/2 upsamples; originals land on even indices."""
        data = [1, 2, 3, 4]
        def mapping(x):
            return (x[0] / 2,)
        out = ndimage.geometric_transform(data, mapping, [8], order=order)
        assert_array_almost_equal(out[::2], [1, 2, 3, 4])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform16(self, order):
        """Mapping x[1]*2 halves the number of columns."""
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9.0, 10, 11, 12]]
        def mapping(x):
            return (x[0], x[1] * 2)
        out = ndimage.geometric_transform(data, mapping, (3, 2),
                                          order=order)
        assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform17(self, order):
        """Mapping x[0]*2 with a one-row output keeps only the first row."""
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        def mapping(x):
            return (x[0] * 2, x[1])
        out = ndimage.geometric_transform(data, mapping, (1, 4),
                                          order=order)
        assert_array_almost_equal(out, [[1, 2, 3, 4]])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform18(self, order):
        """Doubling both coordinates shrinks both axes by two."""
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        def mapping(x):
            return (x[0] * 2, x[1] * 2)
        out = ndimage.geometric_transform(data, mapping, (1, 2),
                                          order=order)
        assert_array_almost_equal(out, [[1, 3]])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform19(self, order):
        """Halving the column coordinate upsamples columns by two."""
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        def mapping(x):
            return (x[0], x[1] / 2)
        out = ndimage.geometric_transform(data, mapping, (3, 8),
                                          order=order)
        assert_array_almost_equal(out[..., ::2], data)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform20(self, order):
        """Halving the row coordinate upsamples rows by two."""
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        def mapping(x):
            return (x[0] / 2, x[1])
        out = ndimage.geometric_transform(data, mapping, (6, 4),
                                          order=order)
        assert_array_almost_equal(out[::2, ...], data)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform21(self, order):
        """Halving both coordinates upsamples both axes by two."""
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        def mapping(x):
            return (x[0] / 2, x[1] / 2)
        out = ndimage.geometric_transform(data, mapping, (6, 8),
                                          order=order)
        assert_array_almost_equal(out[::2, ::2], data)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform22(self, order):
        """Upsampling then downsampling round-trips to the original."""
        data = numpy.array([[1, 2, 3, 4],
                            [5, 6, 7, 8],
                            [9, 10, 11, 12]], numpy.float64)
        def mapping1(x):
            return (x[0] / 2, x[1] / 2)
        def mapping2(x):
            return (x[0] * 2, x[1] * 2)
        out = ndimage.geometric_transform(data, mapping1,
                                          (6, 8), order=order)
        out = ndimage.geometric_transform(out, mapping2,
                                          (3, 4), order=order)
        assert_array_almost_equal(out, data)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform23(self, order):
        """A 1-D output can index into a fixed row of a 2-D input."""
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        def mapping(x):
            return (1, x[0] * 2)
        out = ndimage.geometric_transform(data, mapping, (2,), order=order)
        out = out.astype(numpy.int32)
        assert_array_almost_equal(out, [5, 7])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_geometric_transform24(self, order):
        """extra_arguments/extra_keywords are forwarded to the mapping."""
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        def mapping(x, a, b):
            return (a, x[0] * b)
        out = ndimage.geometric_transform(
            data, mapping, (2,), order=order, extra_arguments=(1,),
            extra_keywords={'b': 2})
        assert_array_almost_equal(out, [5, 7])
    def test_geometric_transform_grid_constant_order1(self):
        """Order-1 interpolation past the bounds with mode='grid-constant'."""
        # verify interpolation outside the original bounds
        x = numpy.array([[1, 2, 3],
                         [4, 5, 6]], dtype=float)
        def mapping(x):
            return (x[0] - 0.5), (x[1] - 0.5)
        expected_result = numpy.array([[0.25, 0.75, 1.25],
                                       [1.25, 3.00, 4.00]])
        assert_array_almost_equal(
            ndimage.geometric_transform(x, mapping, mode='grid-constant',
                                        order=1),
            expected_result,
        )
    @pytest.mark.parametrize('mode', ['grid-constant', 'grid-wrap', 'nearest',
                                      'mirror', 'reflect'])
    @pytest.mark.parametrize('order', range(6))
    def test_geometric_transform_vs_padded(self, order, mode):
        """Transform of the raw array must match the same transform applied
        to an explicitly padded copy (center extracted afterwards)."""
        x = numpy.arange(144, dtype=float).reshape(12, 12)
        def mapping(x):
            return (x[0] - 0.4), (x[1] + 2.3)
        # Manually pad and then extract center after the transform to get the
        # expected result.
        npad = 24
        pad_mode = ndimage_to_numpy_mode.get(mode)
        xp = numpy.pad(x, npad, mode=pad_mode)
        center_slice = tuple([slice(npad, -npad)] * x.ndim)
        expected_result = ndimage.geometric_transform(
            xp, mapping, mode=mode, order=order)[center_slice]
        assert_allclose(
            ndimage.geometric_transform(x, mapping, mode=mode,
                                        order=order),
            expected_result,
            rtol=1e-7,
        )
    def test_geometric_transform_endianness_with_output_parameter(self):
        """Regression test for #4127: output given as an array or dtype of
        non-native endianness must be accepted."""
        # geometric transform given output ndarray or dtype with
        # non-native endianness. see issue #4127
        data = numpy.array([1])
        def mapping(x):
            return x
        for out in [data.dtype, data.dtype.newbyteorder(),
                    numpy.empty_like(data),
                    numpy.empty_like(data).astype(data.dtype.newbyteorder())]:
            returned = ndimage.geometric_transform(data, mapping, data.shape,
                                                   output=out)
            # With an array output the function writes in place and
            # returns None; otherwise use the returned array.
            result = out if returned is None else returned
            assert_array_almost_equal(result, [1])
    def test_geometric_transform_with_string_output(self):
        """output may be given as a dtype string such as 'f'."""
        data = numpy.array([1])
        def mapping(x):
            return x
        out = ndimage.geometric_transform(data, mapping, output='f')
        assert_(out.dtype is numpy.dtype('f'))
        assert_array_almost_equal(out, [1])
    @pytest.mark.parametrize('order', range(0, 6))
    @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
    def test_map_coordinates01(self, order, dtype):
        """Index shift by -1 matches an explicit zero-padded expectation."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        expected = numpy.array([[0, 0, 0, 0],
                                [0, 4, 1, 3],
                                [0, 7, 6, 8]])
        if data.dtype.kind == 'c':
            data = data - 1j * data
            expected = expected - 1j * expected
        idx = numpy.indices(data.shape)
        idx -= 1
        out = ndimage.map_coordinates(data, idx, order=order)
        assert_array_almost_equal(out, expected)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_map_coordinates02(self, order):
        """A half-pixel coordinate offset agrees with ndimage.shift."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        idx = numpy.indices(data.shape, numpy.float64)
        idx -= 0.5
        out1 = ndimage.shift(data, 0.5, order=order)
        out2 = ndimage.map_coordinates(data, idx, order=order)
        assert_array_almost_equal(out1, out2)
    def test_map_coordinates03(self):
        """Non-contiguous inputs (Fortran order, strided slices) must give
        the same results as ndimage.shift."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]], order='F')
        idx = numpy.indices(data.shape) - 1
        out = ndimage.map_coordinates(data, idx)
        assert_array_almost_equal(out, [[0, 0, 0, 0],
                                        [0, 4, 1, 3],
                                        [0, 7, 6, 8]])
        assert_array_almost_equal(out, ndimage.shift(data, (1, 1)))
        # Strided rows.
        idx = numpy.indices(data[::2].shape) - 1
        out = ndimage.map_coordinates(data[::2], idx)
        assert_array_almost_equal(out, [[0, 0, 0, 0],
                                        [0, 4, 1, 3]])
        assert_array_almost_equal(out, ndimage.shift(data[::2], (1, 1)))
        # Strided columns.
        idx = numpy.indices(data[:, ::2].shape) - 1
        out = ndimage.map_coordinates(data[:, ::2], idx)
        assert_array_almost_equal(out, [[0, 0], [0, 4], [0, 7]])
        assert_array_almost_equal(out, ndimage.shift(data[:, ::2], (1, 1)))
    def test_map_coordinates_endianness_with_output_parameter(self):
        """Regression test for #4127: output given as an array or dtype of
        either endianness must be accepted."""
        # output parameter given as array or dtype with either endianness
        # see issue #4127
        data = numpy.array([[1, 2], [7, 6]])
        expected = numpy.array([[0, 0], [0, 1]])
        idx = numpy.indices(data.shape)
        idx -= 1
        for out in [
            data.dtype,
            data.dtype.newbyteorder(),
            numpy.empty_like(expected),
            numpy.empty_like(expected).astype(expected.dtype.newbyteorder())
        ]:
            returned = ndimage.map_coordinates(data, idx, output=out)
            # With an array output the function writes in place and
            # returns None; otherwise use the returned array.
            result = out if returned is None else returned
            assert_array_almost_equal(result, expected)
    def test_map_coordinates_with_string_output(self):
        """output may be given as a dtype string such as 'f'."""
        data = numpy.array([[1]])
        idx = numpy.indices(data.shape)
        out = ndimage.map_coordinates(data, idx, output='f')
        assert_(out.dtype is numpy.dtype('f'))
        assert_array_almost_equal(out, [[1]])
    @pytest.mark.skipif('win32' in sys.platform or numpy.intp(0).itemsize < 8,
                        reason='do not run on 32 bit or windows '
                               '(no sparse memory)')
    def test_map_coordinates_large_data(self):
        """map_coordinates on a very large array must not crash."""
        # check crash on large data
        try:
            n = 30000
            a = numpy.empty(n**2, dtype=numpy.float32).reshape(n, n)
            # fill the part we might read
            a[n - 3:, n - 3:] = 0
            ndimage.map_coordinates(a, [[n - 1.5], [n - 1.5]], order=1)
        except MemoryError as e:
            raise pytest.skip('Not enough memory available') from e
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform01(self, order):
        """An identity 1x1 matrix preserves a single-element array."""
        data = numpy.array([1])
        out = ndimage.affine_transform(data, [[1]], order=order)
        assert_array_almost_equal(out, [1])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform02(self, order):
        """An identity matrix preserves a 1-D array of ones."""
        data = numpy.ones([4])
        out = ndimage.affine_transform(data, [[1]], order=order)
        assert_array_almost_equal(out, [1, 1, 1, 1])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform03(self, order):
        """Offset -1 zero-fills the first element."""
        data = numpy.ones([4])
        out = ndimage.affine_transform(data, [[1]], -1, order=order)
        assert_array_almost_equal(out, [0, 1, 1, 1])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform04(self, order):
        """Offset -1 shifts distinct values right by one."""
        data = numpy.array([4, 1, 3, 2])
        out = ndimage.affine_transform(data, [[1]], -1, order=order)
        assert_array_almost_equal(out, [0, 4, 1, 3])
    @pytest.mark.parametrize('order', range(0, 6))
    @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
    def test_affine_transform05(self, order, dtype):
        """Column shift via offset [0, -1], for real and complex dtypes."""
        data = numpy.array([[1, 1, 1, 1],
                            [1, 1, 1, 1],
                            [1, 1, 1, 1]], dtype=dtype)
        expected = numpy.array([[0, 1, 1, 1],
                                [0, 1, 1, 1],
                                [0, 1, 1, 1]], dtype=dtype)
        if data.dtype.kind == 'c':
            # Give complex inputs a nonzero imaginary part.
            data -= 1j * data
            expected -= 1j * expected
        out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
                                       [0, -1], order=order)
        assert_array_almost_equal(out, expected)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform06(self, order):
        """Offset [0, -1] moves columns right by one."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
                                       [0, -1], order=order)
        assert_array_almost_equal(out, [[0, 4, 1, 3],
                                        [0, 7, 6, 8],
                                        [0, 3, 5, 3]])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform07(self, order):
        """Offset [-1, 0] moves rows down by one."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
                                       [-1, 0], order=order)
        assert_array_almost_equal(out, [[0, 0, 0, 0],
                                        [4, 1, 3, 2],
                                        [7, 6, 8, 5]])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform08(self, order):
        """Offset [-1, -1] moves both rows and columns by one."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
                                       [-1, -1], order=order)
        assert_array_almost_equal(out, [[0, 0, 0, 0],
                                        [0, 4, 1, 3],
                                        [0, 7, 6, 8]])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform09(self, order):
        """Pre-filtered input with prefilter=False matches transform08."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        if (order > 1):
            # Orders > 1 require an explicit spline prefilter pass.
            filtered = ndimage.spline_filter(data, order=order)
        else:
            filtered = data
        out = ndimage.affine_transform(filtered, [[1, 0], [0, 1]],
                                       [-1, -1], order=order,
                                       prefilter=False)
        assert_array_almost_equal(out, [[0, 0, 0, 0],
                                        [0, 4, 1, 3],
                                        [0, 7, 6, 8]])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform10(self, order):
        """Scale 0.5 upsamples; the last output sample falls outside."""
        data = numpy.ones([2], numpy.float64)
        out = ndimage.affine_transform(data, [[0.5]], output_shape=(4,),
                                       order=order)
        assert_array_almost_equal(out, [1, 1, 1, 0])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform11(self, order):
        """Scale 2 downsamples by taking every other element."""
        data = [1, 5, 2, 6, 3, 7, 4, 4]
        out = ndimage.affine_transform(data, [[2]], 0, (4,), order=order)
        assert_array_almost_equal(out, [1, 2, 3, 4])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform12(self, order):
        """Scale 0.5 upsamples; originals land on even indices."""
        data = [1, 2, 3, 4]
        out = ndimage.affine_transform(data, [[0.5]], 0, (8,), order=order)
        assert_array_almost_equal(out[::2], [1, 2, 3, 4])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform13(self, order):
        # Diagonal entry 2 on the second axis downsamples columns by two.
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9.0, 10, 11, 12]]
        out = ndimage.affine_transform(data, [[1, 0], [0, 2]], 0, (3, 2),
                                       order=order)
        assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform14(self, order):
        # Diagonal entry 2 on the first axis downsamples rows by two.
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        out = ndimage.affine_transform(data, [[2, 0], [0, 1]], 0, (1, 4),
                                       order=order)
        assert_array_almost_equal(out, [[1, 2, 3, 4]])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform15(self, order):
        # Downsample both axes by two at once.
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        out = ndimage.affine_transform(data, [[2, 0], [0, 2]], 0, (1, 2),
                                       order=order)
        assert_array_almost_equal(out, [[1, 3]])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform16(self, order):
        # Diagonal entry 0.5 upsamples columns by two; the even columns of
        # the result must coincide with the original data.
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        out = ndimage.affine_transform(data, [[1, 0.0], [0, 0.5]], 0,
                                       (3, 8), order=order)
        assert_array_almost_equal(out[..., ::2], data)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform17(self, order):
        # Diagonal entry 0.5 upsamples rows by two; even rows match data.
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        out = ndimage.affine_transform(data, [[0.5, 0], [0, 1]], 0,
                                       (6, 4), order=order)
        assert_array_almost_equal(out[::2, ...], data)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform18(self, order):
        # Upsample both axes by two; the even/even grid matches the input.
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0,
                                       (6, 8), order=order)
        assert_array_almost_equal(out[::2, ::2], data)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform19(self, order):
        # Round trip: upsample by two then downsample by two must recover
        # the original array for every spline order.
        data = numpy.array([[1, 2, 3, 4],
                            [5, 6, 7, 8],
                            [9, 10, 11, 12]], numpy.float64)
        out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0,
                                       (6, 8), order=order)
        out = ndimage.affine_transform(out, [[2.0, 0], [0, 2.0]], 0,
                                       (3, 4), order=order)
        assert_array_almost_equal(out, data)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform20(self, order):
        # A 2x1 matrix maps the 1-D output index o to the 2-D input
        # coordinate (0, 2*o): first row, every other column.
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        out = ndimage.affine_transform(data, [[0], [2]], 0, (2,),
                                       order=order)
        assert_array_almost_equal(out, [1, 3])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform21(self, order):
        # A 2x1 matrix mapping output index o to input coordinate (2*o, 0):
        # every other row, first column.
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        out = ndimage.affine_transform(data, [[2], [0]], 0, (2,),
                                       order=order)
        assert_array_almost_equal(out, [1, 9])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform22(self, order):
        # shift and offset interaction; see issue #1547
        # Downsampling by two with offset -1: output o samples input 2*o-1.
        data = numpy.array([4, 1, 3, 2])
        out = ndimage.affine_transform(data, [[2]], [-1], (3,),
                                       order=order)
        assert_array_almost_equal(out, [0, 1, 2])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform23(self, order):
        # shift and offset interaction; see issue #1547
        # Upsampling by two with offset -1: even outputs land on the
        # shifted input samples.
        data = numpy.array([4, 1, 3, 2])
        out = ndimage.affine_transform(data, [[0.5]], [-1], (8,),
                                       order=order)
        assert_array_almost_equal(out[::2], [0, 4, 1, 3])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform24(self, order):
        # consistency between diagonal and non-diagonal case; see issue #1547
        # A 1-D matrix [2] (diagonal fast path) must match the 2-D matrix
        # [[2]] (general path). The deprecation warning for the 1-D form
        # is suppressed because it is exactly what is being exercised.
        data = numpy.array([4, 1, 3, 2])
        with suppress_warnings() as sup:
            sup.filter(UserWarning,
                       'The behavior of affine_transform with a 1-D array .* '
                       'has changed')
            out1 = ndimage.affine_transform(data, [2], -1, order=order)
            out2 = ndimage.affine_transform(data, [[2]], -1, order=order)
        assert_array_almost_equal(out1, out2)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform25(self, order):
        # consistency between diagonal and non-diagonal case; see issue #1547
        # Same check as test_affine_transform24 but with an upsampling
        # (0.5) factor instead of a downsampling one.
        data = numpy.array([4, 1, 3, 2])
        with suppress_warnings() as sup:
            sup.filter(UserWarning,
                       'The behavior of affine_transform with a 1-D array .* '
                       'has changed')
            out1 = ndimage.affine_transform(data, [0.5], -1, order=order)
            out2 = ndimage.affine_transform(data, [[0.5]], -1, order=order)
        assert_array_almost_equal(out1, out2)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform26(self, order):
        # test homogeneous coordinates
        # Three equivalent ways to spell the same transform must agree:
        # (matrix, offset) pair, a 2x3 augmented matrix, and a full 3x3
        # homogeneous matrix with bottom row [0, 0, 1].
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        if (order > 1):
            filtered = ndimage.spline_filter(data, order=order)
        else:
            filtered = data
        tform_original = numpy.eye(2)
        offset_original = -numpy.ones((2, 1))
        # 2x3 augmented form: offset appended as the last column
        tform_h1 = numpy.hstack((tform_original, offset_original))
        # 3x3 homogeneous form: augmented matrix plus [0, 0, 1] row
        tform_h2 = numpy.vstack((tform_h1, [[0, 0, 1]]))
        out1 = ndimage.affine_transform(filtered, tform_original,
                                        offset_original.ravel(),
                                        order=order, prefilter=False)
        out2 = ndimage.affine_transform(filtered, tform_h1, order=order,
                                        prefilter=False)
        out3 = ndimage.affine_transform(filtered, tform_h2, order=order,
                                        prefilter=False)
        for out in [out1, out2, out3]:
            assert_array_almost_equal(out, [[0, 0, 0, 0],
                                            [0, 4, 1, 3],
                                            [0, 7, 6, 8]])
    def test_affine_transform27(self):
        # test valid homogeneous transformation matrix
        # A homogeneous matrix whose bottom row is not [0, 0, 1] is not a
        # valid affine transform and must raise ValueError.
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        tform_h1 = numpy.hstack((numpy.eye(2), -numpy.ones((2, 1))))
        tform_h2 = numpy.vstack((tform_h1, [[5, 2, 1]]))
        assert_raises(ValueError, ndimage.affine_transform, data, tform_h2)
    def test_affine_transform_1d_endianness_with_output_parameter(self):
        # 1d affine transform given output ndarray or dtype with
        # either endianness. see issue #7388
        data = numpy.ones((2, 2))
        # Try both a preallocated array and a bare dtype, in native and
        # swapped byte order; all four must produce the same result.
        for out in [numpy.empty_like(data),
                    numpy.empty_like(data).astype(data.dtype.newbyteorder()),
                    data.dtype, data.dtype.newbyteorder()]:
            with suppress_warnings() as sup:
                sup.filter(UserWarning,
                           'The behavior of affine_transform with a 1-D array '
                           '.* has changed')
                returned = ndimage.affine_transform(data, [1, 1], output=out)
            # When `out` is a preallocated array the function fills it in
            # place and returns None.
            result = out if returned is None else returned
            assert_array_almost_equal(result, [[1, 1], [1, 1]])
    def test_affine_transform_multi_d_endianness_with_output_parameter(self):
        # affine transform given output ndarray or dtype with either endianness
        # see issue #4127
        data = numpy.array([1])
        # Same four output variants as the 1-D test above, but using the
        # full (non-diagonal) matrix code path.
        for out in [data.dtype, data.dtype.newbyteorder(),
                    numpy.empty_like(data),
                    numpy.empty_like(data).astype(data.dtype.newbyteorder())]:
            returned = ndimage.affine_transform(data, [[1]], output=out)
            result = out if returned is None else returned
            assert_array_almost_equal(result, [1])
    def test_affine_transform_output_shape(self):
        # don't require output_shape when out of a different size is given
        data = numpy.arange(8, dtype=numpy.float64)
        out = numpy.ones((16,))
        # The shape of the preallocated output implicitly defines
        # output_shape; only the first 8 entries are written here.
        ndimage.affine_transform(data, [[1]], output=out)
        assert_array_almost_equal(out[:8], data)
        # mismatched output shape raises an error
        with pytest.raises(RuntimeError):
            ndimage.affine_transform(
                data, [[1]], output=out, output_shape=(12,))
def test_affine_transform_with_string_output(self):
data = numpy.array([1])
out = ndimage.affine_transform(data, [[1]], output='f')
assert_(out.dtype is numpy.dtype('f'))
assert_array_almost_equal(out, [1])
    @pytest.mark.parametrize('shift',
                             [(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform_shift_via_grid_wrap(self, shift, order):
        # For mode 'grid-wrap', integer shifts should match numpy.roll
        x = numpy.array([[0, 1],
                         [2, 3]])
        # Build a 2x3 augmented matrix: identity plus the shift as offset.
        affine = numpy.zeros((2, 3))
        affine[:2, :2] = numpy.eye(2)
        affine[:, 2] = shift
        assert_array_almost_equal(
            ndimage.affine_transform(x, affine, mode='grid-wrap', order=order),
            numpy.roll(x, shift, axis=(0, 1)),
        )
    @pytest.mark.parametrize('order', range(0, 6))
    def test_affine_transform_shift_reflect(self, order):
        # shift by x.shape results in reflection
        x = numpy.array([[0, 1, 2],
                         [3, 4, 5]])
        # Identity matrix with offset equal to the full array shape:
        # under 'reflect' boundary handling this mirrors both axes.
        affine = numpy.zeros((2, 3))
        affine[:2, :2] = numpy.eye(2)
        affine[:, 2] = x.shape
        assert_array_almost_equal(
            ndimage.affine_transform(x, affine, mode='reflect', order=order),
            x[::-1, ::-1],
        )
@pytest.mark.parametrize('order', range(0, 6))
def test_shift01(self, order):
data = numpy.array([1])
out = ndimage.shift(data, [1], order=order)
assert_array_almost_equal(out, [0])
@pytest.mark.parametrize('order', range(0, 6))
def test_shift02(self, order):
data = numpy.ones([4])
out = ndimage.shift(data, [1], order=order)
assert_array_almost_equal(out, [0, 1, 1, 1])
@pytest.mark.parametrize('order', range(0, 6))
def test_shift03(self, order):
data = numpy.ones([4])
out = ndimage.shift(data, -1, order=order)
assert_array_almost_equal(out, [1, 1, 1, 0])
@pytest.mark.parametrize('order', range(0, 6))
def test_shift04(self, order):
data = numpy.array([4, 1, 3, 2])
out = ndimage.shift(data, 1, order=order)
assert_array_almost_equal(out, [0, 4, 1, 3])
    @pytest.mark.parametrize('order', range(0, 6))
    @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
    def test_shift05(self, order, dtype):
        # A [0, 1] shift on a 2-D array of ones zero-fills the first
        # column; checked for both real and complex dtypes.
        data = numpy.array([[1, 1, 1, 1],
                            [1, 1, 1, 1],
                            [1, 1, 1, 1]], dtype=dtype)
        expected = numpy.array([[0, 1, 1, 1],
                                [0, 1, 1, 1],
                                [0, 1, 1, 1]], dtype=dtype)
        if data.dtype.kind == 'c':
            # give the complex case a nonzero imaginary part as well
            data -= 1j * data
            expected -= 1j * expected
        out = ndimage.shift(data, [0, 1], order=order)
        assert_array_almost_equal(out, expected)
    @pytest.mark.parametrize('order', range(0, 6))
    @pytest.mark.parametrize('mode', ['constant', 'grid-constant'])
    @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
    def test_shift_with_nonzero_cval(self, order, mode, dtype):
        # Like test_shift05, but the vacated column must be filled with a
        # user-supplied cval instead of 0, in both constant modes.
        data = numpy.array([[1, 1, 1, 1],
                            [1, 1, 1, 1],
                            [1, 1, 1, 1]], dtype=dtype)
        expected = numpy.array([[0, 1, 1, 1],
                                [0, 1, 1, 1],
                                [0, 1, 1, 1]], dtype=dtype)
        if data.dtype.kind == 'c':
            data -= 1j * data
            expected -= 1j * expected
        cval = 5.0
        expected[:, 0] = cval  # specific to shift of [0, 1] used below
        out = ndimage.shift(data, [0, 1], order=order, mode=mode, cval=cval)
        assert_array_almost_equal(out, expected)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_shift06(self, order):
        # Shift [0, 1]: columns slide right, first column zero-filled.
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        out = ndimage.shift(data, [0, 1], order=order)
        assert_array_almost_equal(out, [[0, 4, 1, 3],
                                        [0, 7, 6, 8],
                                        [0, 3, 5, 3]])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_shift07(self, order):
        # Shift [1, 0]: rows slide down, first row zero-filled.
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        out = ndimage.shift(data, [1, 0], order=order)
        assert_array_almost_equal(out, [[0, 0, 0, 0],
                                        [4, 1, 3, 2],
                                        [7, 6, 8, 5]])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_shift08(self, order):
        # Shift [1, 1]: combined row and column slide with zero fill.
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        out = ndimage.shift(data, [1, 1], order=order)
        assert_array_almost_equal(out, [[0, 0, 0, 0],
                                        [0, 4, 1, 3],
                                        [0, 7, 6, 8]])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_shift09(self, order):
        # Same as test_shift08, but with explicit spline prefiltering and
        # prefilter=False; must match the prefilter=True result.
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        if (order > 1):
            # spline_filter only applies for orders > 1
            filtered = ndimage.spline_filter(data, order=order)
        else:
            filtered = data
        out = ndimage.shift(filtered, [1, 1], order=order, prefilter=False)
        assert_array_almost_equal(out, [[0, 0, 0, 0],
                                        [0, 4, 1, 3],
                                        [0, 7, 6, 8]])
@pytest.mark.parametrize('shift',
[(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
@pytest.mark.parametrize('order', range(0, 6))
def test_shift_grid_wrap(self, shift, order):
# For mode 'grid-wrap', integer shifts should match numpy.roll
x = numpy.array([[0, 1],
[2, 3]])
assert_array_almost_equal(
ndimage.shift(x, shift, mode='grid-wrap', order=order),
numpy.roll(x, shift, axis=(0, 1)),
)
    @pytest.mark.parametrize('shift',
                             [(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
    @pytest.mark.parametrize('order', range(0, 6))
    def test_shift_grid_constant1(self, shift, order):
        # For integer shifts, 'constant' and 'grid-constant' should be equal
        x = numpy.arange(20).reshape((5, 4))
        assert_array_almost_equal(
            ndimage.shift(x, shift, mode='grid-constant', order=order),
            ndimage.shift(x, shift, mode='constant', order=order),
        )
    def test_shift_grid_constant_order1(self):
        # Fractional (0.5, 0.5) shift with linear interpolation in
        # 'grid-constant' mode: edge samples blend with the cval (0),
        # pinned against a precomputed reference.
        x = numpy.array([[1, 2, 3],
                         [4, 5, 6]], dtype=float)
        expected_result = numpy.array([[0.25, 0.75, 1.25],
                                       [1.25, 3.00, 4.00]])
        assert_array_almost_equal(
            ndimage.shift(x, (0.5, 0.5), mode='grid-constant', order=1),
            expected_result,
        )
    @pytest.mark.parametrize('order', range(0, 6))
    def test_shift_reflect(self, order):
        # shift by x.shape results in reflection
        x = numpy.array([[0, 1, 2],
                         [3, 4, 5]])
        assert_array_almost_equal(
            ndimage.shift(x, x.shape, mode='reflect', order=order),
            x[::-1, ::-1],
        )
    @pytest.mark.parametrize('order', range(0, 6))
    @pytest.mark.parametrize('prefilter', [False, True])
    def test_shift_nearest_boundary(self, order, prefilter):
        # verify that shifting at least order // 2 beyond the end of the array
        # gives a value equal to the edge value.
        x = numpy.arange(16)
        kwargs = dict(mode='nearest', order=order, prefilter=prefilter)
        # shift right: first output sample comes from beyond the left edge
        assert_array_almost_equal(
            ndimage.shift(x, order // 2 + 1, **kwargs)[0], x[0],
        )
        # shift left: last output sample comes from beyond the right edge
        assert_array_almost_equal(
            ndimage.shift(x, -order // 2 - 1, **kwargs)[-1], x[-1],
        )
    @pytest.mark.parametrize('mode', ['grid-constant', 'grid-wrap', 'nearest',
                                      'mirror', 'reflect'])
    @pytest.mark.parametrize('order', range(6))
    def test_shift_vs_padded(self, order, mode):
        # Cross-check each ndimage boundary mode against explicit numpy
        # padding: shifting a pre-padded array and cropping the center must
        # match shifting the original directly.
        x = numpy.arange(144, dtype=float).reshape(12, 12)
        shift = (0.4, -2.3)
        # manually pad and then extract center to get expected result
        npad = 32
        # ndimage_to_numpy_mode is a module-level mapping (defined earlier
        # in this file) from ndimage mode names to numpy.pad mode names.
        pad_mode = ndimage_to_numpy_mode.get(mode)
        xp = numpy.pad(x, npad, mode=pad_mode)
        center_slice = tuple([slice(npad, -npad)] * x.ndim)
        expected_result = ndimage.shift(
            xp, shift, mode=mode, order=order)[center_slice]
        assert_allclose(
            ndimage.shift(x, shift, mode=mode, order=order),
            expected_result,
            rtol=1e-7,
        )
    @pytest.mark.parametrize('order', range(0, 6))
    def test_zoom1(self, order):
        # Zooming a 5x5 ramp by 2 (scalar and per-axis forms) must double
        # the shape and keep values within the input range (to within the
        # module-level tolerance `eps`).
        for z in [2, [2, 2]]:
            arr = numpy.array(list(range(25))).reshape((5, 5)).astype(float)
            arr = ndimage.zoom(arr, z, order=order)
            assert_equal(arr.shape, (10, 10))
            assert_(numpy.all(arr[-1, :] != 0))
            assert_(numpy.all(arr[-1, :] >= (20 - eps)))
            assert_(numpy.all(arr[0, :] <= (5 + eps)))
            assert_(numpy.all(arr >= (0 - eps)))
            assert_(numpy.all(arr <= (24 + eps)))
def test_zoom2(self):
arr = numpy.arange(12).reshape((3, 4))
out = ndimage.zoom(ndimage.zoom(arr, 2), 0.5)
assert_array_equal(out, arr)
def test_zoom3(self):
arr = numpy.array([[1, 2]])
out1 = ndimage.zoom(arr, (2, 1))
out2 = ndimage.zoom(arr, (1, 2))
assert_array_almost_equal(out1, numpy.array([[1, 2], [1, 2]]))
assert_array_almost_equal(out2, numpy.array([[1, 1, 2, 2]]))
    @pytest.mark.parametrize('order', range(0, 6))
    @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
    def test_zoom_affine01(self, order, dtype):
        # Zoom expressed through affine_transform's 1-D (diagonal) matrix
        # form; the deprecation warning for that form is suppressed.
        data = numpy.asarray([[1, 2, 3, 4],
                              [5, 6, 7, 8],
                              [9, 10, 11, 12]], dtype=dtype)
        if data.dtype.kind == 'c':
            # exercise the complex path with a nonzero imaginary part
            data -= 1j * data
        with suppress_warnings() as sup:
            sup.filter(UserWarning,
                       'The behavior of affine_transform with a 1-D array .* '
                       'has changed')
            out = ndimage.affine_transform(data, [0.5, 0.5], 0,
                                           (6, 8), order=order)
        assert_array_almost_equal(out[::2, ::2], data)
def test_zoom_infinity(self):
# Ticket #1419 regression test
dim = 8
ndimage.zoom(numpy.zeros((dim, dim)), 1. / dim, mode='nearest')
def test_zoom_zoomfactor_one(self):
# Ticket #1122 regression test
arr = numpy.zeros((1, 5, 5))
zoom = (1.0, 2.0, 2.0)
out = ndimage.zoom(arr, zoom, cval=7)
ref = numpy.zeros((1, 10, 10))
assert_array_almost_equal(out, ref)
def test_zoom_output_shape_roundoff(self):
arr = numpy.zeros((3, 11, 25))
zoom = (4.0 / 3, 15.0 / 11, 29.0 / 25)
out = ndimage.zoom(arr, zoom)
assert_array_equal(out.shape, (4, 15, 29))
    @pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)])
    @pytest.mark.parametrize('mode', ['nearest', 'constant', 'wrap', 'reflect',
                                      'mirror', 'grid-wrap', 'grid-mirror',
                                      'grid-constant'])
    def test_zoom_by_int_order0(self, zoom, mode):
        # order 0 zoom should be the same as replication via numpy.kron
        # Note: This is not True for general x shapes when grid_mode is False,
        # but works here for all modes because the size ratio happens to
        # always be an integer when x.shape = (2, 2).
        x = numpy.array([[0, 1],
                         [2, 3]], dtype=float)
        # x = numpy.arange(16, dtype=float).reshape(4, 4)
        assert_array_almost_equal(
            ndimage.zoom(x, zoom, order=0, mode=mode),
            numpy.kron(x, numpy.ones(zoom))
        )
    @pytest.mark.parametrize('shape', [(2, 3), (4, 4)])
    @pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)])
    @pytest.mark.parametrize('mode', ['nearest', 'reflect', 'mirror',
                                      'grid-wrap', 'grid-constant'])
    def test_zoom_grid_by_int_order0(self, shape, zoom, mode):
        # When grid_mode is True, order 0 zoom should be the same as
        # replication via numpy.kron. The only exceptions to this are the
        # non-grid modes 'constant' and 'wrap'.
        x = numpy.arange(numpy.prod(shape), dtype=float).reshape(shape)
        assert_array_almost_equal(
            ndimage.zoom(x, zoom, order=0, mode=mode, grid_mode=True),
            numpy.kron(x, numpy.ones(zoom))
        )
@pytest.mark.parametrize('mode', ['constant', 'wrap'])
def test_zoom_grid_mode_warnings(self, mode):
# Warn on use of non-grid modes when grid_mode is True
x = numpy.arange(9, dtype=float).reshape((3, 3))
with pytest.warns(UserWarning,
match="It is recommended to use mode"):
ndimage.zoom(x, 2, mode=mode, grid_mode=True),
    @pytest.mark.parametrize('order', range(0, 6))
    def test_rotate01(self, order):
        # A rotation of 0 degrees must be the identity for every order.
        data = numpy.array([[0, 0, 0, 0],
                            [0, 1, 1, 0],
                            [0, 0, 0, 0]], dtype=numpy.float64)
        out = ndimage.rotate(data, 0, order=order)
        assert_array_almost_equal(out, data)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_rotate02(self, order):
        # A 90-degree rotation with reshape=True (the default) swaps the
        # array dimensions (3x4 input -> 4x3 output).
        data = numpy.array([[0, 0, 0, 0],
                            [0, 1, 0, 0],
                            [0, 0, 0, 0]], dtype=numpy.float64)
        expected = numpy.array([[0, 0, 0],
                                [0, 0, 0],
                                [0, 1, 0],
                                [0, 0, 0]], dtype=numpy.float64)
        out = ndimage.rotate(data, 90, order=order)
        assert_array_almost_equal(out, expected)
    @pytest.mark.parametrize('order', range(0, 6))
    @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
    def test_rotate03(self, order, dtype):
        # 90-degree rotation with dimension swap, checked for both real
        # and complex input dtypes.
        data = numpy.array([[0, 0, 0, 0, 0],
                            [0, 1, 1, 0, 0],
                            [0, 0, 0, 0, 0]], dtype=dtype)
        expected = numpy.array([[0, 0, 0],
                                [0, 0, 0],
                                [0, 1, 0],
                                [0, 1, 0],
                                [0, 0, 0]], dtype=dtype)
        if data.dtype.kind == 'c':
            # give the complex case a nonzero imaginary part as well
            data -= 1j * data
            expected -= 1j * expected
        out = ndimage.rotate(data, 90, order=order)
        assert_array_almost_equal(out, expected)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_rotate04(self, order):
        # With reshape=False the output keeps the input shape, so part of
        # the rotated content is clipped.
        data = numpy.array([[0, 0, 0, 0, 0],
                            [0, 1, 1, 0, 0],
                            [0, 0, 0, 0, 0]], dtype=numpy.float64)
        expected = numpy.array([[0, 0, 0, 0, 0],
                                [0, 0, 1, 0, 0],
                                [0, 0, 1, 0, 0]], dtype=numpy.float64)
        out = ndimage.rotate(data, 90, reshape=False, order=order)
        assert_array_almost_equal(out, expected)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_rotate05(self, order):
        # 3-D input: rotation acts on the first two axes (the default
        # axes); every slice along the third axis is rotated identically.
        data = numpy.empty((4, 3, 3))
        for i in range(3):
            data[:, :, i] = numpy.array([[0, 0, 0],
                                         [0, 1, 0],
                                         [0, 1, 0],
                                         [0, 0, 0]], dtype=numpy.float64)
        expected = numpy.array([[0, 0, 0, 0],
                                [0, 1, 1, 0],
                                [0, 0, 0, 0]], dtype=numpy.float64)
        out = ndimage.rotate(data, 90, order=order)
        for i in range(3):
            assert_array_almost_equal(out[:, :, i], expected)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_rotate06(self, order):
        # Same 3-D per-slice check as test_rotate05 with the transposed
        # starting orientation.
        data = numpy.empty((3, 4, 3))
        for i in range(3):
            data[:, :, i] = numpy.array([[0, 0, 0, 0],
                                         [0, 1, 1, 0],
                                         [0, 0, 0, 0]], dtype=numpy.float64)
        expected = numpy.array([[0, 0, 0],
                                [0, 1, 0],
                                [0, 1, 0],
                                [0, 0, 0]], dtype=numpy.float64)
        out = ndimage.rotate(data, 90, order=order)
        for i in range(3):
            assert_array_almost_equal(out[:, :, i], expected)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_rotate07(self, order):
        # Rotation with an explicit axes=(0, 1) selection on a transposed
        # 3-D array; reshape=True swaps those two axes' lengths.
        data = numpy.array([[[0, 0, 0, 0, 0],
                             [0, 1, 1, 0, 0],
                             [0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
        data = data.transpose()
        expected = numpy.array([[[0, 0, 0],
                                 [0, 1, 0],
                                 [0, 1, 0],
                                 [0, 0, 0],
                                 [0, 0, 0]]] * 2, dtype=numpy.float64)
        expected = expected.transpose([2, 1, 0])
        out = ndimage.rotate(data, 90, axes=(0, 1), order=order)
        assert_array_almost_equal(out, expected)
    @pytest.mark.parametrize('order', range(0, 6))
    def test_rotate08(self, order):
        # Same axes=(0, 1) rotation as test_rotate07 but with
        # reshape=False, so the shape is preserved and content clipped.
        data = numpy.array([[[0, 0, 0, 0, 0],
                             [0, 1, 1, 0, 0],
                             [0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
        data = data.transpose()
        expected = numpy.array([[[0, 0, 1, 0, 0],
                                 [0, 0, 1, 0, 0],
                                 [0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
        expected = expected.transpose()
        out = ndimage.rotate(data, 90, axes=(0, 1), reshape=False, order=order)
        assert_array_almost_equal(out, expected)
    def test_rotate09(self):
        # An axis index equal to data.ndim is out of range and must raise
        # ValueError.
        data = numpy.array([[0, 0, 0, 0, 0],
                            [0, 1, 1, 0, 0],
                            [0, 0, 0, 0, 0]] * 2, dtype=numpy.float64)
        with assert_raises(ValueError):
            ndimage.rotate(data, 90, axes=(0, data.ndim))
    def test_rotate10(self):
        # Regression pin for a non-right-angle rotation (12 degrees,
        # reshape=False): compare against values recorded from the
        # implementation before its refactoring.
        data = numpy.arange(45, dtype=numpy.float64).reshape((3, 5, 3))
        # The output of ndimage.rotate before refactoring
        expected = numpy.array([[[0.0, 0.0, 0.0],
                                 [0.0, 0.0, 0.0],
                                 [6.54914793, 7.54914793, 8.54914793],
                                 [10.84520162, 11.84520162, 12.84520162],
                                 [0.0, 0.0, 0.0]],
                                [[6.19286575, 7.19286575, 8.19286575],
                                 [13.4730712, 14.4730712, 15.4730712],
                                 [21.0, 22.0, 23.0],
                                 [28.5269288, 29.5269288, 30.5269288],
                                 [35.80713425, 36.80713425, 37.80713425]],
                                [[0.0, 0.0, 0.0],
                                 [31.15479838, 32.15479838, 33.15479838],
                                 [35.45085207, 36.45085207, 37.45085207],
                                 [0.0, 0.0, 0.0],
                                 [0.0, 0.0, 0.0]]])
        out = ndimage.rotate(data, angle=12, reshape=False)
        assert_array_almost_equal(out, expected)
def test_rotate_exact_180(self):
a = numpy.tile(numpy.arange(5), (5, 1))
b = ndimage.rotate(ndimage.rotate(a, 180), -180)
assert_equal(a, b)
def test_zoom_output_shape():
    """Regression test for ticket #643.

    zoom must accept a preallocated output array whose shape matches the
    zoomed size.
    """
    src = numpy.arange(12).reshape((3, 4))
    ndimage.zoom(src, 2, output=numpy.zeros((6, 8)))
| 54,771
| 40.243976
| 79
|
py
|
scipy
|
scipy-main/scipy/ndimage/tests/test_morphology.py
|
import numpy
import numpy as np
from numpy.testing import (assert_, assert_equal, assert_array_equal,
assert_array_almost_equal)
import pytest
from pytest import raises as assert_raises
from scipy import ndimage
from . import types
class TestNdimageMorphology:
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_bf01(self, dtype):
        # brute force (bf) distance transform
        # Euclidean metric on a diamond-shaped foreground; distances are
        # compared squared (out * out) so the expected table stays integer.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out, ft = ndimage.distance_transform_bf(data, 'euclidean',
                                                return_indices=True)
        expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 1, 1, 0, 0, 0],
                    [0, 0, 1, 2, 4, 2, 1, 0, 0],
                    [0, 0, 1, 4, 8, 4, 1, 0, 0],
                    [0, 0, 1, 2, 4, 2, 1, 0, 0],
                    [0, 0, 0, 1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0]]
        assert_array_almost_equal(out * out, expected)
        # ft is the feature transform: per-axis index arrays pinned
        # against a precomputed reference.
        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
                     [2, 2, 2, 2, 1, 2, 2, 2, 2],
                     [3, 3, 3, 2, 1, 2, 3, 3, 3],
                     [4, 4, 4, 4, 6, 4, 4, 4, 4],
                     [5, 5, 6, 6, 7, 6, 6, 5, 5],
                     [6, 6, 6, 7, 7, 7, 6, 6, 6],
                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 2, 4, 6, 6, 7, 8],
                     [0, 1, 1, 2, 4, 6, 7, 7, 8],
                     [0, 1, 1, 1, 6, 7, 7, 7, 8],
                     [0, 1, 2, 2, 4, 6, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
        assert_array_almost_equal(ft, expected)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_bf02(self, dtype):
        # Brute-force distance transform with the cityblock (L1) metric;
        # distances are small integers so they are compared directly.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out, ft = ndimage.distance_transform_bf(data, 'cityblock',
                                                return_indices=True)
        expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 1, 1, 0, 0, 0],
                    [0, 0, 1, 2, 2, 2, 1, 0, 0],
                    [0, 0, 1, 2, 3, 2, 1, 0, 0],
                    [0, 0, 1, 2, 2, 2, 1, 0, 0],
                    [0, 0, 0, 1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0]]
        assert_array_almost_equal(out, expected)
        # feature transform pinned against a precomputed reference
        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
                     [2, 2, 2, 2, 1, 2, 2, 2, 2],
                     [3, 3, 3, 3, 1, 3, 3, 3, 3],
                     [4, 4, 4, 4, 7, 4, 4, 4, 4],
                     [5, 5, 6, 7, 7, 7, 6, 5, 5],
                     [6, 6, 6, 7, 7, 7, 6, 6, 6],
                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 2, 4, 6, 6, 7, 8],
                     [0, 1, 1, 1, 4, 7, 7, 7, 8],
                     [0, 1, 1, 1, 4, 7, 7, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
        assert_array_almost_equal(expected, ft)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_bf03(self, dtype):
        # Brute-force distance transform with the chessboard (Linf)
        # metric.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out, ft = ndimage.distance_transform_bf(data, 'chessboard',
                                                return_indices=True)
        expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 1, 1, 0, 0, 0],
                    [0, 0, 1, 1, 2, 1, 1, 0, 0],
                    [0, 0, 1, 2, 2, 2, 1, 0, 0],
                    [0, 0, 1, 1, 2, 1, 1, 0, 0],
                    [0, 0, 0, 1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0]]
        assert_array_almost_equal(out, expected)
        # feature transform pinned against a precomputed reference
        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
                     [2, 2, 2, 2, 1, 2, 2, 2, 2],
                     [3, 3, 4, 2, 2, 2, 4, 3, 3],
                     [4, 4, 5, 6, 6, 6, 5, 4, 4],
                     [5, 5, 6, 6, 7, 6, 6, 5, 5],
                     [6, 6, 6, 7, 7, 7, 6, 6, 6],
                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 2, 5, 6, 6, 7, 8],
                     [0, 1, 1, 2, 6, 6, 7, 7, 8],
                     [0, 1, 1, 2, 6, 7, 7, 7, 8],
                     [0, 1, 2, 2, 6, 6, 7, 7, 8],
                     [0, 1, 2, 4, 5, 6, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
        assert_array_almost_equal(ft, expected)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_bf04(self, dtype):
        # Every supported combination of returned vs. preallocated
        # (in-place) distances/indices must produce the same result as the
        # plain return_indices call.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        # reference result
        tdt, tft = ndimage.distance_transform_bf(data, return_indices=1)
        dts = []
        fts = []
        # distances written in place, nothing returned
        dt = numpy.zeros(data.shape, dtype=numpy.float64)
        ndimage.distance_transform_bf(data, distances=dt)
        dts.append(dt)
        # only indices returned
        ft = ndimage.distance_transform_bf(
            data, return_distances=False, return_indices=1)
        fts.append(ft)
        # indices written in place
        ft = numpy.indices(data.shape, dtype=numpy.int32)
        ndimage.distance_transform_bf(
            data, return_distances=False, return_indices=True, indices=ft)
        fts.append(ft)
        # both returned
        dt, ft = ndimage.distance_transform_bf(
            data, return_indices=1)
        dts.append(dt)
        fts.append(ft)
        # distances in place, indices returned
        dt = numpy.zeros(data.shape, dtype=numpy.float64)
        ft = ndimage.distance_transform_bf(
            data, distances=dt, return_indices=True)
        dts.append(dt)
        fts.append(ft)
        # indices in place, distances returned
        ft = numpy.indices(data.shape, dtype=numpy.int32)
        dt = ndimage.distance_transform_bf(
            data, return_indices=True, indices=ft)
        dts.append(dt)
        fts.append(ft)
        # both in place
        dt = numpy.zeros(data.shape, dtype=numpy.float64)
        ft = numpy.indices(data.shape, dtype=numpy.int32)
        ndimage.distance_transform_bf(
            data, distances=dt, return_indices=True, indices=ft)
        dts.append(dt)
        fts.append(ft)
        for dt in dts:
            assert_array_almost_equal(tdt, dt)
        for ft in fts:
            assert_array_almost_equal(tft, ft)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_bf05(self, dtype):
        # Euclidean brute-force transform with isotropic sampling [2, 2]:
        # squared distances scale by 4 relative to test_distance_transform_bf01.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out, ft = ndimage.distance_transform_bf(
            data, 'euclidean', return_indices=True, sampling=[2, 2])
        expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 4, 4, 4, 0, 0, 0],
                    [0, 0, 4, 8, 16, 8, 4, 0, 0],
                    [0, 0, 4, 16, 32, 16, 4, 0, 0],
                    [0, 0, 4, 8, 16, 8, 4, 0, 0],
                    [0, 0, 0, 4, 4, 4, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0]]
        assert_array_almost_equal(out * out, expected)
        # isotropic sampling leaves the feature transform unchanged
        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
                     [2, 2, 2, 2, 1, 2, 2, 2, 2],
                     [3, 3, 3, 2, 1, 2, 3, 3, 3],
                     [4, 4, 4, 4, 6, 4, 4, 4, 4],
                     [5, 5, 6, 6, 7, 6, 6, 5, 5],
                     [6, 6, 6, 7, 7, 7, 6, 6, 6],
                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 2, 4, 6, 6, 7, 8],
                     [0, 1, 1, 2, 4, 6, 7, 7, 8],
                     [0, 1, 1, 1, 6, 7, 7, 7, 8],
                     [0, 1, 2, 2, 4, 6, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
        assert_array_almost_equal(ft, expected)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_bf06(self, dtype):
        # Euclidean brute-force transform with anisotropic sampling
        # [2, 1]: rows are twice as far apart as columns, which changes
        # both the distances and the nearest-background choices.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out, ft = ndimage.distance_transform_bf(
            data, 'euclidean', return_indices=True, sampling=[2, 1])
        expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 4, 1, 0, 0, 0],
                    [0, 0, 1, 4, 8, 4, 1, 0, 0],
                    [0, 0, 1, 4, 9, 4, 1, 0, 0],
                    [0, 0, 1, 4, 8, 4, 1, 0, 0],
                    [0, 0, 0, 1, 4, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0]]
        assert_array_almost_equal(out * out, expected)
        # feature transform pinned against a precomputed reference
        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
                     [2, 2, 2, 2, 2, 2, 2, 2, 2],
                     [3, 3, 3, 3, 2, 3, 3, 3, 3],
                     [4, 4, 4, 4, 4, 4, 4, 4, 4],
                     [5, 5, 5, 5, 6, 5, 5, 5, 5],
                     [6, 6, 6, 6, 7, 6, 6, 6, 6],
                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 2, 6, 6, 6, 7, 8],
                     [0, 1, 1, 1, 6, 7, 7, 7, 8],
                     [0, 1, 1, 1, 7, 7, 7, 7, 8],
                     [0, 1, 1, 1, 6, 7, 7, 7, 8],
                     [0, 1, 2, 2, 4, 6, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
        assert_array_almost_equal(ft, expected)
    def test_distance_transform_bf07(self):
        # test input validation per discussion on PR #13302
        # Requesting neither distances nor indices is an error.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]])
        with assert_raises(RuntimeError):
            ndimage.distance_transform_bf(
                data, return_distances=False, return_indices=False
            )
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_cdt01(self, dtype):
        # chamfer type distance (cdt) transform
        # Cityblock cdt distances must match the brute-force reference;
        # the feature transform is pinned separately (nearest-background
        # ties can be broken differently than in the bf implementation).
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out, ft = ndimage.distance_transform_cdt(
            data, 'cityblock', return_indices=True)
        bf = ndimage.distance_transform_bf(data, 'cityblock')
        assert_array_almost_equal(bf, out)
        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
                     [2, 2, 2, 1, 1, 1, 2, 2, 2],
                     [3, 3, 2, 1, 1, 1, 2, 3, 3],
                     [4, 4, 4, 4, 1, 4, 4, 4, 4],
                     [5, 5, 5, 5, 7, 7, 6, 5, 5],
                     [6, 6, 6, 6, 7, 7, 6, 6, 6],
                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 1, 1, 4, 7, 7, 7, 8],
                     [0, 1, 1, 1, 4, 5, 6, 7, 8],
                     [0, 1, 2, 2, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
        assert_array_almost_equal(ft, expected)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_cdt02(self, dtype):
        # Chessboard cdt distances must match the brute-force reference;
        # the feature transform is pinned against a precomputed table.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out, ft = ndimage.distance_transform_cdt(data, 'chessboard',
                                                 return_indices=True)
        bf = ndimage.distance_transform_bf(data, 'chessboard')
        assert_array_almost_equal(bf, out)
        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
                     [2, 2, 2, 1, 1, 1, 2, 2, 2],
                     [3, 3, 2, 2, 1, 2, 2, 3, 3],
                     [4, 4, 3, 2, 2, 2, 3, 4, 4],
                     [5, 5, 4, 6, 7, 6, 4, 5, 5],
                     [6, 6, 6, 6, 7, 7, 6, 6, 6],
                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 2, 3, 4, 6, 7, 8],
                     [0, 1, 1, 2, 2, 6, 6, 7, 8],
                     [0, 1, 1, 1, 2, 6, 7, 7, 8],
                     [0, 1, 1, 2, 6, 6, 7, 7, 8],
                     [0, 1, 2, 2, 5, 6, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
        assert_array_almost_equal(ft, expected)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_cdt03(self, dtype):
        # Exercise every combination of preallocated output arrays
        # (distances=..., indices=...) and return flags, and verify all of
        # them agree with the plain return-value form.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        # Reference result: plain call returning both arrays.
        tdt, tft = ndimage.distance_transform_cdt(data, return_indices=True)
        dts = []
        fts = []
        # distances written in place
        dt = numpy.zeros(data.shape, dtype=numpy.int32)
        ndimage.distance_transform_cdt(data, distances=dt)
        dts.append(dt)
        # indices returned only
        ft = ndimage.distance_transform_cdt(
            data, return_distances=False, return_indices=True)
        fts.append(ft)
        # indices written in place
        ft = numpy.indices(data.shape, dtype=numpy.int32)
        ndimage.distance_transform_cdt(
            data, return_distances=False, return_indices=True, indices=ft)
        fts.append(ft)
        # both returned
        dt, ft = ndimage.distance_transform_cdt(
            data, return_indices=True)
        dts.append(dt)
        fts.append(ft)
        # distances in place, indices returned
        dt = numpy.zeros(data.shape, dtype=numpy.int32)
        ft = ndimage.distance_transform_cdt(
            data, distances=dt, return_indices=True)
        dts.append(dt)
        fts.append(ft)
        # indices in place, distances returned
        ft = numpy.indices(data.shape, dtype=numpy.int32)
        dt = ndimage.distance_transform_cdt(
            data, return_indices=True, indices=ft)
        dts.append(dt)
        fts.append(ft)
        # both in place
        dt = numpy.zeros(data.shape, dtype=numpy.int32)
        ft = numpy.indices(data.shape, dtype=numpy.int32)
        ndimage.distance_transform_cdt(data, distances=dt,
                                       return_indices=True, indices=ft)
        dts.append(dt)
        fts.append(ft)
        for dt in dts:
            assert_array_almost_equal(tdt, dt)
        for ft in fts:
            assert_array_almost_equal(tft, ft)
    def test_distance_transform_cdt04(self):
        # test input validation per discussion on PR #13302:
        # passing an indices output array while return_indices=False
        # must raise.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]])
        indices_out = numpy.zeros((data.ndim,) + data.shape, dtype=numpy.int32)
        with assert_raises(RuntimeError):
            ndimage.distance_transform_bf(
                data,
                return_distances=True,
                return_indices=False,
                indices=indices_out
            )
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_cdt05(self, dtype):
        # test custom metric type per discussion on issue #17381
        # -21 is the known regression value for an all-ones 3x3 metric
        # on this input.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        metric_arg = np.ones((3, 3))
        actual = ndimage.distance_transform_cdt(data, metric=metric_arg)
        assert actual.sum() == -21
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_edt01(self, dtype):
        # euclidean distance transform (edt)
        # Verify both the distance array against the brute-force euclidean
        # transform, and the feature transform by recomputing distances
        # from the returned nearest-background indices.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out, ft = ndimage.distance_transform_edt(data, return_indices=True)
        bf = ndimage.distance_transform_bf(data, 'euclidean')
        assert_array_almost_equal(bf, out)
        # Rebuild euclidean distances from the index offsets:
        # sqrt(sum over axes of (ft - identity_indices)**2), in place.
        dt = ft - numpy.indices(ft.shape[1:], dtype=ft.dtype)
        dt = dt.astype(numpy.float64)
        numpy.multiply(dt, dt, dt)
        dt = numpy.add.reduce(dt, axis=0)
        numpy.sqrt(dt, dt)
        assert_array_almost_equal(bf, dt)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_edt02(self, dtype):
        # Exercise every combination of preallocated output arrays
        # (distances=..., indices=...) and return flags for the euclidean
        # transform; all variants must match the plain return-value form.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        # Reference result.
        tdt, tft = ndimage.distance_transform_edt(data, return_indices=True)
        dts = []
        fts = []
        # distances written in place
        dt = numpy.zeros(data.shape, dtype=numpy.float64)
        ndimage.distance_transform_edt(data, distances=dt)
        dts.append(dt)
        # indices returned only (return_distances=0 is falsy on purpose)
        ft = ndimage.distance_transform_edt(
            data, return_distances=0, return_indices=True)
        fts.append(ft)
        # indices written in place
        ft = numpy.indices(data.shape, dtype=numpy.int32)
        ndimage.distance_transform_edt(
            data, return_distances=False, return_indices=True, indices=ft)
        fts.append(ft)
        # both returned
        dt, ft = ndimage.distance_transform_edt(
            data, return_indices=True)
        dts.append(dt)
        fts.append(ft)
        # distances in place, indices returned
        dt = numpy.zeros(data.shape, dtype=numpy.float64)
        ft = ndimage.distance_transform_edt(
            data, distances=dt, return_indices=True)
        dts.append(dt)
        fts.append(ft)
        # indices in place, distances returned
        ft = numpy.indices(data.shape, dtype=numpy.int32)
        dt = ndimage.distance_transform_edt(
            data, return_indices=True, indices=ft)
        dts.append(dt)
        fts.append(ft)
        # both in place
        dt = numpy.zeros(data.shape, dtype=numpy.float64)
        ft = numpy.indices(data.shape, dtype=numpy.int32)
        ndimage.distance_transform_edt(
            data, distances=dt, return_indices=True, indices=ft)
        dts.append(dt)
        fts.append(ft)
        for dt in dts:
            assert_array_almost_equal(tdt, dt)
        for ft in fts:
            assert_array_almost_equal(tft, ft)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_edt03(self, dtype):
        # Anisotropic sampling [2, 2]: edt must match the brute-force
        # euclidean transform with the same sampling.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        ref = ndimage.distance_transform_bf(data, 'euclidean', sampling=[2, 2])
        out = ndimage.distance_transform_edt(data, sampling=[2, 2])
        assert_array_almost_equal(ref, out)
    @pytest.mark.parametrize('dtype', types)
    def test_distance_transform_edt4(self, dtype):
        # Truly anisotropic sampling [2, 1] (different per-axis spacing).
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        ref = ndimage.distance_transform_bf(data, 'euclidean', sampling=[2, 1])
        out = ndimage.distance_transform_edt(data, sampling=[2, 1])
        assert_array_almost_equal(ref, out)
def test_distance_transform_edt5(self):
# Ticket #954 regression test
out = ndimage.distance_transform_edt(False)
assert_array_almost_equal(out, [0.])
    def test_distance_transform_edt6(self):
        # test input validation per discussion on PR #13302:
        # passing a distances output array while return_distances=False
        # must raise.
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0]])
        distances_out = numpy.zeros(data.shape, dtype=numpy.float64)
        with assert_raises(RuntimeError):
            ndimage.distance_transform_bf(
                data,
                return_indices=True,
                return_distances=False,
                distances=distances_out
            )
def test_generate_structure01(self):
struct = ndimage.generate_binary_structure(0, 1)
assert_array_almost_equal(struct, 1)
def test_generate_structure02(self):
struct = ndimage.generate_binary_structure(1, 1)
assert_array_almost_equal(struct, [1, 1, 1])
def test_generate_structure03(self):
struct = ndimage.generate_binary_structure(2, 1)
assert_array_almost_equal(struct, [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
def test_generate_structure04(self):
struct = ndimage.generate_binary_structure(2, 2)
assert_array_almost_equal(struct, [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
def test_iterate_structure01(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
out = ndimage.iterate_structure(struct, 2)
assert_array_almost_equal(out, [[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0]])
def test_iterate_structure02(self):
struct = [[0, 1],
[1, 1],
[0, 1]]
out = ndimage.iterate_structure(struct, 2)
assert_array_almost_equal(out, [[0, 0, 1],
[0, 1, 1],
[1, 1, 1],
[0, 1, 1],
[0, 0, 1]])
def test_iterate_structure03(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
out = ndimage.iterate_structure(struct, 2, 1)
expected = [[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0]]
assert_array_almost_equal(out[0], expected)
assert_equal(out[1], [2, 2])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion01(self, dtype):
        # 0-d scalar 1 survives erosion with the default border (0).
        data = numpy.ones([], dtype)
        out = ndimage.binary_erosion(data)
        assert_array_almost_equal(out, 1)
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion02(self, dtype):
        # 0-d scalar 1 survives erosion with border_value=1 as well.
        data = numpy.ones([], dtype)
        out = ndimage.binary_erosion(data, border_value=1)
        assert_array_almost_equal(out, 1)
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion03(self, dtype):
        # A single 1 is eroded away by the default 0 border.
        data = numpy.ones([1], dtype)
        out = ndimage.binary_erosion(data)
        assert_array_almost_equal(out, [0])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion04(self, dtype):
        # ... but survives when the border counts as foreground.
        data = numpy.ones([1], dtype)
        out = ndimage.binary_erosion(data, border_value=1)
        assert_array_almost_equal(out, [1])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion05(self, dtype):
        # Default 0 border strips the outermost elements.
        data = numpy.ones([3], dtype)
        out = ndimage.binary_erosion(data)
        assert_array_almost_equal(out, [0, 1, 0])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion06(self, dtype):
        # With border_value=1 nothing is stripped.
        data = numpy.ones([3], dtype)
        out = ndimage.binary_erosion(data, border_value=1)
        assert_array_almost_equal(out, [1, 1, 1])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion07(self, dtype):
        data = numpy.ones([5], dtype)
        out = ndimage.binary_erosion(data)
        assert_array_almost_equal(out, [0, 1, 1, 1, 0])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion08(self, dtype):
        data = numpy.ones([5], dtype)
        out = ndimage.binary_erosion(data, border_value=1)
        assert_array_almost_equal(out, [1, 1, 1, 1, 1])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion09(self, dtype):
        # A single interior hole wipes the whole array with a 0 border.
        data = numpy.ones([5], dtype)
        data[2] = 0
        out = ndimage.binary_erosion(data)
        assert_array_almost_equal(out, [0, 0, 0, 0, 0])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion10(self, dtype):
        # With border_value=1 only the hole's neighbourhood is eroded.
        data = numpy.ones([5], dtype)
        data[2] = 0
        out = ndimage.binary_erosion(data, border_value=1)
        assert_array_almost_equal(out, [1, 0, 0, 0, 1])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion11(self, dtype):
        # Structure [1, 0, 1] ignores the centre element.
        data = numpy.ones([5], dtype)
        data[2] = 0
        struct = [1, 0, 1]
        out = ndimage.binary_erosion(data, struct, border_value=1)
        assert_array_almost_equal(out, [1, 0, 1, 0, 1])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion12(self, dtype):
        # Negative origin shifts the structure left.
        data = numpy.ones([5], dtype)
        data[2] = 0
        struct = [1, 0, 1]
        out = ndimage.binary_erosion(data, struct, border_value=1, origin=-1)
        assert_array_almost_equal(out, [0, 1, 0, 1, 1])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion13(self, dtype):
        # Positive origin shifts the structure right.
        data = numpy.ones([5], dtype)
        data[2] = 0
        struct = [1, 0, 1]
        out = ndimage.binary_erosion(data, struct, border_value=1, origin=1)
        assert_array_almost_equal(out, [1, 1, 0, 1, 0])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion14(self, dtype):
        # Even-sized structure [1, 1].
        data = numpy.ones([5], dtype)
        data[2] = 0
        struct = [1, 1]
        out = ndimage.binary_erosion(data, struct, border_value=1)
        assert_array_almost_equal(out, [1, 1, 0, 0, 1])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion15(self, dtype):
        # Even-sized structure with shifted origin.
        data = numpy.ones([5], dtype)
        data[2] = 0
        struct = [1, 1]
        out = ndimage.binary_erosion(data, struct, border_value=1, origin=-1)
        assert_array_almost_equal(out, [1, 0, 0, 1, 1])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion16(self, dtype):
        # 2-D single-pixel arrays, with and without a foreground border.
        data = numpy.ones([1, 1], dtype)
        out = ndimage.binary_erosion(data, border_value=1)
        assert_array_almost_equal(out, [[1]])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion17(self, dtype):
        data = numpy.ones([1, 1], dtype)
        out = ndimage.binary_erosion(data)
        assert_array_almost_equal(out, [[0]])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion18(self, dtype):
        # A 1x3 row is fully eroded by the 0 border (every pixel touches it).
        data = numpy.ones([1, 3], dtype)
        out = ndimage.binary_erosion(data)
        assert_array_almost_equal(out, [[0, 0, 0]])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion19(self, dtype):
        data = numpy.ones([1, 3], dtype)
        out = ndimage.binary_erosion(data, border_value=1)
        assert_array_almost_equal(out, [[1, 1, 1]])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion20(self, dtype):
        # 3x3 of ones: only the centre survives a 0 border.
        data = numpy.ones([3, 3], dtype)
        out = ndimage.binary_erosion(data)
        assert_array_almost_equal(out, [[0, 0, 0],
                                        [0, 1, 0],
                                        [0, 0, 0]])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion21(self, dtype):
        data = numpy.ones([3, 3], dtype)
        out = ndimage.binary_erosion(data, border_value=1)
        assert_array_almost_equal(out, [[1, 1, 1],
                                        [1, 1, 1],
                                        [1, 1, 1]])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion22(self, dtype):
        # Irregular 2-D shape, default (cross-shaped) structure.
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 1, 0, 0],
                    [0, 0, 0, 1, 1, 0, 0, 0],
                    [0, 0, 1, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 1, 1],
                            [0, 0, 1, 1, 1, 1, 1, 1],
                            [0, 0, 1, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 1, 0],
                            [0, 1, 1, 0, 0, 1, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out = ndimage.binary_erosion(data, border_value=1)
        assert_array_almost_equal(out, expected)
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion23(self, dtype):
        # Same shape eroded with the full 3x3 (connectivity-2) structure.
        struct = ndimage.generate_binary_structure(2, 2)
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 1, 1],
                            [0, 0, 1, 1, 1, 1, 1, 1],
                            [0, 0, 1, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 1, 0],
                            [0, 1, 1, 0, 0, 1, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out = ndimage.binary_erosion(data, struct, border_value=1)
        assert_array_almost_equal(out, expected)
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion24(self, dtype):
        # Asymmetric 2x2 structure.
        struct = [[0, 1],
                  [1, 1]]
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 1, 1, 1],
                    [0, 0, 0, 1, 1, 1, 0, 0],
                    [0, 0, 1, 1, 1, 1, 0, 0],
                    [0, 0, 1, 0, 0, 0, 1, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 1, 1],
                            [0, 0, 1, 1, 1, 1, 1, 1],
                            [0, 0, 1, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 1, 0],
                            [0, 1, 1, 0, 0, 1, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out = ndimage.binary_erosion(data, struct, border_value=1)
        assert_array_almost_equal(out, expected)
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion25(self, dtype):
        # Hollow-centre (ring) structure: the centre pixel's own value is
        # irrelevant, only its four neighbours matter.
        struct = [[0, 1, 0],
                  [1, 0, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 1, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0, 0],
                    [0, 0, 1, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 1, 1],
                            [0, 0, 1, 1, 1, 0, 1, 1],
                            [0, 0, 1, 0, 1, 1, 0, 0],
                            [0, 1, 0, 1, 1, 1, 1, 0],
                            [0, 1, 1, 0, 0, 1, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out = ndimage.binary_erosion(data, struct, border_value=1)
        assert_array_almost_equal(out, expected)
    @pytest.mark.parametrize('dtype', types)
    def test_binary_erosion26(self, dtype):
        # Same ring structure shifted by origin=(-1, -1).
        struct = [[0, 1, 0],
                  [1, 0, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 1],
                    [0, 0, 0, 0, 1, 0, 0, 1],
                    [0, 0, 1, 0, 0, 0, 0, 0],
                    [0, 1, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 1]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 1, 1],
                            [0, 0, 1, 1, 1, 0, 1, 1],
                            [0, 0, 1, 0, 1, 1, 0, 0],
                            [0, 1, 0, 1, 1, 1, 1, 0],
                            [0, 1, 1, 0, 0, 1, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out = ndimage.binary_erosion(data, struct, border_value=1,
                                     origin=(-1, -1))
        assert_array_almost_equal(out, expected)
    def test_binary_erosion27(self):
        # Two iterations of cross erosion shrink a diamond to its centre.
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_erosion(data, struct, border_value=1,
                                     iterations=2)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion28(self):
        # Same as 27, writing into a preallocated output array.
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], bool)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_erosion(data, struct, border_value=1,
                               iterations=2, output=out)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion29(self):
        # Three iterations on a larger diamond.
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [1, 1, 1, 1, 1, 1, 1],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0]], bool)
        out = ndimage.binary_erosion(data, struct,
                                     border_value=1, iterations=3)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion30(self):
        # As 29 with a preallocated output, and again with output aliasing
        # the input (in-place erosion must still be correct).
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [1, 1, 1, 1, 1, 1, 1],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0]], bool)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_erosion(data, struct, border_value=1,
                               iterations=3, output=out)
        assert_array_almost_equal(out, expected)
        # test with output memory overlap
        ndimage.binary_erosion(data, struct, border_value=1,
                               iterations=3, output=data)
        assert_array_almost_equal(data, expected)
    def test_binary_erosion31(self):
        # Single iteration with a shifted origin and preallocated output.
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 1, 0, 0, 0, 0],
                    [0, 1, 1, 1, 0, 0, 0],
                    [1, 1, 1, 1, 1, 0, 1],
                    [0, 1, 1, 1, 0, 0, 0],
                    [0, 0, 1, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 1, 0, 0, 0, 1]]
        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [1, 1, 1, 1, 1, 1, 1],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0]], bool)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_erosion(data, struct, border_value=1,
                               iterations=1, output=out, origin=(-1, -1))
        assert_array_almost_equal(out, expected)
    def test_binary_erosion32(self):
        # Identical to test_binary_erosion27 (two iterations, cross).
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_erosion(data, struct,
                                     border_value=1, iterations=2)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion33(self):
        # iterations=-1 erodes until convergence; the mask freezes the
        # masked-out (mask == 0) pixels at their input values.
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 1, 1],
                    [0, 0, 0, 0, 0, 0, 1],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        mask = [[1, 1, 1, 1, 1, 0, 0],
                [1, 1, 1, 1, 1, 1, 0],
                [1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1]]
        data = numpy.array([[0, 0, 0, 0, 0, 1, 1],
                            [0, 0, 0, 1, 0, 0, 1],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_erosion(data, struct,
                                     border_value=1, mask=mask, iterations=-1)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion34(self):
        # Masked erosion: pixels where mask == 0 keep their input value.
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 1, 1, 1, 1, 1, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        mask = [[0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 1, 1, 1, 0, 0],
                [0, 0, 1, 0, 1, 0, 0],
                [0, 0, 1, 1, 1, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_erosion(data, struct,
                                     border_value=1, mask=mask)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion35(self):
        # Masked erosion with shifted origin and preallocated output; the
        # expected result is composed from the unmasked erosion (tmp) inside
        # the mask and the untouched input outside it.
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        mask = [[0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 1, 1, 1, 0, 0],
                [0, 0, 1, 0, 1, 0, 0],
                [0, 0, 1, 1, 1, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [1, 1, 1, 1, 1, 1, 1],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0]], bool)
        tmp = [[0, 0, 1, 0, 0, 0, 0],
               [0, 1, 1, 1, 0, 0, 0],
               [1, 1, 1, 1, 1, 0, 1],
               [0, 1, 1, 1, 0, 0, 0],
               [0, 0, 1, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 1, 0, 0, 0, 1]]
        expected = numpy.logical_and(tmp, mask)
        tmp = numpy.logical_and(data, numpy.logical_not(mask))
        expected = numpy.logical_or(expected, tmp)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_erosion(data, struct, border_value=1,
                               iterations=1, output=out,
                               origin=(-1, -1), mask=mask)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion36(self):
        # Ring structure + mask + shifted origin; expected composed the
        # same way as in test_binary_erosion35.
        struct = [[0, 1, 0],
                  [1, 0, 1],
                  [0, 1, 0]]
        mask = [[0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 1, 1, 1, 0, 0, 0],
                [0, 0, 1, 0, 1, 0, 0, 0],
                [0, 0, 1, 1, 1, 0, 0, 0],
                [0, 0, 1, 1, 1, 0, 0, 0],
                [0, 0, 1, 1, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
        tmp = [[0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 1],
               [0, 0, 0, 0, 1, 0, 0, 1],
               [0, 0, 1, 0, 0, 0, 0, 0],
               [0, 1, 0, 0, 1, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 1]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 1, 1],
                            [0, 0, 1, 1, 1, 0, 1, 1],
                            [0, 0, 1, 0, 1, 1, 0, 0],
                            [0, 1, 0, 1, 1, 1, 1, 0],
                            [0, 1, 1, 0, 0, 1, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]])
        expected = numpy.logical_and(tmp, mask)
        tmp = numpy.logical_and(data, numpy.logical_not(mask))
        expected = numpy.logical_or(expected, tmp)
        out = ndimage.binary_erosion(data, struct, mask=mask,
                                     border_value=1, origin=(-1, -1))
        assert_array_almost_equal(out, expected)
def test_binary_erosion37(self):
a = numpy.array([[1, 0, 1],
[0, 1, 0],
[1, 0, 1]], dtype=bool)
b = numpy.zeros_like(a)
out = ndimage.binary_erosion(a, structure=a, output=b, iterations=0,
border_value=True, brute_force=True)
assert_(out is b)
assert_array_equal(
ndimage.binary_erosion(a, structure=a, iterations=0,
border_value=True),
b)
def test_binary_erosion38(self):
data = numpy.array([[1, 0, 1],
[0, 1, 0],
[1, 0, 1]], dtype=bool)
iterations = 2.0
with assert_raises(TypeError):
_ = ndimage.binary_erosion(data, iterations=iterations)
    def test_binary_erosion39(self):
        # iterations given as a numpy.int32 scalar must be accepted.
        iterations = numpy.int32(3)
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [1, 1, 1, 1, 1, 1, 1],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0]], bool)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_erosion(data, struct, border_value=1,
                               iterations=iterations, output=out)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion40(self):
        # iterations given as a numpy.int64 scalar must be accepted.
        iterations = numpy.int64(3)
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [1, 1, 1, 1, 1, 1, 1],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0]], bool)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_erosion(data, struct, border_value=1,
                               iterations=iterations, output=out)
        assert_array_almost_equal(out, expected)
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation01(self, dtype):
        # 0-d scalars: dilation preserves 1 and 0.
        data = numpy.ones([], dtype)
        out = ndimage.binary_dilation(data)
        assert_array_almost_equal(out, 1)
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation02(self, dtype):
        data = numpy.zeros([], dtype)
        out = ndimage.binary_dilation(data)
        assert_array_almost_equal(out, 0)
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation03(self, dtype):
        # 1-element arrays.
        data = numpy.ones([1], dtype)
        out = ndimage.binary_dilation(data)
        assert_array_almost_equal(out, [1])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation04(self, dtype):
        data = numpy.zeros([1], dtype)
        out = ndimage.binary_dilation(data)
        assert_array_almost_equal(out, [0])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation05(self, dtype):
        # All-ones and all-zeros inputs are fixed points of dilation.
        data = numpy.ones([3], dtype)
        out = ndimage.binary_dilation(data)
        assert_array_almost_equal(out, [1, 1, 1])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation06(self, dtype):
        data = numpy.zeros([3], dtype)
        out = ndimage.binary_dilation(data)
        assert_array_almost_equal(out, [0, 0, 0])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation07(self, dtype):
        # A lone 1 spreads to both neighbours.
        data = numpy.zeros([3], dtype)
        data[1] = 1
        out = ndimage.binary_dilation(data)
        assert_array_almost_equal(out, [1, 1, 1])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation08(self, dtype):
        # Two seeds whose dilations merge.
        data = numpy.zeros([5], dtype)
        data[1] = 1
        data[3] = 1
        out = ndimage.binary_dilation(data)
        assert_array_almost_equal(out, [1, 1, 1, 1, 1])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation09(self, dtype):
        data = numpy.zeros([5], dtype)
        data[1] = 1
        out = ndimage.binary_dilation(data)
        assert_array_almost_equal(out, [1, 1, 1, 0, 0])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation10(self, dtype):
        # Negative origin shifts the dilated region right.
        data = numpy.zeros([5], dtype)
        data[1] = 1
        out = ndimage.binary_dilation(data, origin=-1)
        assert_array_almost_equal(out, [0, 1, 1, 1, 0])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation11(self, dtype):
        # Positive origin shifts it left.
        data = numpy.zeros([5], dtype)
        data[1] = 1
        out = ndimage.binary_dilation(data, origin=1)
        assert_array_almost_equal(out, [1, 1, 0, 0, 0])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation12(self, dtype):
        # Hollow-centre structure: the seed itself is not kept.
        data = numpy.zeros([5], dtype)
        data[1] = 1
        struct = [1, 0, 1]
        out = ndimage.binary_dilation(data, struct)
        assert_array_almost_equal(out, [1, 0, 1, 0, 0])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation13(self, dtype):
        # border_value=1 also sets the last element via the border.
        data = numpy.zeros([5], dtype)
        data[1] = 1
        struct = [1, 0, 1]
        out = ndimage.binary_dilation(data, struct, border_value=1)
        assert_array_almost_equal(out, [1, 0, 1, 0, 1])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation14(self, dtype):
        data = numpy.zeros([5], dtype)
        data[1] = 1
        struct = [1, 0, 1]
        out = ndimage.binary_dilation(data, struct, origin=-1)
        assert_array_almost_equal(out, [0, 1, 0, 1, 0])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation15(self, dtype):
        data = numpy.zeros([5], dtype)
        data[1] = 1
        struct = [1, 0, 1]
        out = ndimage.binary_dilation(data, struct,
                                      origin=-1, border_value=1)
        assert_array_almost_equal(out, [1, 1, 0, 1, 0])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation16(self, dtype):
        # 2-D single-pixel and uniform inputs.
        data = numpy.ones([1, 1], dtype)
        out = ndimage.binary_dilation(data)
        assert_array_almost_equal(out, [[1]])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation17(self, dtype):
        data = numpy.zeros([1, 1], dtype)
        out = ndimage.binary_dilation(data)
        assert_array_almost_equal(out, [[0]])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation18(self, dtype):
        data = numpy.ones([1, 3], dtype)
        out = ndimage.binary_dilation(data)
        assert_array_almost_equal(out, [[1, 1, 1]])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation19(self, dtype):
        data = numpy.ones([3, 3], dtype)
        out = ndimage.binary_dilation(data)
        assert_array_almost_equal(out, [[1, 1, 1],
                                        [1, 1, 1],
                                        [1, 1, 1]])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation20(self, dtype):
        # A centre pixel dilates to the default cross shape.
        data = numpy.zeros([3, 3], dtype)
        data[1, 1] = 1
        out = ndimage.binary_dilation(data)
        assert_array_almost_equal(out, [[0, 1, 0],
                                        [1, 1, 1],
                                        [0, 1, 0]])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation21(self, dtype):
        # With the full 3x3 structure it dilates to the full block.
        struct = ndimage.generate_binary_structure(2, 2)
        data = numpy.zeros([3, 3], dtype)
        data[1, 1] = 1
        out = ndimage.binary_dilation(data, struct)
        assert_array_almost_equal(out, [[1, 1, 1],
                                        [1, 1, 1],
                                        [1, 1, 1]])
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation22(self, dtype):
        # Irregular 2-D input, default structure and border.
        expected = [[0, 1, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 0, 0, 0, 0, 0],
                    [0, 1, 0, 0, 0, 1, 0, 0],
                    [0, 0, 0, 1, 1, 1, 1, 0],
                    [0, 0, 1, 1, 1, 1, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 0, 1, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 0, 0],
                            [0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out = ndimage.binary_dilation(data)
        assert_array_almost_equal(out, expected)
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation23(self, dtype):
        # Same input with border_value=1: the foreground border floods in.
        expected = [[1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 0, 0, 0, 0, 1],
                    [1, 1, 0, 0, 0, 1, 0, 1],
                    [1, 0, 0, 1, 1, 1, 1, 1],
                    [1, 0, 1, 1, 1, 1, 0, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 0, 1, 0, 0, 1, 0, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 0, 0],
                            [0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out = ndimage.binary_dilation(data, border_value=1)
        assert_array_almost_equal(out, expected)
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation24(self, dtype):
        # origin=(1, 1) shifts the whole dilated pattern up-left.
        expected = [[1, 1, 0, 0, 0, 0, 0, 0],
                    [1, 0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 1, 1, 1, 1, 0, 0],
                    [0, 1, 1, 1, 1, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 0, 0],
                    [0, 1, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 0, 0],
                            [0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out = ndimage.binary_dilation(data, origin=(1, 1))
        assert_array_almost_equal(out, expected)
    @pytest.mark.parametrize('dtype', types)
    def test_binary_dilation25(self, dtype):
        # Shifted origin combined with a foreground border.
        expected = [[1, 1, 0, 0, 0, 0, 1, 1],
                    [1, 0, 0, 0, 1, 0, 1, 1],
                    [0, 0, 1, 1, 1, 1, 1, 1],
                    [0, 1, 1, 1, 1, 0, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1],
                    [0, 1, 0, 0, 1, 0, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 0, 0],
                            [0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
        out = ndimage.binary_dilation(data, origin=(1, 1), border_value=1)
        assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation26(self, dtype):
struct = ndimage.generate_binary_structure(2, 2)
expected = [[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], dtype)
out = ndimage.binary_dilation(data, struct)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation27(self, dtype):
struct = [[0, 1],
[1, 1]]
expected = [[0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], dtype)
out = ndimage.binary_dilation(data, struct)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation28(self, dtype):
expected = [[1, 1, 1, 1],
[1, 0, 0, 1],
[1, 0, 0, 1],
[1, 1, 1, 1]]
data = numpy.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype)
out = ndimage.binary_dilation(data, border_value=1)
assert_array_almost_equal(out, expected)
def test_binary_dilation29(self):
struct = [[0, 1],
[1, 1]]
expected = [[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0]], bool)
out = ndimage.binary_dilation(data, struct, iterations=2)
assert_array_almost_equal(out, expected)
def test_binary_dilation30(self):
struct = [[0, 1],
[1, 1]]
expected = [[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0]], bool)
out = numpy.zeros(data.shape, bool)
ndimage.binary_dilation(data, struct, iterations=2, output=out)
assert_array_almost_equal(out, expected)
def test_binary_dilation31(self):
struct = [[0, 1],
[1, 1]]
expected = [[0, 0, 0, 1, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 0],
[0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0]], bool)
out = ndimage.binary_dilation(data, struct, iterations=3)
assert_array_almost_equal(out, expected)
def test_binary_dilation32(self):
struct = [[0, 1],
[1, 1]]
expected = [[0, 0, 0, 1, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 0],
[0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0]], bool)
out = numpy.zeros(data.shape, bool)
ndimage.binary_dilation(data, struct, iterations=3, output=out)
assert_array_almost_equal(out, expected)
def test_binary_dilation33(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
out = ndimage.binary_dilation(data, struct, iterations=-1,
mask=mask, border_value=0)
assert_array_almost_equal(out, expected)
def test_binary_dilation34(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
data = numpy.zeros(mask.shape, bool)
out = ndimage.binary_dilation(data, struct, iterations=-1,
mask=mask, border_value=1)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_dilation35(self, dtype):
tmp = [[1, 1, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 1, 0, 1, 1],
[0, 0, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 0, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 0, 0, 1, 0, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1]]
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]])
mask = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
expected = numpy.logical_and(tmp, mask)
tmp = numpy.logical_and(data, numpy.logical_not(mask))
expected = numpy.logical_or(expected, tmp)
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], dtype)
out = ndimage.binary_dilation(data, mask=mask,
origin=(1, 1), border_value=1)
assert_array_almost_equal(out, expected)
def test_binary_propagation01(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
out = ndimage.binary_propagation(data, struct,
mask=mask, border_value=0)
assert_array_almost_equal(out, expected)
def test_binary_propagation02(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
data = numpy.zeros(mask.shape, bool)
out = ndimage.binary_propagation(data, struct,
mask=mask, border_value=1)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_opening01(self, dtype):
expected = [[0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 0, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], dtype)
out = ndimage.binary_opening(data)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_opening02(self, dtype):
struct = ndimage.generate_binary_structure(2, 2)
expected = [[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], dtype)
out = ndimage.binary_opening(data, struct)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_closing01(self, dtype):
expected = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 0, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], dtype)
out = ndimage.binary_closing(data)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('dtype', types)
def test_binary_closing02(self, dtype):
struct = ndimage.generate_binary_structure(2, 2)
expected = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], dtype)
out = ndimage.binary_closing(data, struct)
assert_array_almost_equal(out, expected)
def test_binary_fill_holes01(self):
expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
out = ndimage.binary_fill_holes(data)
assert_array_almost_equal(out, expected)
def test_binary_fill_holes02(self):
expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
out = ndimage.binary_fill_holes(data)
assert_array_almost_equal(out, expected)
def test_binary_fill_holes03(self):
expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 1, 1, 1],
[0, 1, 1, 1, 0, 1, 1, 1],
[0, 1, 1, 1, 0, 1, 1, 1],
[0, 0, 1, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1],
[0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 1, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
out = ndimage.binary_fill_holes(data)
assert_array_almost_equal(out, expected)
def test_grey_erosion01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.grey_erosion(array, footprint=footprint)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1]], output)
def test_grey_erosion01_overlap(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
ndimage.grey_erosion(array, footprint=footprint, output=array)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1]], array)
def test_grey_erosion02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
output = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1]], output)
def test_grey_erosion03(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[1, 1, 1], [1, 1, 1]]
output = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
assert_array_almost_equal([[1, 1, 0, 0, 0],
[1, 2, 0, 2, 0],
[4, 4, 2, 2, 0]], output)
def test_grey_dilation01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[0, 1, 1], [1, 0, 1]]
output = ndimage.grey_dilation(array, footprint=footprint)
assert_array_almost_equal([[7, 7, 9, 9, 5],
[7, 9, 8, 9, 7],
[8, 8, 8, 7, 7]], output)
def test_grey_dilation02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[0, 1, 1], [1, 0, 1]]
structure = [[0, 0, 0], [0, 0, 0]]
output = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
assert_array_almost_equal([[7, 7, 9, 9, 5],
[7, 9, 8, 9, 7],
[8, 8, 8, 7, 7]], output)
def test_grey_dilation03(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[0, 1, 1], [1, 0, 1]]
structure = [[1, 1, 1], [1, 1, 1]]
output = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
assert_array_almost_equal([[8, 8, 10, 10, 6],
[8, 10, 9, 10, 8],
[9, 9, 9, 8, 8]], output)
def test_grey_opening01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
tmp = ndimage.grey_erosion(array, footprint=footprint)
expected = ndimage.grey_dilation(tmp, footprint=footprint)
output = ndimage.grey_opening(array, footprint=footprint)
assert_array_almost_equal(expected, output)
def test_grey_opening02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = ndimage.grey_dilation(tmp, footprint=footprint,
structure=structure)
output = ndimage.grey_opening(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_grey_closing01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
tmp = ndimage.grey_dilation(array, footprint=footprint)
expected = ndimage.grey_erosion(tmp, footprint=footprint)
output = ndimage.grey_closing(array, footprint=footprint)
assert_array_almost_equal(expected, output)
def test_grey_closing02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
expected = ndimage.grey_erosion(tmp, footprint=footprint,
structure=structure)
output = ndimage.grey_closing(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_morphological_gradient01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 - tmp2
output = numpy.zeros(array.shape, array.dtype)
ndimage.morphological_gradient(array, footprint=footprint,
structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_morphological_gradient02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 - tmp2
output = ndimage.morphological_gradient(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_morphological_laplace01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 + tmp2 - 2 * array
output = numpy.zeros(array.shape, array.dtype)
ndimage.morphological_laplace(array, footprint=footprint,
structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_morphological_laplace02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 + tmp2 - 2 * array
output = ndimage.morphological_laplace(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_white_tophat01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_opening(array, footprint=footprint,
structure=structure)
expected = array - tmp
output = numpy.zeros(array.shape, array.dtype)
ndimage.white_tophat(array, footprint=footprint,
structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_white_tophat02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_opening(array, footprint=footprint,
structure=structure)
expected = array - tmp
output = ndimage.white_tophat(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_white_tophat03(self):
array = numpy.array([[1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1]], dtype=numpy.bool_)
structure = numpy.ones((3, 3), dtype=numpy.bool_)
expected = numpy.array([[0, 1, 1, 0, 0, 0, 0],
[1, 0, 0, 1, 1, 1, 0],
[1, 0, 0, 1, 1, 1, 0],
[0, 1, 1, 0, 0, 0, 1],
[0, 1, 1, 0, 1, 0, 1],
[0, 1, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 1, 1, 1]], dtype=numpy.bool_)
output = ndimage.white_tophat(array, structure=structure)
assert_array_equal(expected, output)
def test_white_tophat04(self):
array = numpy.eye(5, dtype=numpy.bool_)
structure = numpy.ones((3, 3), dtype=numpy.bool_)
# Check that type mismatch is properly handled
output = numpy.empty_like(array, dtype=numpy.float64)
ndimage.white_tophat(array, structure=structure, output=output)
def test_black_tophat01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_closing(array, footprint=footprint,
structure=structure)
expected = tmp - array
output = numpy.zeros(array.shape, array.dtype)
ndimage.black_tophat(array, footprint=footprint,
structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_black_tophat02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_closing(array, footprint=footprint,
structure=structure)
expected = tmp - array
output = ndimage.black_tophat(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_black_tophat03(self):
array = numpy.array([[1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1]], dtype=numpy.bool_)
structure = numpy.ones((3, 3), dtype=numpy.bool_)
expected = numpy.array([[0, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 0]], dtype=numpy.bool_)
output = ndimage.black_tophat(array, structure=structure)
assert_array_equal(expected, output)
def test_black_tophat04(self):
array = numpy.eye(5, dtype=numpy.bool_)
structure = numpy.ones((3, 3), dtype=numpy.bool_)
# Check that type mismatch is properly handled
output = numpy.empty_like(array, dtype=numpy.float64)
ndimage.black_tophat(array, structure=structure, output=output)
@pytest.mark.parametrize('dtype', types)
def test_hit_or_miss01(self, dtype):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
data = numpy.array([[0, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 0, 1, 1],
[0, 0, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 1],
[0, 1, 1, 1, 1],
[0, 0, 0, 0, 0]], dtype)
out = numpy.zeros(data.shape, bool)
ndimage.binary_hit_or_miss(data, struct, output=out)
assert_array_almost_equal(expected, out)
@pytest.mark.parametrize('dtype', types)
def test_hit_or_miss02(self, dtype):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0],
[1, 1, 1, 0, 0, 1, 0, 0],
[0, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], dtype)
out = ndimage.binary_hit_or_miss(data, struct)
assert_array_almost_equal(expected, out)
@pytest.mark.parametrize('dtype', types)
def test_hit_or_miss03(self, dtype):
struct1 = [[0, 0, 0],
[1, 1, 1],
[0, 0, 0]]
struct2 = [[1, 1, 1],
[0, 0, 0],
[1, 1, 1]]
expected = [[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], dtype)
out = ndimage.binary_hit_or_miss(data, struct1, struct2)
assert_array_almost_equal(expected, out)
class TestDilateFix:
    """Grey dilation with a flat structure must agree with binary dilation."""
    def setup_method(self):
        # dilation related setup
        self.array = numpy.array([[0, 0, 0, 0, 0],
                                  [0, 0, 0, 0, 0],
                                  [0, 0, 0, 1, 0],
                                  [0, 0, 1, 1, 0],
                                  [0, 0, 0, 0, 0]], dtype=numpy.uint8)
        self.sq3x3 = numpy.ones((3, 3))
        dilated3x3 = ndimage.binary_dilation(self.array, structure=self.sq3x3)
        # uint8 view so the boolean result compares against grey output
        self.dilated3x3 = dilated3x3.view(numpy.uint8)
    def test_dilation_square_structure(self):
        result = ndimage.grey_dilation(self.array, structure=self.sq3x3)
        # +1 accounts for difference between grey and binary dilation
        assert_array_almost_equal(result, self.dilated3x3 + 1)
    def test_dilation_scalar_size(self):
        # a scalar size builds a flat footprint, matching binary dilation
        result = ndimage.grey_dilation(self.array, size=3)
        assert_array_almost_equal(result, self.dilated3x3)
class TestBinaryOpeningClosing:
    """Extended positional signatures of binary_opening/closing keep results."""
    def setup_method(self):
        # 3x3 solid square plus an isolated corner pixel
        a = numpy.zeros((5, 5), dtype=bool)
        a[1:4, 1:4] = True
        a[4, 4] = True
        self.array = a
        self.sq3x3 = numpy.ones((3, 3))
        # results with the historical 5-argument positional call
        self.opened_old = ndimage.binary_opening(self.array, self.sq3x3,
                                                 1, None, 0)
        self.closed_old = ndimage.binary_closing(self.array, self.sq3x3,
                                                 1, None, 0)
    def test_opening_new_arguments(self):
        # appending output/origin/brute_force positionally must not change it
        opened_new = ndimage.binary_opening(self.array, self.sq3x3, 1, None,
                                            0, None, 0, False)
        assert_array_equal(opened_new, self.opened_old)
    def test_closing_new_arguments(self):
        # appending output/origin/brute_force positionally must not change it
        closed_new = ndimage.binary_closing(self.array, self.sq3x3, 1, None,
                                            0, None, 0, False)
        assert_array_equal(closed_new, self.closed_old)
def test_binary_erosion_noninteger_iterations():
    """Non-integer ``iterations`` raises TypeError (gh-9905, gh-9909)."""
    data = numpy.ones([1])
    for bad in (0.5, 1.5):
        assert_raises(TypeError, ndimage.binary_erosion, data, iterations=bad)
def test_binary_dilation_noninteger_iterations():
    """Non-integer ``iterations`` raises TypeError (gh-9905, gh-9909)."""
    data = numpy.ones([1])
    for bad in (0.5, 1.5):
        assert_raises(TypeError, ndimage.binary_dilation, data, iterations=bad)
def test_binary_opening_noninteger_iterations():
    """Non-integer ``iterations`` raises TypeError (gh-9905, gh-9909)."""
    data = numpy.ones([1])
    for bad in (0.5, 1.5):
        assert_raises(TypeError, ndimage.binary_opening, data, iterations=bad)
def test_binary_closing_noninteger_iterations():
    """Non-integer ``iterations`` raises TypeError (gh-9905, gh-9909)."""
    data = numpy.ones([1])
    for bad in (0.5, 1.5):
        assert_raises(TypeError, ndimage.binary_closing, data, iterations=bad)
def test_binary_closing_noninteger_brute_force_passes_when_true():
    """Non-bool ``brute_force`` values behave like their bool() coercion."""
    data = numpy.ones([1])
    for bf in (1.5, 0.0):
        with_coerced = ndimage.binary_erosion(data, iterations=2,
                                              brute_force=bool(bf))
        assert ndimage.binary_erosion(data, iterations=2,
                                      brute_force=bf) == with_coerced
@pytest.mark.parametrize(
'function',
['binary_erosion', 'binary_dilation', 'binary_opening', 'binary_closing'],
)
@pytest.mark.parametrize('iterations', [1, 5])
@pytest.mark.parametrize('brute_force', [False, True])
def test_binary_input_as_output(function, iterations, brute_force):
rstate = numpy.random.RandomState(123)
data = rstate.randint(low=0, high=2, size=100).astype(bool)
ndi_func = getattr(ndimage, function)
# input data is not modified
data_orig = data.copy()
expected = ndi_func(data, brute_force=brute_force, iterations=iterations)
assert_array_equal(data, data_orig)
# data should now contain the expected result
ndi_func(data, brute_force=brute_force, iterations=iterations, output=data)
assert_array_equal(expected, data)
def test_binary_hit_or_miss_input_as_output():
    """Hit-or-miss neither mutates its input nor breaks when output is input."""
    rng = numpy.random.RandomState(123)
    data = rng.randint(low=0, high=2, size=100).astype(bool)
    snapshot = data.copy()
    ref = ndimage.binary_hit_or_miss(data)
    # the input is untouched when a fresh output is returned
    assert_array_equal(data, snapshot)
    # writing into the input buffer must reproduce the same result
    ndimage.binary_hit_or_miss(data, output=data)
    assert_array_equal(ref, data)
def test_distance_transform_cdt_invalid_metric():
msg = 'invalid metric provided'
with pytest.raises(ValueError, match=msg):
ndimage.distance_transform_cdt(np.ones((5, 5)),
metric="garbage")
| 106,686
| 43.527129
| 79
|
py
|
scipy
|
scipy-main/scipy/ndimage/tests/test_fourier.py
|
import numpy
from numpy import fft
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
assert_equal)
import pytest
from scipy import ndimage
class TestNdimageFourier:
    """Tests of the ndimage Fourier-domain filters.

    Pattern used throughout: transform an impulse (or ramp) into the
    frequency domain, apply the filter there, transform back, and check an
    invariant (total sum preserved, or content shifted by one sample).
    Real variants use rfft/irfft along axis 0 and pass ``shape[0]`` as the
    length of that pre-transform axis; complex variants pass ``-1``.
    """
    @pytest.mark.parametrize('shape', [(32, 16), (31, 15), (1, 10)])
    @pytest.mark.parametrize('dtype, dec',
                             [(numpy.float32, 6), (numpy.float64, 14)])
    def test_fourier_gaussian_real01(self, shape, dtype, dec):
        a = numpy.zeros(shape, dtype)
        a[0, 0] = 1.0
        a = fft.rfft(a, shape[0], 0)
        a = fft.fft(a, shape[1], 1)
        a = ndimage.fourier_gaussian(a, [5.0, 2.5], shape[0], 0)
        a = fft.ifft(a, shape[1], 1)
        a = fft.irfft(a, shape[0], 0)
        # a Gaussian blur redistributes but preserves the impulse's mass
        assert_almost_equal(ndimage.sum(a), 1, decimal=dec)
    @pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
    @pytest.mark.parametrize('dtype, dec',
                             [(numpy.complex64, 6), (numpy.complex128, 14)])
    def test_fourier_gaussian_complex01(self, shape, dtype, dec):
        a = numpy.zeros(shape, dtype)
        a[0, 0] = 1.0
        a = fft.fft(a, shape[0], 0)
        a = fft.fft(a, shape[1], 1)
        a = ndimage.fourier_gaussian(a, [5.0, 2.5], -1, 0)
        a = fft.ifft(a, shape[1], 1)
        a = fft.ifft(a, shape[0], 0)
        assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec)
    @pytest.mark.parametrize('shape', [(32, 16), (31, 15), (1, 10)])
    @pytest.mark.parametrize('dtype, dec',
                             [(numpy.float32, 6), (numpy.float64, 14)])
    def test_fourier_uniform_real01(self, shape, dtype, dec):
        a = numpy.zeros(shape, dtype)
        a[0, 0] = 1.0
        a = fft.rfft(a, shape[0], 0)
        a = fft.fft(a, shape[1], 1)
        a = ndimage.fourier_uniform(a, [5.0, 2.5], shape[0], 0)
        a = fft.ifft(a, shape[1], 1)
        a = fft.irfft(a, shape[0], 0)
        # a uniform (box) filter also preserves total mass
        assert_almost_equal(ndimage.sum(a), 1.0, decimal=dec)
    @pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
    @pytest.mark.parametrize('dtype, dec',
                             [(numpy.complex64, 6), (numpy.complex128, 14)])
    def test_fourier_uniform_complex01(self, shape, dtype, dec):
        a = numpy.zeros(shape, dtype)
        a[0, 0] = 1.0
        a = fft.fft(a, shape[0], 0)
        a = fft.fft(a, shape[1], 1)
        a = ndimage.fourier_uniform(a, [5.0, 2.5], -1, 0)
        a = fft.ifft(a, shape[1], 1)
        a = fft.ifft(a, shape[0], 0)
        assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec)
    @pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
    @pytest.mark.parametrize('dtype, dec',
                             [(numpy.float32, 4), (numpy.float64, 11)])
    def test_fourier_shift_real01(self, shape, dtype, dec):
        expected = numpy.arange(shape[0] * shape[1], dtype=dtype)
        expected.shape = shape
        a = fft.rfft(expected, shape[0], 0)
        a = fft.fft(a, shape[1], 1)
        a = ndimage.fourier_shift(a, [1, 1], shape[0], 0)
        a = fft.ifft(a, shape[1], 1)
        a = fft.irfft(a, shape[0], 0)
        # shifting by (1, 1) moves each sample one step along both axes
        assert_array_almost_equal(a[1:, 1:], expected[:-1, :-1],
                                  decimal=dec)
        assert_array_almost_equal(a.imag, numpy.zeros(shape),
                                  decimal=dec)
    @pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
    @pytest.mark.parametrize('dtype, dec',
                             [(numpy.complex64, 6), (numpy.complex128, 11)])
    def test_fourier_shift_complex01(self, shape, dtype, dec):
        expected = numpy.arange(shape[0] * shape[1], dtype=dtype)
        expected.shape = shape
        a = fft.fft(expected, shape[0], 0)
        a = fft.fft(a, shape[1], 1)
        a = ndimage.fourier_shift(a, [1, 1], -1, 0)
        a = fft.ifft(a, shape[1], 1)
        a = fft.ifft(a, shape[0], 0)
        assert_array_almost_equal(a.real[1:, 1:], expected[:-1, :-1],
                                  decimal=dec)
        assert_array_almost_equal(a.imag, numpy.zeros(shape),
                                  decimal=dec)
    @pytest.mark.parametrize('shape', [(32, 16), (31, 15), (1, 10)])
    @pytest.mark.parametrize('dtype, dec',
                             [(numpy.float32, 5), (numpy.float64, 14)])
    def test_fourier_ellipsoid_real01(self, shape, dtype, dec):
        a = numpy.zeros(shape, dtype)
        a[0, 0] = 1.0
        a = fft.rfft(a, shape[0], 0)
        a = fft.fft(a, shape[1], 1)
        a = ndimage.fourier_ellipsoid(a, [5.0, 2.5],
                                      shape[0], 0)
        a = fft.ifft(a, shape[1], 1)
        a = fft.irfft(a, shape[0], 0)
        assert_almost_equal(ndimage.sum(a), 1.0, decimal=dec)
    @pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
    @pytest.mark.parametrize('dtype, dec',
                             [(numpy.complex64, 5), (numpy.complex128, 14)])
    def test_fourier_ellipsoid_complex01(self, shape, dtype, dec):
        a = numpy.zeros(shape, dtype)
        a[0, 0] = 1.0
        a = fft.fft(a, shape[0], 0)
        a = fft.fft(a, shape[1], 1)
        a = ndimage.fourier_ellipsoid(a, [5.0, 2.5], -1, 0)
        a = fft.ifft(a, shape[1], 1)
        a = fft.ifft(a, shape[0], 0)
        assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec)
    def test_fourier_ellipsoid_unimplemented_ndim(self):
        # arrays with ndim > 3 raise NotImplementedError
        x = numpy.ones((4, 6, 8, 10), dtype=numpy.complex128)
        with pytest.raises(NotImplementedError):
            ndimage.fourier_ellipsoid(x, 3)
    def test_fourier_ellipsoid_1d_complex(self):
        # expected result of 1d ellipsoid is the same as for fourier_uniform
        for shape in [(32, ), (31, )]:
            for type_, dec in zip([numpy.complex64, numpy.complex128],
                                  [5, 14]):
                x = numpy.ones(shape, dtype=type_)
                a = ndimage.fourier_ellipsoid(x, 5, -1, 0)
                b = ndimage.fourier_uniform(x, 5, -1, 0)
                assert_array_almost_equal(a, b, decimal=dec)
    @pytest.mark.parametrize('shape', [(0, ), (0, 10), (10, 0)])
    @pytest.mark.parametrize('dtype',
                             [numpy.float32, numpy.float64,
                              numpy.complex64, numpy.complex128])
    @pytest.mark.parametrize('test_func',
                             [ndimage.fourier_ellipsoid,
                              ndimage.fourier_gaussian,
                              ndimage.fourier_uniform])
    def test_fourier_zero_length_dims(self, shape, dtype, test_func):
        # zero-sized axes must pass through unchanged rather than error
        a = numpy.ones(shape, dtype)
        b = test_func(a, 3)
        assert_equal(a, b)
| 6,664
| 42.848684
| 76
|
py
|
scipy
|
scipy-main/scipy/ndimage/tests/test_datatypes.py
|
""" Testing data types for ndimage calls
"""
import sys
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_
import pytest
from scipy import ndimage
def test_map_coordinates_dts():
    # ndimage interpolation routines must accept every common integer and
    # floating dtype for both the image data and the coordinate arrays.
    data = np.array([[4, 1, 3, 2],
                     [7, 6, 8, 5],
                     [3, 5, 3, 6]])
    shifted_data = np.array([[0, 0, 0, 0],
                             [0, 4, 1, 3],
                             [0, 7, 6, 8]])
    idx = np.indices(data.shape)
    dts = (np.uint8, np.uint16, np.uint32, np.uint64,
           np.int8, np.int16, np.int32, np.int64,
           np.intp, np.uintp, np.float32, np.float64)
    for order in range(6):
        for data_dt in dts:
            these_data = data.astype(data_dt)
            for coord_dt in dts:
                # identity affine mapping must reproduce the input
                out = ndimage.affine_transform(
                    these_data,
                    np.eye(2, dtype=coord_dt),
                    np.zeros((2,), dtype=coord_dt))
                assert_array_almost_equal(these_data, out)
                # map_coordinates with coordinates shifted by -1
                out = ndimage.map_coordinates(these_data,
                                              idx.astype(coord_dt) - 1,
                                              order=order)
                assert_array_almost_equal(out, shifted_data)
                # far out-of-range coordinates hit the constant fill value
                out = ndimage.map_coordinates(these_data,
                                              idx.astype(coord_dt) + 10,
                                              order=order)
                assert_array_almost_equal(out, np.zeros((3, 4)))
            # shift and zoom must accept the same data dtypes
            assert_array_almost_equal(ndimage.shift(these_data, 1),
                                      shifted_data)
            assert_array_almost_equal(ndimage.zoom(these_data, 1),
                                      these_data)
@pytest.mark.xfail(not sys.platform == 'darwin', reason="runs only on darwin")
def test_uint64_max():
    # Interpolation must not lose the high bits of uint64 values.  Known to
    # fail where the toolchain converts uint64 -> double through a signed
    # int64 (e.g. 32-bit MSVC, Debian s390x) — hence the xfail above.
    # Interpolation is always done in double precision, so use the largest
    # uint64 for which int(float(big)) still fits in a uint64.
    big = 2**64 - 1025
    arr = np.full(3, big, dtype=np.uint64)
    expected = int(float(big))
    # geometric-transform path (map_coordinates, affine_transform)
    coords = np.indices(arr.shape) - 0.1
    out = ndimage.map_coordinates(arr, coords)
    assert_(out[1] == expected)
    assert_(out[2] == expected)
    # shift / zoom path
    out = ndimage.shift(arr, 0.1)
    assert_(out[1] == expected)
    assert_(out[2] == expected)
| 2,742
| 39.940299
| 82
|
py
|
scipy
|
scipy-main/scipy/ndimage/tests/__init__.py
|
# Shared dtype lists used across the scipy.ndimage test modules.
from __future__ import annotations
from typing import List, Type
import numpy
# list of numarray data types
integer_types: list[type] = [
    numpy.int8, numpy.uint8, numpy.int16, numpy.uint16,
    numpy.int32, numpy.uint32, numpy.int64, numpy.uint64]
float_types: list[type] = [numpy.float32, numpy.float64]
complex_types: list[type] = [numpy.complex64, numpy.complex128]
# note: 'types' deliberately excludes the complex dtypes
types: list[type] = integer_types + float_types
| 426
| 27.466667
| 63
|
py
|
scipy
|
scipy-main/scipy/ndimage/tests/test_c_api.py
|
import numpy as np
from numpy.testing import assert_allclose
from scipy import ndimage
from scipy.ndimage import _ctest
from scipy.ndimage import _cytest
from scipy._lib._ccallback import LowLevelCallable
# Each list below provides several equivalent wrappings of the same native
# callback: a ctypes-based capsule (_ctest), a Cython callback with and
# without an explicit signature (_cytest), and a LowLevelCallable built from
# a Cython capsule.  The tests check that all wrappings behave identically.
FILTER1D_FUNCTIONS = [
    lambda filter_size: _ctest.filter1d(filter_size),
    lambda filter_size: _cytest.filter1d(filter_size, with_signature=False),
    lambda filter_size: LowLevelCallable(_cytest.filter1d(filter_size, with_signature=True)),
    lambda filter_size: LowLevelCallable.from_cython(_cytest, "_filter1d",
                                                     _cytest.filter1d_capsule(filter_size)),
]
FILTER2D_FUNCTIONS = [
    lambda weights: _ctest.filter2d(weights),
    lambda weights: _cytest.filter2d(weights, with_signature=False),
    lambda weights: LowLevelCallable(_cytest.filter2d(weights, with_signature=True)),
    lambda weights: LowLevelCallable.from_cython(_cytest, "_filter2d", _cytest.filter2d_capsule(weights)),
]
TRANSFORM_FUNCTIONS = [
    lambda shift: _ctest.transform(shift),
    lambda shift: _cytest.transform(shift, with_signature=False),
    lambda shift: LowLevelCallable(_cytest.transform(shift, with_signature=True)),
    lambda shift: LowLevelCallable.from_cython(_cytest, "_transform", _cytest.transform_capsule(shift)),
]
def test_generic_filter():
    # Every low-level wrapper in FILTER2D_FUNCTIONS must produce the same
    # output as an equivalent pure-Python weighted-sum callback.
    def py_filter2d(footprint_elements, weights):
        return (weights * footprint_elements).sum()
    image = np.ones((20, 20))
    image[:10, :10] = 0
    footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    footprint_size = np.count_nonzero(footprint)
    weights = np.ones(footprint_size) / footprint_size
    reference = ndimage.generic_filter(image, py_filter2d,
                                       footprint=footprint,
                                       extra_arguments=(weights,))
    for j, make_callback in enumerate(FILTER2D_FUNCTIONS):
        result = ndimage.generic_filter(image, make_callback(weights),
                                        footprint=footprint)
        assert_allclose(result, reference, err_msg=f"#{j} failed")
def test_generic_filter1d():
    # Each wrapper in FILTER1D_FUNCTIONS implements a `filter_size`-sample
    # moving average; compare against a pure-Python version.
    def py_filter1d(input_line, output_line, filter_size):
        for i in range(output_line.size):
            acc = 0
            for k in range(filter_size):
                acc += input_line[i + k]
            output_line[i] = acc
        output_line /= filter_size
    filter_size = 3
    image = np.tile(np.hstack((np.zeros(10), np.ones(10))), (10, 1))
    reference = ndimage.generic_filter1d(image, py_filter1d, filter_size,
                                         extra_arguments=(filter_size,))
    for j, make_callback in enumerate(FILTER1D_FUNCTIONS):
        result = ndimage.generic_filter1d(image, make_callback(filter_size),
                                          filter_size)
        assert_allclose(result, reference, err_msg=f"#{j} failed")
def test_geometric_transform():
    # The native shift callbacks must match a pure-Python coordinate shift.
    def py_shift(output_coordinates, shift):
        return (output_coordinates[0] - shift,
                output_coordinates[1] - shift)
    shift = 0.5
    image = np.arange(12, dtype=np.float64).reshape(4, 3)
    reference = ndimage.geometric_transform(image, py_shift,
                                            extra_arguments=(shift,))
    for j, make_callback in enumerate(TRANSFORM_FUNCTIONS):
        result = ndimage.geometric_transform(image, make_callback(shift))
        assert_allclose(result, reference, err_msg=f"#{j} failed")
| 3,452
| 35.347368
| 106
|
py
|
scipy
|
scipy-main/scipy/ndimage/utils/generate_label_testvectors.py
|
import numpy as np
from scipy.ndimage import label
def generate_test_vecs(infile, strelfile, resultfile):
    """Generate regression test vectors for `scipy.ndimage.label`.

    Labels three 7x7 test images with every (deduplicated) 3x3 structuring
    element and writes the stacked inputs, structuring elements, and label
    results to the three given text files.
    """
    def bitimage(l):
        # Convert a list of '0'/'1' strings into a boolean image.
        return np.array([[c for c in s] for s in l]) == '1'
    data = [np.ones((7, 7)),
            bitimage(["1110111",
                      "1100011",
                      "1010101",
                      "0001000",
                      "1010101",
                      "1100011",
                      "1110111"]),
            bitimage(["1011101",
                      "0001000",
                      "1001001",
                      "1111111",
                      "1001001",
                      "0001000",
                      "1011101"])]
    strels = [np.ones((3, 3)),
              np.zeros((3, 3)),
              bitimage(["010", "111", "010"]),
              bitimage(["101", "010", "101"]),
              bitimage(["100", "010", "001"]),
              bitimage(["000", "111", "000"]),
              bitimage(["110", "010", "011"]),
              bitimage(["110", "111", "011"])]
    strels = strels + [np.flipud(s) for s in strels]
    strels = strels + [np.rot90(s) for s in strels]
    # Deduplicate via the raw bytes of each int array; np.frombuffer (rather
    # than the deprecated binary-mode np.fromstring, removed in NumPy 2.0)
    # reconstructs the 3x3 arrays from those bytes.
    strels = [np.frombuffer(s, dtype=int).reshape((3, 3))
              for s in {t.astype(int).tobytes() for t in strels}]
    inputs = np.vstack(data)
    results = np.vstack([label(d, s)[0] for d in data for s in strels])
    strels = np.vstack(strels)
    np.savetxt(infile, inputs, fmt="%d")
    np.savetxt(strelfile, strels, fmt="%d")
    np.savetxt(resultfile, results, fmt="%d")
generate_test_vecs("label_inputs.txt", "label_strels.txt", "label_results.txt")
| 1,669
| 36.111111
| 79
|
py
|
scipy
|
scipy-main/scipy/datasets/setup.py
|
def configuration(parent_package='', top_path=None):
    # numpy.distutils-based build configuration for the scipy.datasets
    # subpackage; only the tests data directory needs registering.
    from numpy.distutils.misc_util import Configuration
    config = Configuration('datasets', parent_package, top_path)
    config.add_data_dir('tests')
    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 346
| 30.545455
| 64
|
py
|
scipy
|
scipy-main/scipy/datasets/_utils.py
|
import os
import shutil
from ._registry import method_files_map
try:
import platformdirs
except ImportError:
platformdirs = None # type: ignore[assignment]
def _clear_cache(datasets, cache_dir=None, method_map=None):
if method_map is None:
# Use SciPy Datasets method map
method_map = method_files_map
if cache_dir is None:
# Use default cache_dir path
if platformdirs is None:
# platformdirs is pooch dependency
raise ImportError("Missing optional dependency 'pooch' required "
"for scipy.datasets module. Please use pip or "
"conda to install 'pooch'.")
cache_dir = platformdirs.user_cache_dir("scipy-data")
if not os.path.exists(cache_dir):
print(f"Cache Directory {cache_dir} doesn't exist. Nothing to clear.")
return
if datasets is None:
print(f"Cleaning the cache directory {cache_dir}!")
shutil.rmtree(cache_dir)
else:
if not isinstance(datasets, (list, tuple)):
# single dataset method passed should be converted to list
datasets = [datasets, ]
for dataset in datasets:
assert callable(dataset)
dataset_name = dataset.__name__ # Name of the dataset method
if dataset_name not in method_map:
raise ValueError(f"Dataset method {dataset_name} doesn't "
"exist. Please check if the passed dataset "
"is a subset of the following dataset "
f"methods: {list(method_map.keys())}")
data_files = method_map[dataset_name]
data_filepaths = [os.path.join(cache_dir, file)
for file in data_files]
for data_filepath in data_filepaths:
if os.path.exists(data_filepath):
print("Cleaning the file "
f"{os.path.split(data_filepath)[1]} "
f"for dataset {dataset_name}")
os.remove(data_filepath)
else:
print(f"Path {data_filepath} doesn't exist. "
"Nothing to clear.")
def clear_cache(datasets=None):
    """
    Cleans the scipy datasets cache directory.
    If a scipy.datasets method or a list/tuple of the same is
    provided, then clear_cache removes all the data files
    associated to the passed dataset method callable(s).
    By default, it removes all the cached data files.
    Parameters
    ----------
    datasets : callable or list/tuple of callable or None
        SciPy dataset method(s) whose cached files should be removed,
        e.g. ``scipy.datasets.ascent``. If None (the default), the whole
        ``scipy-data`` cache directory is removed.
    Examples
    --------
    >>> from scipy import datasets
    >>> ascent_array = datasets.ascent()
    >>> ascent_array.shape
    (512, 512)
    >>> datasets.clear_cache([datasets.ascent])
    Cleaning the file ascent.dat for dataset ascent
    """
    _clear_cache(datasets)
| 2,967
| 35.195122
| 78
|
py
|
scipy
|
scipy-main/scipy/datasets/_registry.py
|
##########################################################################
# This file serves as the dataset registry for SciPy Datasets SubModule.
##########################################################################
# To generate the SHA256 hash, use the command
# openssl sha256 <filename>
# SHA256 hashes used by pooch to verify each downloaded file.
registry = {
    "ascent.dat": "03ce124c1afc880f87b55f6b061110e2e1e939679184f5614e38dacc6c1957e2",
    "ecg.dat": "f20ad3365fb9b7f845d0e5c48b6fe67081377ee466c3a220b7f69f35c8958baf",
    "face.dat": "9d8b0b4d081313e2b485748c770472e5a95ed1738146883d84c7030493e82886"
}
# Per-file download URLs; each dataset lives in its own scipy/dataset-* repo.
registry_urls = {
    "ascent.dat": "https://raw.githubusercontent.com/scipy/dataset-ascent/main/ascent.dat",
    "ecg.dat": "https://raw.githubusercontent.com/scipy/dataset-ecg/main/ecg.dat",
    "face.dat": "https://raw.githubusercontent.com/scipy/dataset-face/main/face.dat"
}
# dataset method mapping with their associated filenames
# <method_name> : ["filename1", "filename2", ...]
method_files_map = {
    "ascent": ["ascent.dat"],
    "electrocardiogram": ["ecg.dat"],
    "face": ["face.dat"]
}
| 1,072
| 38.740741
| 91
|
py
|
scipy
|
scipy-main/scipy/datasets/_download_all.py
|
"""
Platform independent script to download all the
`scipy.datasets` module data files.
This doesn't require a full scipy build.
Run: python _download_all.py <download_dir>
"""
import argparse
try:
import pooch
except ImportError:
pooch = None
if __package__ is None or __package__ == '':
# Running as python script, use absolute import
import _registry # type: ignore
else:
# Running as python module, use relative import
from . import _registry
def download_all(path=None):
    """
    Utility method to download all the dataset files
    for `scipy.datasets` module.
    Parameters
    ----------
    path : str, optional
        Directory path to download all the dataset files.
        If None, default to the system cache_dir detected by pooch.
    """
    if pooch is None:
        raise ImportError("Missing optional dependency 'pooch' required "
                          "for scipy.datasets module. Please use pip or "
                          "conda to install 'pooch'.")
    if path is None:
        path = pooch.os_cache('scipy-data')
    # Fetch every registered file, verifying it against its SHA256 hash.
    for fname, known_hash in _registry.registry.items():
        pooch.retrieve(url=_registry.registry_urls[fname],
                       known_hash=known_hash,
                       fname=fname, path=path)
def main():
    """Parse the command line and download every dataset file."""
    parser = argparse.ArgumentParser(description='Download SciPy data files.')
    # Default to None so that the pooch cache directory is resolved lazily
    # inside download_all().  Evaluating pooch.os_cache() here would crash
    # with an AttributeError when pooch is not installed, bypassing the
    # informative ImportError raised by download_all().
    parser.add_argument("path", nargs='?', type=str,
                        default=None,
                        help="Directory path to download all the data files.")
    args = parser.parse_args()
    download_all(args.path)
if __name__ == "__main__":
    main()
| 1,701
| 28.344828
| 78
|
py
|
scipy
|
scipy-main/scipy/datasets/_fetchers.py
|
from numpy import array, frombuffer, load
from ._registry import registry, registry_urls
try:
    import pooch
except ImportError:
    # pooch is optional; fetch_data() raises an informative error without it.
    pooch = None
    data_fetcher = None
else:
    data_fetcher = pooch.create(
        # Use the default cache folder for the operating system
        # Pooch uses appdirs (https://github.com/ActiveState/appdirs) to
        # select an appropriate directory for the cache on each platform.
        path=pooch.os_cache("scipy-data"),
        # The remote data is on Github
        # base_url is a required param, even though we override this
        # using individual urls in the registry.
        base_url="https://github.com/scipy/",
        registry=registry,
        urls=registry_urls
    )
def fetch_data(dataset_name, data_fetcher=data_fetcher):
    """Return the local path of ``dataset_name``, downloading on first use."""
    if data_fetcher is not None:
        # "fetch" downloads (and hash-verifies) the file on first use, then
        # serves the cached copy; it returns the full local path.
        return data_fetcher.fetch(dataset_name)
    raise ImportError("Missing optional dependency 'pooch' required "
                      "for scipy.datasets module. Please use pip or "
                      "conda to install 'pooch'.")
def ascent():
    """
    Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy
    use in demos.
    The image is derived from accent-to-the-top.jpg at
    http://www.public-domain-image.com/people-public-domain-images-pictures/
    Parameters
    ----------
    None
    Returns
    -------
    ascent : ndarray
        convenient image to use for testing and demonstration
    Examples
    --------
    >>> import scipy.datasets
    >>> ascent = scipy.datasets.ascent()
    >>> ascent.shape
    (512, 512)
    >>> ascent.max()
    255
    >>> import matplotlib.pyplot as plt
    >>> plt.gray()
    >>> plt.imshow(ascent)
    >>> plt.show()
    """
    import pickle
    # The first call downloads the file via pooch; afterwards the cached
    # copy is used.  The file holds a pickled list of pixel rows.
    fname = fetch_data("ascent.dat")
    with open(fname, 'rb') as fh:
        return array(pickle.load(fh))
def electrocardiogram():
    """
    Load an electrocardiogram as an example for a 1-D signal.
    The returned signal is a 5 minute long electrocardiogram (ECG), a medical
    recording of the heart's electrical activity, sampled at 360 Hz.
    Returns
    -------
    ecg : ndarray
        The electrocardiogram in millivolt (mV) sampled at 360 Hz.
    Notes
    -----
    The provided signal is an excerpt (19:35 to 24:35) from the `record 208`_
    (lead MLII) provided by the MIT-BIH Arrhythmia Database [1]_ on
    PhysioNet [2]_. The excerpt includes noise induced artifacts, typical
    heartbeats as well as pathological changes.
    .. _record 208: https://physionet.org/physiobank/database/html/mitdbdir/records.htm#208
    .. versionadded:: 1.1.0
    References
    ----------
    .. [1] Moody GB, Mark RG. The impact of the MIT-BIH Arrhythmia Database.
           IEEE Eng in Med and Biol 20(3):45-50 (May-June 2001).
           (PMID: 11446209); :doi:`10.13026/C2F305`
    .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
           Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. PhysioBank,
           PhysioToolkit, and PhysioNet: Components of a New Research Resource
           for Complex Physiologic Signals. Circulation 101(23):e215-e220;
           :doi:`10.1161/01.CIR.101.23.e215`
    Examples
    --------
    >>> from scipy.datasets import electrocardiogram
    >>> ecg = electrocardiogram()
    >>> ecg
    array([-0.245, -0.215, -0.185, ..., -0.405, -0.395, -0.385])
    >>> ecg.shape, ecg.mean(), ecg.std()
    ((108000,), -0.16510875, 0.5992473991177294)
    As stated the signal features several areas with a different morphology.
    E.g., the first few seconds show the electrical activity of a heart in
    normal sinus rhythm as seen below.
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> fs = 360
    >>> time = np.arange(ecg.size) / fs
    >>> plt.plot(time, ecg)
    >>> plt.xlabel("time in s")
    >>> plt.ylabel("ECG in mV")
    >>> plt.xlim(9, 10.2)
    >>> plt.ylim(-1, 1.5)
    >>> plt.show()
    After second 16, however, the first premature ventricular contractions,
    also called extrasystoles, appear. These have a different morphology
    compared to typical heartbeats. The difference can easily be observed
    in the following plot.
    >>> plt.plot(time, ecg)
    >>> plt.xlabel("time in s")
    >>> plt.ylabel("ECG in mV")
    >>> plt.xlim(46.5, 50)
    >>> plt.ylim(-2, 1.5)
    >>> plt.show()
    At several points large artifacts disturb the recording, e.g.:
    >>> plt.plot(time, ecg)
    >>> plt.xlabel("time in s")
    >>> plt.ylabel("ECG in mV")
    >>> plt.xlim(207, 215)
    >>> plt.ylim(-2, 3.5)
    >>> plt.show()
    Finally, examining the power spectrum reveals that most of the biosignal is
    made up of lower frequencies. At 60 Hz the noise induced by the mains
    electricity can be clearly observed.
    >>> from scipy.signal import welch
    >>> f, Pxx = welch(ecg, fs=fs, nperseg=2048, scaling="spectrum")
    >>> plt.semilogy(f, Pxx)
    >>> plt.xlabel("Frequency in Hz")
    >>> plt.ylabel("Power spectrum of the ECG in mV**2")
    >>> plt.xlim(f[[0, -1]])
    >>> plt.show()
    """
    fname = fetch_data("ecg.dat")
    # the cached file is an .npz archive holding the raw ADC counts in "ecg"
    with load(fname) as file:
        ecg = file["ecg"].astype(int)  # np.uint16 -> int
    # Convert raw output of ADC to mV: (ecg - adc_zero) / adc_gain
    ecg = (ecg - 1024) / 200.0
    return ecg
def face(gray=False):
    """
    Get a 1024 x 768, color image of a raccoon face.
    raccoon-procyon-lotor.jpg at http://www.public-domain-image.com
    Parameters
    ----------
    gray : bool, optional
        If True return 8-bit grey-scale image, otherwise return a color image
    Returns
    -------
    face : ndarray
        image of a racoon face
    Examples
    --------
    >>> import scipy.datasets
    >>> face = scipy.datasets.face()
    >>> face.shape
    (768, 1024, 3)
    >>> face.max()
    255
    >>> face.dtype
    dtype('uint8')
    >>> import matplotlib.pyplot as plt
    >>> plt.gray()
    >>> plt.imshow(face)
    >>> plt.show()
    """
    import bz2
    # The cached file is bz2-compressed raw RGB bytes.
    fname = fetch_data("face.dat")
    with open(fname, 'rb') as fh:
        face_data = bz2.decompress(fh.read())
    face = frombuffer(face_data, dtype='uint8')
    face.shape = (768, 1024, 3)
    if gray is True:
        # collapse the RGB channels with a luma-style weighting
        face = (0.21 * face[:, :, 0] + 0.71 * face[:, :, 1] +
                0.07 * face[:, :, 2]).astype('uint8')
    return face
| 6,759
| 29.588235
| 91
|
py
|
scipy
|
scipy-main/scipy/datasets/__init__.py
|
"""
================================
Datasets (:mod:`scipy.datasets`)
================================
.. currentmodule:: scipy.datasets
Dataset Methods
===============
.. autosummary::
:toctree: generated/
ascent
face
electrocardiogram
Utility Methods
===============
.. autosummary::
:toctree: generated/
download_all -- Download all the dataset files to specified path.
clear_cache -- Clear cached dataset directory.
Usage of Datasets
=================
SciPy dataset methods can be simply called as follows: ``'<dataset-name>()'``
This downloads the dataset files over the network once, and saves the cache,
before returning a `numpy.ndarray` object representing the dataset.
Note that the return data structure and data type might be different for
different dataset methods. For a more detailed example on usage, please look
into the particular dataset method documentation above.
How dataset retrieval and storage works
=======================================
SciPy dataset files are stored within individual github repositories under the
SciPy GitHub organization, following a naming convention as
``'dataset-<name>'``, for example `scipy.datasets.face` files live at
https://github.com/scipy/dataset-face. The `scipy.datasets` submodule utilizes
and depends on `Pooch <https://www.fatiando.org/pooch/latest/>`_, a Python
package built to simplify fetching data files. Pooch uses these repos to
retrieve the respective dataset files when calling the dataset function.
A registry of all the datasets, essentially a mapping of filenames with their
SHA256 hash and repo urls are maintained, which Pooch uses to handle and verify
the downloads on function call. After downloading the dataset once, the files
are saved in the system cache directory under ``'scipy-data'``.
Dataset cache locations may vary on different platforms.
For macOS::
'~/Library/Caches/scipy-data'
For Linux and other Unix-like platforms::
'~/.cache/scipy-data' # or the value of the XDG_CACHE_HOME env var, if defined
For Windows::
'C:\\Users\\<user>\\AppData\\Local\\<AppAuthor>\\scipy-data\\Cache'
In environments with constrained network connectivity for various security
reasons or on systems without continuous internet connections, one may manually
load the cache of the datasets by placing the contents of the dataset repo in
the above mentioned cache directory to avoid fetching dataset errors without
the internet connectivity.
"""
from ._fetchers import face, ascent, electrocardiogram  # noqa: E402
from ._download_all import download_all
from ._utils import clear_cache
# Public API of scipy.datasets
__all__ = ['ascent', 'electrocardiogram', 'face',
           'download_all', 'clear_cache']
# Expose `scipy.datasets.test()` for running this subpackage's test suite.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 2,816
| 29.956044
| 83
|
py
|
scipy
|
scipy-main/scipy/datasets/tests/test_data.py
|
from scipy.datasets._registry import registry
from scipy.datasets._fetchers import data_fetcher
from scipy.datasets._utils import _clear_cache
from scipy.datasets import ascent, face, electrocardiogram, download_all
from numpy.testing import assert_equal, assert_almost_equal
import os
import pytest
# pooch is required for these tests; fail loudly at import time otherwise.
try:
    import pooch
except ImportError:
    raise ImportError("Missing optional dependency 'pooch' required "
                      "for scipy.datasets module. Please use pip or "
                      "conda to install 'pooch'.")
# Cache directory used by the pooch data fetcher.
data_dir = data_fetcher.path  # type: ignore
def _has_hash(path, expected_hash):
"""Check if the provided path has the expected hash."""
if not os.path.exists(path):
return False
return pooch.file_hash(path) == expected_hash
class TestDatasets:
    # NOTE: these tests require an internet connection; the autouse fixture
    # below downloads every registered dataset file once per module.
    @pytest.fixture(scope='module', autouse=True)
    def test_download_all(self):
        # This fixture requires INTERNET CONNECTION
        # test_setup phase
        download_all()
        yield
    def test_existence_all(self):
        # at minimum, every registered file must now be present in the cache
        assert len(os.listdir(data_dir)) >= len(registry)
    def test_ascent(self):
        assert_equal(ascent().shape, (512, 512))
        # hash check
        assert _has_hash(os.path.join(data_dir, "ascent.dat"),
                         registry["ascent.dat"])
    def test_face(self):
        assert_equal(face().shape, (768, 1024, 3))
        # hash check
        assert _has_hash(os.path.join(data_dir, "face.dat"),
                         registry["face.dat"])
    def test_electrocardiogram(self):
        # Test shape, dtype and stats of signal
        ecg = electrocardiogram()
        assert_equal(ecg.dtype, float)
        assert_equal(ecg.shape, (108000,))
        assert_almost_equal(ecg.mean(), -0.16510875)
        assert_almost_equal(ecg.std(), 0.5992473991177294)
        # hash check
        assert _has_hash(os.path.join(data_dir, "ecg.dat"),
                         registry["ecg.dat"])
def test_clear_cache(tmp_path):
    # Note: `tmp_path` is a pytest fixture, it handles cleanup
    dummy_basepath = tmp_path / "dummy_cache_dir"
    dummy_basepath.mkdir()
    # Create four dummy dataset files for dummy dataset methods
    dummy_method_map = {}
    for i in range(4):
        dummy_method_map[f"data{i}"] = [f"data{i}.dat"]
        data_filepath = dummy_basepath / f"data{i}.dat"
        data_filepath.write_text("")
    # clear files associated to single dataset method data0
    # also test callable argument instead of list of callables
    def data0():
        pass
    _clear_cache(datasets=data0, cache_dir=dummy_basepath,
                 method_map=dummy_method_map)
    assert not os.path.exists(dummy_basepath/"data0.dat")
    # clear files associated to multiple dataset methods "data1" and "data2"
    def data1():
        pass
    def data2():
        pass
    _clear_cache(datasets=[data1, data2], cache_dir=dummy_basepath,
                 method_map=dummy_method_map)
    assert not os.path.exists(dummy_basepath/"data1.dat")
    assert not os.path.exists(dummy_basepath/"data2.dat")
    # clear multiple dataset files "data4_0.dat" and "data4_1.dat"
    # associated with dataset method "data4"
    def data4():
        pass
    # create files
    (dummy_basepath / "data4_0.dat").write_text("")
    (dummy_basepath / "data4_1.dat").write_text("")
    dummy_method_map["data4"] = ["data4_0.dat", "data4_1.dat"]
    _clear_cache(datasets=[data4], cache_dir=dummy_basepath,
                 method_map=dummy_method_map)
    assert not os.path.exists(dummy_basepath/"data4_0.dat")
    assert not os.path.exists(dummy_basepath/"data4_1.dat")
    # wrong dataset method should raise ValueError since it
    # doesn't exist in the dummy_method_map
    def data5():
        pass
    with pytest.raises(ValueError):
        _clear_cache(datasets=[data5], cache_dir=dummy_basepath,
                     method_map=dummy_method_map)
    # remove all dataset cache
    _clear_cache(datasets=None, cache_dir=dummy_basepath)
    assert not os.path.exists(dummy_basepath)
| 4,064
| 31.782258
| 76
|
py
|
scipy
|
scipy-main/scipy/datasets/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/linalg/_decomp_cossin.py
|
from collections.abc import Iterable
import numpy as np
from scipy._lib._util import _asarray_validated
from scipy.linalg import block_diag, LinAlgError
from .lapack import _compute_lwork, get_lapack_funcs
__all__ = ['cossin']
def cossin(X, p=None, q=None, separate=False,
           swap_sign=False, compute_u=True, compute_vh=True):
    """
    Compute the cosine-sine (CS) decomposition of an orthogonal/unitary matrix.
    X is an ``(m, m)`` orthogonal/unitary matrix, partitioned as the following
    where upper left block has the shape of ``(p, q)``::
                                   ┌                   ┐
                                   │ I  0  0 │ 0  0  0 │
        ┌           ┐   ┌         ┐│ 0  C  0 │ 0 -S  0 │┌         ┐*
        │ X11 │ X12 │   │ U1 │    ││ 0  0  0 │ 0  0 -I ││ V1 │    │
        │ ────┼──── │ = │────┼────││─────────┼─────────││────┼────│
        │ X21 │ X22 │   │    │ U2 ││ 0  0  0 │ I  0  0 ││    │ V2 │
        └           ┘   └         ┘│ 0  S  0 │ 0  C  0 │└         ┘
                                   │ 0  0  I │ 0  0  0 │
                                   └                   ┘
    ``U1``, ``U2``, ``V1``, ``V2`` are square orthogonal/unitary matrices of
    dimensions ``(p,p)``, ``(m-p,m-p)``, ``(q,q)``, and ``(m-q,m-q)``
    respectively, and ``C`` and ``S`` are ``(r, r)`` nonnegative diagonal
    matrices satisfying ``C^2 + S^2 = I`` where ``r = min(p, m-p, q, m-q)``.
    Moreover, the rank of the identity matrices are ``min(p, q) - r``,
    ``min(p, m - q) - r``, ``min(m - p, q) - r``, and ``min(m - p, m - q) - r``
    respectively.
    X can be supplied either by itself and block specifications p, q or its
    subblocks in an iterable from which the shapes would be derived. See the
    examples below.
    Parameters
    ----------
    X : array_like, iterable
        complex unitary or real orthogonal matrix to be decomposed, or iterable
        of subblocks ``X11``, ``X12``, ``X21``, ``X22``, when ``p``, ``q`` are
        omitted.
    p : int, optional
        Number of rows of the upper left block ``X11``, used only when X is
        given as an array.
    q : int, optional
        Number of columns of the upper left block ``X11``, used only when X is
        given as an array.
    separate : bool, optional
        if ``True``, the low level components are returned instead of the
        matrix factors, i.e. ``(u1,u2)``, ``theta``, ``(v1h,v2h)`` instead of
        ``u``, ``cs``, ``vh``.
    swap_sign : bool, optional
        if ``True``, the ``-S``, ``-I`` block will be the bottom left,
        otherwise (by default) they will be in the upper right block.
    compute_u : bool, optional
        if ``False``, ``u`` won't be computed and an empty array is returned.
    compute_vh : bool, optional
        if ``False``, ``vh`` won't be computed and an empty array is returned.
    Returns
    -------
    u : ndarray
        When ``compute_u=True``, contains the block diagonal orthogonal/unitary
        matrix consisting of the blocks ``U1`` (``p`` x ``p``) and ``U2``
        (``m-p`` x ``m-p``) orthogonal/unitary matrices. If ``separate=True``,
        this contains the tuple of ``(U1, U2)``.
    cs : ndarray
        The cosine-sine factor with the structure described above.
        If ``separate=True``, this contains the ``theta`` array containing the
        angles in radians.
    vh : ndarray
        When ``compute_vh=True``, contains the block diagonal orthogonal/unitary
        matrix consisting of the blocks ``V1H`` (``q`` x ``q``) and ``V2H``
        (``m-q`` x ``m-q``) orthogonal/unitary matrices. If ``separate=True``,
        this contains the tuple of ``(V1H, V2H)``.
    References
    ----------
    .. [1] Brian D. Sutton. Computing the complete CS decomposition. Numer.
           Algorithms, 50(1):33-65, 2009.
    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import cossin
    >>> from scipy.stats import unitary_group
    >>> x = unitary_group.rvs(4)
    >>> u, cs, vdh = cossin(x, p=2, q=2)
    >>> np.allclose(x, u @ cs @ vdh)
    True
    Same can be entered via subblocks without the need of ``p`` and ``q``. Also
    let's skip the computation of ``u``
    >>> ue, cs, vdh = cossin((x[:2, :2], x[:2, 2:], x[2:, :2], x[2:, 2:]),
    ...                      compute_u=False)
    >>> print(ue)
    []
    >>> np.allclose(x, u @ cs @ vdh)
    True
    """
    if p or q:
        # Whole-matrix input: validate squareness and the 0 < p, q < m bounds,
        # then slice out the four subblocks.
        p = 1 if p is None else int(p)
        q = 1 if q is None else int(q)
        X = _asarray_validated(X, check_finite=True)
        if not np.equal(*X.shape):
            raise ValueError("Cosine Sine decomposition only supports square"
                             " matrices, got {}".format(X.shape))
        m = X.shape[0]
        if p >= m or p <= 0:
            raise ValueError("invalid p={}, 0<p<{} must hold"
                             .format(p, X.shape[0]))
        if q >= m or q <= 0:
            raise ValueError("invalid q={}, 0<q<{} must hold"
                             .format(q, X.shape[0]))
        x11, x12, x21, x22 = X[:p, :q], X[:p, q:], X[p:, :q], X[p:, q:]
    elif not isinstance(X, Iterable):
        raise ValueError("When p and q are None, X must be an Iterable"
                         " containing the subblocks of X")
    else:
        # Subblock input: derive p, q, m from the block shapes and make sure
        # the four pieces assemble into a square matrix.
        if len(X) != 4:
            raise ValueError("When p and q are None, exactly four arrays"
                             " should be in X, got {}".format(len(X)))
        x11, x12, x21, x22 = (np.atleast_2d(x) for x in X)
        for name, block in zip(["x11", "x12", "x21", "x22"],
                               [x11, x12, x21, x22]):
            if block.shape[1] == 0:
                raise ValueError(f"{name} can't be empty")
        p, q = x11.shape
        mmp, mmq = x22.shape
        if x12.shape != (p, mmq):
            raise ValueError("Invalid x12 dimensions: desired {}, "
                             "got {}".format((p, mmq), x12.shape))
        if x21.shape != (mmp, q):
            raise ValueError("Invalid x21 dimensions: desired {}, "
                             "got {}".format((mmp, q), x21.shape))
        if p + mmp != q + mmq:
            raise ValueError("The subblocks have compatible sizes but "
                             "don't form a square array (instead they form a"
                             " {}x{} array). This might be due to missing "
                             "p, q arguments.".format(p + mmp, q + mmq))
        m = p + mmp
    # Dispatch to the complex (uncsd) or real (orcsd) LAPACK driver.
    cplx = any([np.iscomplexobj(x) for x in [x11, x12, x21, x22]])
    driver = "uncsd" if cplx else "orcsd"
    csd, csd_lwork = get_lapack_funcs([driver, driver + "_lwork"],
                                      [x11, x12, x21, x22])
    lwork = _compute_lwork(csd_lwork, m=m, p=p, q=q)
    lwork_args = ({'lwork': lwork[0], 'lrwork': lwork[1]} if cplx else
                  {'lwork': lwork})
    *_, theta, u1, u2, v1h, v2h, info = csd(x11=x11, x12=x12, x21=x21, x22=x22,
                                            compute_u1=compute_u,
                                            compute_u2=compute_u,
                                            compute_v1t=compute_vh,
                                            compute_v2t=compute_vh,
                                            trans=False, signs=swap_sign,
                                            **lwork_args)
    method_name = csd.typecode + driver
    if info < 0:
        raise ValueError('illegal value in argument {} of internal {}'
                         .format(-info, method_name))
    if info > 0:
        raise LinAlgError(f"{method_name} did not converge: {info}")
    if separate:
        return (u1, u2), theta, (v1h, v2h)
    U = block_diag(u1, u2)
    VDH = block_diag(v1h, v2h)
    # Construct the middle factor CS following the partition in the docstring:
    # identity, cosine and sine blocks placed by the block ranks n11..n22.
    c = np.diag(np.cos(theta))
    s = np.diag(np.sin(theta))
    r = min(p, q, m - p, m - q)
    n11 = min(p, q) - r
    n12 = min(p, m - q) - r
    n21 = min(m - p, q) - r
    n22 = min(m - p, m - q) - r
    Id = np.eye(np.max([n11, n12, n21, n22, r]), dtype=theta.dtype)
    CS = np.zeros((m, m), dtype=theta.dtype)
    CS[:n11, :n11] = Id[:n11, :n11]
    xs = n11 + r
    xe = n11 + r + n12
    ys = n11 + n21 + n22 + 2 * r
    ye = n11 + n21 + n22 + 2 * r + n12
    CS[xs: xe, ys:ye] = Id[:n12, :n12] if swap_sign else -Id[:n12, :n12]
    xs = p + n22 + r
    xe = p + n22 + r + n21
    ys = n11 + r
    ye = n11 + r + n21
    CS[xs:xe, ys:ye] = -Id[:n21, :n21] if swap_sign else Id[:n21, :n21]
    CS[p:p + n22, q:q + n22] = Id[:n22, :n22]
    CS[n11:n11 + r, n11:n11 + r] = c
    CS[p + n22:p + n22 + r, r + n21 + n22:2 * r + n21 + n22] = c
    xs = n11
    xe = n11 + r
    ys = n11 + n21 + n22 + r
    ye = n11 + n21 + n22 + 2 * r
    CS[xs:xe, ys:ye] = s if swap_sign else -s
    CS[p + n22:p + n22 + r, n11:n11 + r] = -s if swap_sign else s
    return U, CS, VDH
| 8,882
| 38.65625
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/_testutils.py
|
import numpy as np
class _FakeMatrix:
    """Array-like that exposes only the ``__array_interface__`` protocol.

    Used by ``assert_no_overwrite`` to verify that routines accepting
    array-likes do not write back through the buffer interface.
    """
    def __init__(self, data):
        # Keep the wrapped ndarray alive; the interface shares its memory.
        self._data = data
        self.__array_interface__ = data.__array_interface__
class _FakeMatrix2:
    """Array-like that exposes only the ``__array__`` conversion protocol."""
    def __init__(self, data):
        self._data = data
    def __array__(self):
        # np.asarray(...) on this object yields the wrapped array itself
        # (no copy), so in-place modification by callees is detectable.
        return self._data
def _get_array(shape, dtype):
"""
Get a test array of given shape and data type.
Returned NxN matrices are posdef, and 2xN are banded-posdef.
"""
if len(shape) == 2 and shape[0] == 2:
# yield a banded positive definite one
x = np.zeros(shape, dtype=dtype)
x[0, 1:] = -1
x[1] = 2
return x
elif len(shape) == 2 and shape[0] == shape[1]:
# always yield a positive definite matrix
x = np.zeros(shape, dtype=dtype)
j = np.arange(shape[0])
x[j, j] = 2
x[j[:-1], j[:-1]+1] = -1
x[j[:-1]+1, j[:-1]] = -1
return x
else:
np.random.seed(1234)
return np.random.randn(*shape).astype(dtype)
def _id(x):
    # Identity "wrapper": the no-op member of the faker list used by
    # assert_no_overwrite (alongside _FakeMatrix and _FakeMatrix2).
    return x
def assert_no_overwrite(call, shapes, dtypes=None):
    """
    Check that ``call`` leaves its array arguments unmodified.

    The call is exercised over every combination of dtype, memory order
    and array wrapper; each input is compared against a pristine copy
    afterwards.
    """
    if dtypes is None:
        dtypes = [np.float32, np.float64, np.complex64, np.complex128]
    wrappers = [_id, _FakeMatrix, _FakeMatrix2]
    for dtype in dtypes:
        for order in ["C", "F"]:
            for faker in wrappers:
                pristine = [_get_array(shape, dtype) for shape in shapes]
                passed = [faker(arr.copy(order)) for arr in pristine]
                call(*passed)
                msg = f"call modified inputs [{dtype!r}, {faker!r}]"
                for got, want in zip(passed, pristine):
                    np.testing.assert_equal(got, want, err_msg=msg)
| 1,730
| 26.046875
| 70
|
py
|
scipy
|
scipy-main/scipy/linalg/_generate_pyx.py
|
"""
Code generator script to make the Cython BLAS and LAPACK wrappers
from the files "cython_blas_signatures.txt" and
"cython_lapack_signatures.txt" which contain the signatures for
all the BLAS/LAPACK routines that should be included in the wrappers.
"""
from collections import defaultdict
from operator import itemgetter
import os
from stat import ST_MTIME
import argparse
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Abbreviated scalar type codes -> Fortran declarations, used when emitting
# the Fortran shim subroutines.
fortran_types = {'int': 'integer',
                 'c': 'complex',
                 'd': 'double precision',
                 's': 'real',
                 'z': 'complex*16',
                 'char': 'character',
                 'bint': 'logical'}
# Abbreviated type codes -> C type names, used when emitting C prototypes.
# The *select* entries are callback typedefs for the gees/gges families.
c_types = {'int': 'int',
           'c': 'npy_complex64',
           'd': 'double',
           's': 'float',
           'z': 'npy_complex128',
           'char': 'char',
           'bint': 'int',
           'cselect1': '_cselect1',
           'cselect2': '_cselect2',
           'dselect2': '_dselect2',
           'dselect3': '_dselect3',
           'sselect2': '_sselect2',
           'sselect3': '_sselect3',
           'zselect1': '_zselect1',
           'zselect2': '_zselect2'}
def arg_names_and_types(args):
    """Split a signature string of 'type *name' pairs into two parallel
    tuples ``(types, names)`` (returned as a zip over both)."""
    pairs = (item.split(' *') for item in args.split(', '))
    return zip(*pairs)
pyx_func_template = """
cdef extern from "{header_name}":
void _fortran_{name} "F_FUNC({name}wrp, {upname}WRP)"({ret_type} *out, {fort_args}) nogil
cdef {ret_type} {name}({args}) noexcept nogil:
cdef {ret_type} out
_fortran_{name}(&out, {argnames})
return out
"""
# Cython-level type codes that correspond to different C-side typedefs;
# codes absent from this mapping pass through unchanged (see the
# `npy_types.get(t, t)` lookups below).
npy_types = {'c': 'npy_complex64', 'z': 'npy_complex128',
             'cselect1': '_cselect1', 'cselect2': '_cselect2',
             'dselect2': '_dselect2', 'dselect3': '_dselect3',
             'sselect2': '_sselect2', 'sselect3': '_sselect3',
             'zselect1': '_zselect1', 'zselect2': '_zselect2'}
def arg_casts(arg):
    """Return the Cython cast prefix ``'<type*>'`` for C types whose
    Cython-level representation differs (complex and select-callback
    typedefs), or ``''`` when no cast is needed.
    """
    # Set membership instead of the original list scan; the set literal
    # mirrors exactly the values of ``npy_types``.
    needs_cast = {'npy_complex64', 'npy_complex128', '_cselect1', '_cselect2',
                  '_dselect2', '_dselect3', '_sselect2', '_sselect3',
                  '_zselect1', '_zselect2'}
    if arg in needs_cast:
        return f'<{arg}*>'
    return ''
def pyx_decl_func(name, ret_type, args, header_name):
    """Render the Cython wrapper for one BLAS/LAPACK *function* (i.e. a
    routine with a return value) by filling in ``pyx_func_template``."""
    argtypes, argnames = arg_names_and_types(args)
    # Fix the case where one of the arguments has the same name as the
    # abbreviation for the argument type.
    # Otherwise the variable passed as an argument overwrites the
    # previous typedef and Cython compilation fails.
    if ret_type in argnames:
        argnames = [n if n != ret_type else ret_type + '_' for n in argnames]
    # 'lambda' and 'in' are Python keywords; suffix them with '_'.
    argnames = [n if n not in ['lambda', 'in'] else n + '_'
                for n in argnames]
    # NOTE: in the next two joins `n` ranges over *types* and `t` over
    # *names*, producing "type *name" pairs despite the variable names.
    args = ', '.join([' *'.join([n, t])
                      for n, t in zip(argtypes, argnames)])
    # Use the NumPy complex / select-callback typedefs on the C side.
    argtypes = [npy_types.get(t, t) for t in argtypes]
    fort_args = ', '.join([' *'.join([n, t])
                           for n, t in zip(argtypes, argnames)])
    # Insert '<type*>' casts where the Cython and C pointer types differ.
    argnames = [arg_casts(t) + n for n, t in zip(argnames, argtypes)]
    argnames = ', '.join(argnames)
    c_ret_type = c_types[ret_type]
    args = args.replace('lambda', 'lambda_')
    return pyx_func_template.format(name=name, upname=name.upper(), args=args,
                                    fort_args=fort_args, ret_type=ret_type,
                                    c_ret_type=c_ret_type, argnames=argnames,
                                    header_name=header_name)
pyx_sub_template = """cdef extern from "{header_name}":
void _fortran_{name} "F_FUNC({name},{upname})"({fort_args}) nogil
cdef void {name}({args}) noexcept nogil:
_fortran_{name}({argnames})
"""
def pyx_decl_sub(name, args, header_name):
    """Render the Cython wrapper for one BLAS/LAPACK *subroutine* (i.e. a
    routine with no return value) by filling in ``pyx_sub_template``."""
    argtypes, argnames = arg_names_and_types(args)
    # Use the NumPy complex / select-callback typedefs on the C side.
    argtypes = [npy_types.get(t, t) for t in argtypes]
    # 'lambda' and 'in' are Python keywords; suffix them with '_'.
    argnames = [n if n not in ['lambda', 'in'] else n + '_' for n in argnames]
    # `n` ranges over types and `t` over names here ("type *name" pairs).
    fort_args = ', '.join([' *'.join([n, t])
                           for n, t in zip(argtypes, argnames)])
    # Insert '<type*>' casts where the Cython and C pointer types differ.
    argnames = [arg_casts(t) + n for n, t in zip(argnames, argtypes)]
    argnames = ', '.join(argnames)
    args = args.replace('*lambda,', '*lambda_,').replace('*in,', '*in_,')
    return pyx_sub_template.format(name=name, upname=name.upper(),
                                   args=args, fort_args=fort_args,
                                   argnames=argnames, header_name=header_name)
blas_pyx_preamble = '''# cython: boundscheck = False
# cython: wraparound = False
# cython: cdivision = True
"""
BLAS Functions for Cython
=========================
Usable from Cython via::
cimport scipy.linalg.cython_blas
These wrappers do not check for alignment of arrays.
Alignment should be checked before these wrappers are used.
Raw function pointers (Fortran-style pointer arguments):
- {}
"""
# Within SciPy, these wrappers can be used via relative or absolute cimport.
# Examples:
# from ..linalg cimport cython_blas
# from scipy.linalg cimport cython_blas
# cimport scipy.linalg.cython_blas as cython_blas
# cimport ..linalg.cython_blas as cython_blas
# Within SciPy, if BLAS functions are needed in C/C++/Fortran,
# these wrappers should not be used.
# The original libraries should be linked directly.
cdef extern from "fortran_defs.h":
pass
from numpy cimport npy_complex64, npy_complex128
'''
def make_blas_pyx_preamble(all_sigs):
    """Fill the BLAS .pyx preamble with the list of wrapped routine names."""
    listing = "\n- ".join(sig[0] for sig in all_sigs)
    return blas_pyx_preamble.format(listing)
lapack_pyx_preamble = '''"""
LAPACK functions for Cython
===========================
Usable from Cython via::
cimport scipy.linalg.cython_lapack
This module provides Cython-level wrappers for all primary routines included
in LAPACK 3.4.0 except for ``zcgesv`` since its interface is not consistent
from LAPACK 3.4.0 to 3.6.0. It also provides some of the
fixed-api auxiliary routines.
These wrappers do not check for alignment of arrays.
Alignment should be checked before these wrappers are used.
Raw function pointers (Fortran-style pointer arguments):
- {}
"""
# Within SciPy, these wrappers can be used via relative or absolute cimport.
# Examples:
# from ..linalg cimport cython_lapack
# from scipy.linalg cimport cython_lapack
# cimport scipy.linalg.cython_lapack as cython_lapack
# cimport ..linalg.cython_lapack as cython_lapack
# Within SciPy, if LAPACK functions are needed in C/C++/Fortran,
# these wrappers should not be used.
# The original libraries should be linked directly.
cdef extern from "fortran_defs.h":
pass
from numpy cimport npy_complex64, npy_complex128
cdef extern from "_lapack_subroutines.h":
# Function pointer type declarations for
# gees and gges families of functions.
ctypedef bint _cselect1(npy_complex64*)
ctypedef bint _cselect2(npy_complex64*, npy_complex64*)
ctypedef bint _dselect2(d*, d*)
ctypedef bint _dselect3(d*, d*, d*)
ctypedef bint _sselect2(s*, s*)
ctypedef bint _sselect3(s*, s*, s*)
ctypedef bint _zselect1(npy_complex128*)
ctypedef bint _zselect2(npy_complex128*, npy_complex128*)
'''
def make_lapack_pyx_preamble(all_sigs):
    """Fill the LAPACK .pyx preamble with the list of wrapped routine names."""
    listing = "\n- ".join(sig[0] for sig in all_sigs)
    return lapack_pyx_preamble.format(listing)
blas_py_wrappers = """
# Python-accessible wrappers for testing:
cdef inline bint _is_contiguous(double[:,:] a, int axis) noexcept nogil:
return (a.strides[axis] == sizeof(a[0,0]) or a.shape[axis] == 1)
cpdef float complex _test_cdotc(float complex[:] cx, float complex[:] cy) noexcept nogil:
cdef:
int n = cx.shape[0]
int incx = cx.strides[0] // sizeof(cx[0])
int incy = cy.strides[0] // sizeof(cy[0])
return cdotc(&n, &cx[0], &incx, &cy[0], &incy)
cpdef float complex _test_cdotu(float complex[:] cx, float complex[:] cy) noexcept nogil:
cdef:
int n = cx.shape[0]
int incx = cx.strides[0] // sizeof(cx[0])
int incy = cy.strides[0] // sizeof(cy[0])
return cdotu(&n, &cx[0], &incx, &cy[0], &incy)
cpdef double _test_dasum(double[:] dx) noexcept nogil:
cdef:
int n = dx.shape[0]
int incx = dx.strides[0] // sizeof(dx[0])
return dasum(&n, &dx[0], &incx)
cpdef double _test_ddot(double[:] dx, double[:] dy) noexcept nogil:
cdef:
int n = dx.shape[0]
int incx = dx.strides[0] // sizeof(dx[0])
int incy = dy.strides[0] // sizeof(dy[0])
return ddot(&n, &dx[0], &incx, &dy[0], &incy)
cpdef int _test_dgemm(double alpha, double[:,:] a, double[:,:] b, double beta,
double[:,:] c) except -1 nogil:
cdef:
char *transa
char *transb
int m, n, k, lda, ldb, ldc
double *a0=&a[0,0]
double *b0=&b[0,0]
double *c0=&c[0,0]
# In the case that c is C contiguous, swap a and b and
# swap whether or not each of them is transposed.
# This can be done because a.dot(b) = b.T.dot(a.T).T.
if _is_contiguous(c, 1):
if _is_contiguous(a, 1):
transb = 'n'
ldb = (&a[1,0]) - a0 if a.shape[0] > 1 else 1
elif _is_contiguous(a, 0):
transb = 't'
ldb = (&a[0,1]) - a0 if a.shape[1] > 1 else 1
else:
with gil:
raise ValueError("Input 'a' is neither C nor Fortran contiguous.")
if _is_contiguous(b, 1):
transa = 'n'
lda = (&b[1,0]) - b0 if b.shape[0] > 1 else 1
elif _is_contiguous(b, 0):
transa = 't'
lda = (&b[0,1]) - b0 if b.shape[1] > 1 else 1
else:
with gil:
raise ValueError("Input 'b' is neither C nor Fortran contiguous.")
k = b.shape[0]
if k != a.shape[1]:
with gil:
raise ValueError("Shape mismatch in input arrays.")
m = b.shape[1]
n = a.shape[0]
if n != c.shape[0] or m != c.shape[1]:
with gil:
raise ValueError("Output array does not have the correct shape.")
ldc = (&c[1,0]) - c0 if c.shape[0] > 1 else 1
dgemm(transa, transb, &m, &n, &k, &alpha, b0, &lda, a0,
&ldb, &beta, c0, &ldc)
elif _is_contiguous(c, 0):
if _is_contiguous(a, 1):
transa = 't'
lda = (&a[1,0]) - a0 if a.shape[0] > 1 else 1
elif _is_contiguous(a, 0):
transa = 'n'
lda = (&a[0,1]) - a0 if a.shape[1] > 1 else 1
else:
with gil:
raise ValueError("Input 'a' is neither C nor Fortran contiguous.")
if _is_contiguous(b, 1):
transb = 't'
ldb = (&b[1,0]) - b0 if b.shape[0] > 1 else 1
elif _is_contiguous(b, 0):
transb = 'n'
ldb = (&b[0,1]) - b0 if b.shape[1] > 1 else 1
else:
with gil:
raise ValueError("Input 'b' is neither C nor Fortran contiguous.")
m = a.shape[0]
k = a.shape[1]
if k != b.shape[0]:
with gil:
raise ValueError("Shape mismatch in input arrays.")
n = b.shape[1]
if m != c.shape[0] or n != c.shape[1]:
with gil:
raise ValueError("Output array does not have the correct shape.")
ldc = (&c[0,1]) - c0 if c.shape[1] > 1 else 1
dgemm(transa, transb, &m, &n, &k, &alpha, a0, &lda, b0,
&ldb, &beta, c0, &ldc)
else:
with gil:
raise ValueError("Input 'c' is neither C nor Fortran contiguous.")
return 0
cpdef double _test_dnrm2(double[:] x) noexcept nogil:
cdef:
int n = x.shape[0]
int incx = x.strides[0] // sizeof(x[0])
return dnrm2(&n, &x[0], &incx)
cpdef double _test_dzasum(double complex[:] zx) noexcept nogil:
cdef:
int n = zx.shape[0]
int incx = zx.strides[0] // sizeof(zx[0])
return dzasum(&n, &zx[0], &incx)
cpdef double _test_dznrm2(double complex[:] x) noexcept nogil:
cdef:
int n = x.shape[0]
int incx = x.strides[0] // sizeof(x[0])
return dznrm2(&n, &x[0], &incx)
cpdef int _test_icamax(float complex[:] cx) noexcept nogil:
cdef:
int n = cx.shape[0]
int incx = cx.strides[0] // sizeof(cx[0])
return icamax(&n, &cx[0], &incx)
cpdef int _test_idamax(double[:] dx) noexcept nogil:
cdef:
int n = dx.shape[0]
int incx = dx.strides[0] // sizeof(dx[0])
return idamax(&n, &dx[0], &incx)
cpdef int _test_isamax(float[:] sx) noexcept nogil:
cdef:
int n = sx.shape[0]
int incx = sx.strides[0] // sizeof(sx[0])
return isamax(&n, &sx[0], &incx)
cpdef int _test_izamax(double complex[:] zx) noexcept nogil:
cdef:
int n = zx.shape[0]
int incx = zx.strides[0] // sizeof(zx[0])
return izamax(&n, &zx[0], &incx)
cpdef float _test_sasum(float[:] sx) noexcept nogil:
cdef:
int n = sx.shape[0]
int incx = sx.strides[0] // sizeof(sx[0])
return sasum(&n, &sx[0], &incx)
cpdef float _test_scasum(float complex[:] cx) noexcept nogil:
cdef:
int n = cx.shape[0]
int incx = cx.strides[0] // sizeof(cx[0])
return scasum(&n, &cx[0], &incx)
cpdef float _test_scnrm2(float complex[:] x) noexcept nogil:
cdef:
int n = x.shape[0]
int incx = x.strides[0] // sizeof(x[0])
return scnrm2(&n, &x[0], &incx)
cpdef float _test_sdot(float[:] sx, float[:] sy) noexcept nogil:
cdef:
int n = sx.shape[0]
int incx = sx.strides[0] // sizeof(sx[0])
int incy = sy.strides[0] // sizeof(sy[0])
return sdot(&n, &sx[0], &incx, &sy[0], &incy)
cpdef float _test_snrm2(float[:] x) noexcept nogil:
cdef:
int n = x.shape[0]
int incx = x.strides[0] // sizeof(x[0])
return snrm2(&n, &x[0], &incx)
cpdef double complex _test_zdotc(double complex[:] zx, double complex[:] zy) noexcept nogil:
cdef:
int n = zx.shape[0]
int incx = zx.strides[0] // sizeof(zx[0])
int incy = zy.strides[0] // sizeof(zy[0])
return zdotc(&n, &zx[0], &incx, &zy[0], &incy)
cpdef double complex _test_zdotu(double complex[:] zx, double complex[:] zy) noexcept nogil:
cdef:
int n = zx.shape[0]
int incx = zx.strides[0] // sizeof(zx[0])
int incy = zy.strides[0] // sizeof(zy[0])
return zdotu(&n, &zx[0], &incx, &zy[0], &incy)
"""
def generate_blas_pyx(func_sigs, sub_sigs, all_sigs, header_name):
    """Assemble the full ``cython_blas.pyx`` source: preamble, function
    wrappers, subroutine wrappers, then the Python-level test helpers."""
    funcs = "\n".join(pyx_decl_func(*(s+(header_name,))) for s in func_sigs)
    # s[::2] drops the middle element (the 'void' return type) from each
    # (name, ret_type, args) subroutine signature.
    subs = "\n" + "\n".join(pyx_decl_sub(*(s[::2]+(header_name,)))
                            for s in sub_sigs)
    return make_blas_pyx_preamble(all_sigs) + funcs + subs + blas_py_wrappers
lapack_py_wrappers = """
# Python accessible wrappers for testing:
def _test_dlamch(cmach):
# This conversion is necessary to handle Python 3 strings.
cmach_bytes = bytes(cmach)
# Now that it is a bytes representation, a non-temporary variable
# must be passed as a part of the function call.
cdef char* cmach_char = cmach_bytes
return dlamch(cmach_char)
def _test_slamch(cmach):
# This conversion is necessary to handle Python 3 strings.
cmach_bytes = bytes(cmach)
# Now that it is a bytes representation, a non-temporary variable
# must be passed as a part of the function call.
cdef char* cmach_char = cmach_bytes
return slamch(cmach_char)
"""
def generate_lapack_pyx(func_sigs, sub_sigs, all_sigs, header_name):
    """Assemble the full ``cython_lapack.pyx`` source: preamble, function
    wrappers, subroutine wrappers, then the Python-level test helpers."""
    funcs = "\n".join(pyx_decl_func(*(s+(header_name,))) for s in func_sigs)
    # s[::2] drops the middle element (the 'void' return type) from each
    # (name, ret_type, args) subroutine signature.
    subs = "\n" + "\n".join(pyx_decl_sub(*(s[::2]+(header_name,)))
                            for s in sub_sigs)
    preamble = make_lapack_pyx_preamble(all_sigs)
    return preamble + funcs + subs + lapack_py_wrappers
# Template for one routine declaration in the generated .pxd files.
# (A dead first assignment — a function-pointer typedef variant that was
# immediately overwritten — has been removed.)
pxd_template = """cdef {ret_type} {name}({args}) noexcept nogil
"""
def pxd_decl(name, ret_type, args):
    """Render one .pxd declaration, escaping Python-keyword arg names."""
    safe_args = args.replace('lambda', 'lambda_').replace('*in,', '*in_,')
    return pxd_template.format(name=name, ret_type=ret_type, args=safe_args)
blas_pxd_preamble = """# Within scipy, these wrappers can be used via relative or absolute cimport.
# Examples:
# from ..linalg cimport cython_blas
# from scipy.linalg cimport cython_blas
# cimport scipy.linalg.cython_blas as cython_blas
# cimport ..linalg.cython_blas as cython_blas
# Within SciPy, if BLAS functions are needed in C/C++/Fortran,
# these wrappers should not be used.
# The original libraries should be linked directly.
ctypedef float s
ctypedef double d
ctypedef float complex c
ctypedef double complex z
"""
def generate_blas_pxd(all_sigs):
    """Return the complete contents of ``cython_blas.pxd``."""
    decls = (pxd_decl(*sig) for sig in all_sigs)
    return blas_pxd_preamble + '\n'.join(decls)
lapack_pxd_preamble = """# Within SciPy, these wrappers can be used via relative or absolute cimport.
# Examples:
# from ..linalg cimport cython_lapack
# from scipy.linalg cimport cython_lapack
# cimport scipy.linalg.cython_lapack as cython_lapack
# cimport ..linalg.cython_lapack as cython_lapack
# Within SciPy, if LAPACK functions are needed in C/C++/Fortran,
# these wrappers should not be used.
# The original libraries should be linked directly.
ctypedef float s
ctypedef double d
ctypedef float complex c
ctypedef double complex z
# Function pointer type declarations for
# gees and gges families of functions.
ctypedef bint cselect1(c*)
ctypedef bint cselect2(c*, c*)
ctypedef bint dselect2(d*, d*)
ctypedef bint dselect3(d*, d*, d*)
ctypedef bint sselect2(s*, s*)
ctypedef bint sselect3(s*, s*, s*)
ctypedef bint zselect1(z*)
ctypedef bint zselect2(z*, z*)
"""
def generate_lapack_pxd(all_sigs):
    """Return the complete contents of ``cython_lapack.pxd``."""
    body = '\n'.join(pxd_decl(*sig) for sig in all_sigs)
    return lapack_pxd_preamble + body
fortran_template = """ subroutine {name}wrp(
+ ret,
+ {argnames}
+ )
external {wrapper}
{ret_type} {wrapper}
{ret_type} ret
{argdecls}
ret = {wrapper}(
+ {argnames}
+ )
end
"""
# Fortran dimension suffixes for well-known array argument names.
# NOTE(review): `dims` appears unused in this module — confirm before removing.
dims = {'work': '(*)', 'ab': '(ldab,*)', 'a': '(lda,*)', 'dl': '(*)',
        'd': '(*)', 'du': '(*)', 'ap': '(*)', 'e': '(*)', 'lld': '(*)'}
# Per-routine overrides used by process_fortran_name(): these routines take
# scalar x/y arguments or an assumed-size 'a' rather than the defaults.
xy_specialized_dims = {'x': '', 'y': ''}
a_specialized_dims = {'a': '(*)'}
special_cases = defaultdict(dict,
                            ladiv = xy_specialized_dims,
                            lanhf = a_specialized_dims,
                            lansf = a_specialized_dims,
                            lapy2 = xy_specialized_dims,
                            lapy3 = xy_specialized_dims)
def process_fortran_name(name, funcname):
    """Append the Fortran dimension suffix for argument *name*.

    Increment arguments pass through untouched.  Arguments containing
    'x' or 'y' default to '(n)' (vectors); everything else defaults to
    scalar.  ``special_cases`` (keyed by the routine name without its
    type-prefix letter) may override either default.
    """
    if 'inc' in name:
        return name
    overrides = special_cases[funcname[1:]]
    default = '(n)' if ('x' in name or 'y' in name) else ''
    return name + overrides.get(name, default)
def called_name(name):
    """Return the Fortran symbol the shim should call for *name*.

    A handful of complex-valued routines are reached through 'w'-prefixed
    wrappers; all other routines are called under their own name.
    """
    # Set membership instead of the original list scan.
    wrapped = {'cdotc', 'cdotu', 'zdotc', 'zdotu', 'cladiv', 'zladiv'}
    return 'w' + name if name in wrapped else name
def fort_subroutine_wrapper(name, ret_type, args):
    """Render the Fortran shim that turns function *name* into a
    subroutine returning its result through the leading 'ret' argument."""
    wrapper = called_name(name)
    types, names = arg_names_and_types(args)
    argnames = ',\n + '.join(names)
    # Attach dimension suffixes for the Fortran declarations only; the
    # argument list above keeps the bare names.
    names = [process_fortran_name(n, name) for n in names]
    argdecls = '\n '.join(f'{fortran_types[t]} {n}'
                          for n, t in zip(names, types))
    return fortran_template.format(name=name, wrapper=wrapper,
                                   argnames=argnames, argdecls=argdecls,
                                   ret_type=fortran_types[ret_type])
def generate_fortran(func_sigs):
    """Render the Fortran shim subroutines for every function signature."""
    wrappers = (fort_subroutine_wrapper(*sig) for sig in func_sigs)
    return "\n".join(wrappers)
def make_c_args(args):
    """Translate a 'type *name' signature string into a C parameter list."""
    types, names = arg_names_and_types(args)
    c_typed = (c_types[t] for t in types)
    return ', '.join(f'{t} *{n}' for t, n in zip(c_typed, names))
c_func_template = ("void F_FUNC({name}wrp, {upname}WRP)"
"({return_type} *ret, {args});\n")
def c_func_decl(name, return_type, args):
    """Render the C prototype for one wrapped function."""
    return c_func_template.format(name=name, upname=name.upper(),
                                  return_type=c_types[return_type],
                                  args=make_c_args(args))
c_sub_template = "void F_FUNC({name},{upname})({args});\n"
def c_sub_decl(name, return_type, args):
    """Render the C prototype for one wrapped subroutine.

    ``return_type`` is unused (always 'void') but retained so the call
    signature matches ``c_func_decl`` for uniform dispatch.
    """
    return c_sub_template.format(name=name, upname=name.upper(),
                                 args=make_c_args(args))
c_preamble = """#ifndef SCIPY_LINALG_{lib}_FORTRAN_WRAPPERS_H
#define SCIPY_LINALG_{lib}_FORTRAN_WRAPPERS_H
#include "fortran_defs.h"
#include "numpy/arrayobject.h"
"""
lapack_decls = """
typedef int (*_cselect1)(npy_complex64*);
typedef int (*_cselect2)(npy_complex64*, npy_complex64*);
typedef int (*_dselect2)(double*, double*);
typedef int (*_dselect3)(double*, double*, double*);
typedef int (*_sselect2)(float*, float*);
typedef int (*_sselect3)(float*, float*, float*);
typedef int (*_zselect1)(npy_complex128*);
typedef int (*_zselect2)(npy_complex128*, npy_complex128*);
"""
cpp_guard = """
#ifdef __cplusplus
extern "C" {
#endif
"""
c_end = """
#ifdef __cplusplus
}
#endif
#endif
"""
def generate_c_header(func_sigs, sub_sigs, all_sigs, lib_name):
    """Render the complete C wrapper header for BLAS or LAPACK."""
    funcs = "".join(c_func_decl(*sig) for sig in func_sigs)
    subs = "\n" + "".join(c_sub_decl(*sig) for sig in sub_sigs)
    preamble = c_preamble.format(lib=lib_name)
    if lib_name == 'LAPACK':
        # LAPACK additionally needs the select-callback typedefs.
        preamble = preamble + lapack_decls
    return "".join([preamble, cpp_guard, funcs, subs, c_end])
def split_signature(sig):
    """Parse one 'ret_type name(args)' line into (name, ret_type, args).

    The trailing character (the closing parenthesis) is dropped before
    splitting on the opening one.
    """
    prefix, args = sig[:-1].split('(')
    ret_type, name = prefix.split(' ')
    return name, ret_type, args
def filter_lines(lines):
    """Parse signature-file lines into (func_sigs, sub_sigs, all_sigs).

    Blank lines and '#' comments are dropped; 'void'-returning lines are
    subroutines, the rest are functions; ``all_sigs`` is both groups
    sorted by routine name.
    """
    stripped = [ln.strip() for ln in lines]
    meaningful = [ln for ln in stripped if ln and not ln.startswith('#')]
    func_sigs = [split_signature(ln) for ln in meaningful
                 if ln.split(' ')[0] != 'void']
    sub_sigs = [split_signature(ln) for ln in meaningful
                if ln.split(' ')[0] == 'void']
    all_sigs = sorted(func_sigs + sub_sigs, key=itemgetter(0))
    return func_sigs, sub_sigs, all_sigs
def newer(source, target):
    """
    Return True if 'source' exists and is more recently modified than
    'target', or if 'source' exists and 'target' doesn't. Return False
    if both exist and 'target' is the same age or younger than 'source'.

    Raises
    ------
    ValueError
        If 'source' does not exist.
    """
    if not os.path.exists(source):
        raise ValueError(f"file '{os.path.abspath(source)}' does not exist")
    if not os.path.exists(target):
        return True
    # Integer mtimes via the stat tuple, as the original build code used.
    mtime1 = os.stat(source)[ST_MTIME]
    mtime2 = os.stat(target)[ST_MTIME]
    return mtime1 > mtime2
def all_newer(src_files, dst_files):
    """True iff every destination exists and is newer than every source."""
    return all(os.path.exists(dst) and newer(dst, src)
               for dst in dst_files for src in src_files)
def make_all(outdir,
             blas_signature_file="cython_blas_signatures.txt",
             lapack_signature_file="cython_lapack_signatures.txt",
             blas_name="cython_blas",
             lapack_name="cython_lapack",
             blas_fortran_name="_blas_subroutine_wrappers.f",
             lapack_fortran_name="_lapack_subroutine_wrappers.f",
             blas_header_name="_blas_subroutines.h",
             lapack_header_name="_lapack_subroutines.h"):
    """Generate all BLAS/LAPACK wrapper sources into *outdir*.

    For each library this reads its signature file and writes four
    outputs: a .pyx, a .pxd, a Fortran shim file and a C header.
    Regeneration is skipped when every output is newer than every input.
    """
    src_files = (os.path.abspath(__file__),
                 blas_signature_file,
                 lapack_signature_file)
    dst_files = (blas_name + '.pyx',
                 blas_name + '.pxd',
                 blas_fortran_name,
                 blas_header_name,
                 lapack_name + '.pyx',
                 lapack_name + '.pxd',
                 lapack_fortran_name,
                 lapack_header_name)
    dst_files = (os.path.join(outdir, f) for f in dst_files)
    # Signature files are given relative to this script's directory.
    os.chdir(BASE_DIR)
    if all_newer(src_files, dst_files):
        print("scipy/linalg/_generate_pyx.py: all files up-to-date")
        return
    # "Generated file" banner rendered in C, Python and Fortran comment style.
    comments = ["This file was generated by _generate_pyx.py.\n",
                "Do not edit this file directly.\n"]
    ccomment = ''.join(['/* ' + line.rstrip() + ' */\n'
                        for line in comments]) + '\n'
    pyxcomment = ''.join(['# ' + line for line in comments]) + '\n'
    fcomment = ''.join(['c ' + line for line in comments]) + '\n'
    with open(blas_signature_file) as f:
        blas_sigs = f.readlines()
    # blas_sigs becomes the tuple (func_sigs, sub_sigs, all_sigs).
    blas_sigs = filter_lines(blas_sigs)
    blas_pyx = generate_blas_pyx(*(blas_sigs + (blas_header_name,)))
    with open(os.path.join(outdir, blas_name + '.pyx'), 'w') as f:
        f.write(pyxcomment)
        f.write(blas_pyx)
    # blas_sigs[2] == all_sigs (sorted union of functions and subroutines).
    blas_pxd = generate_blas_pxd(blas_sigs[2])
    with open(os.path.join(outdir, blas_name + '.pxd'), 'w') as f:
        f.write(pyxcomment)
        f.write(blas_pxd)
    # blas_sigs[0] == func_sigs; only functions need Fortran shims.
    blas_fortran = generate_fortran(blas_sigs[0])
    with open(os.path.join(outdir, blas_fortran_name), 'w') as f:
        f.write(fcomment)
        f.write(blas_fortran)
    blas_c_header = generate_c_header(*(blas_sigs + ('BLAS',)))
    with open(os.path.join(outdir, blas_header_name), 'w') as f:
        f.write(ccomment)
        f.write(blas_c_header)
    # Repeat the same four outputs for LAPACK.
    with open(lapack_signature_file) as f:
        lapack_sigs = f.readlines()
    lapack_sigs = filter_lines(lapack_sigs)
    lapack_pyx = generate_lapack_pyx(*(lapack_sigs + (lapack_header_name,)))
    with open(os.path.join(outdir, lapack_name + '.pyx'), 'w') as f:
        f.write(pyxcomment)
        f.write(lapack_pyx)
    lapack_pxd = generate_lapack_pxd(lapack_sigs[2])
    with open(os.path.join(outdir, lapack_name + '.pxd'), 'w') as f:
        f.write(pyxcomment)
        f.write(lapack_pxd)
    lapack_fortran = generate_fortran(lapack_sigs[0])
    with open(os.path.join(outdir, lapack_fortran_name), 'w') as f:
        f.write(fcomment)
        f.write(lapack_fortran)
    lapack_c_header = generate_c_header(*(lapack_sigs + ('LAPACK',)))
    with open(os.path.join(outdir, lapack_header_name), 'w') as f:
        f.write(ccomment)
        f.write(lapack_c_header)
if __name__ == '__main__':
    # Command-line entry point: regenerate all wrapper files, either into
    # the directory given by --outdir or (distutils builds) in-place.
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--outdir", type=str,
                        help="Path to the output directory")
    args = parser.parse_args()
    if not args.outdir:
        #raise ValueError(f"Missing `--outdir` argument to _generate_pyx.py")
        # We're dealing with a distutils build here, write in-place:
        outdir_abs = os.path.abspath(os.path.dirname(__file__))
    else:
        # --outdir is interpreted relative to the invocation directory.
        outdir_abs = os.path.join(os.getcwd(), args.outdir)
    make_all(outdir_abs)
| 26,513
| 32.689962
| 101
|
py
|
scipy
|
scipy-main/scipy/linalg/interpolative.py
|
#******************************************************************************
# Copyright (C) 2013 Kenneth L. Ho
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# None of the names of the copyright holders may be used to endorse or
# promote products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#******************************************************************************
# Python module for interfacing with `id_dist`.
r"""
======================================================================
Interpolative matrix decomposition (:mod:`scipy.linalg.interpolative`)
======================================================================
.. moduleauthor:: Kenneth L. Ho <klho@stanford.edu>
.. versionadded:: 0.13
.. currentmodule:: scipy.linalg.interpolative
An interpolative decomposition (ID) of a matrix :math:`A \in
\mathbb{C}^{m \times n}` of rank :math:`k \leq \min \{ m, n \}` is a
factorization
.. math::
A \Pi =
\begin{bmatrix}
A \Pi_{1} & A \Pi_{2}
\end{bmatrix} =
A \Pi_{1}
\begin{bmatrix}
I & T
\end{bmatrix},
where :math:`\Pi = [\Pi_{1}, \Pi_{2}]` is a permutation matrix with
:math:`\Pi_{1} \in \{ 0, 1 \}^{n \times k}`, i.e., :math:`A \Pi_{2} =
A \Pi_{1} T`. This can equivalently be written as :math:`A = BP`,
where :math:`B = A \Pi_{1}` and :math:`P = [I, T] \Pi^{\mathsf{T}}`
are the *skeleton* and *interpolation matrices*, respectively.
If :math:`A` does not have exact rank :math:`k`, then there exists an
approximation in the form of an ID such that :math:`A = BP + E`, where
:math:`\| E \| \sim \sigma_{k + 1}` is on the order of the :math:`(k +
1)`-th largest singular value of :math:`A`. Note that :math:`\sigma_{k
+ 1}` is the best possible error for a rank-:math:`k` approximation
and, in fact, is achieved by the singular value decomposition (SVD)
:math:`A \approx U S V^{*}`, where :math:`U \in \mathbb{C}^{m \times
k}` and :math:`V \in \mathbb{C}^{n \times k}` have orthonormal columns
and :math:`S = \mathop{\mathrm{diag}} (\sigma_{i}) \in \mathbb{C}^{k
\times k}` is diagonal with nonnegative entries. The principal
advantages of using an ID over an SVD are that:
- it is cheaper to construct;
- it preserves the structure of :math:`A`; and
- it is more efficient to compute with in light of the identity submatrix of :math:`P`.
Routines
========
Main functionality:
.. autosummary::
:toctree: generated/
interp_decomp
reconstruct_matrix_from_id
reconstruct_interp_matrix
reconstruct_skel_matrix
id_to_svd
svd
estimate_spectral_norm
estimate_spectral_norm_diff
estimate_rank
Support functions:
.. autosummary::
:toctree: generated/
seed
rand
References
==========
This module uses the ID software package [1]_ by Martinsson, Rokhlin,
Shkolnisky, and Tygert, which is a Fortran library for computing IDs
using various algorithms, including the rank-revealing QR approach of
[2]_ and the more recent randomized methods described in [3]_, [4]_,
and [5]_. This module exposes its functionality in a way convenient
for Python users. Note that this module does not add any functionality
beyond that of organizing a simpler and more consistent interface.
We advise the user to consult also the `documentation for the ID package
<http://tygert.com/id_doc.4.pdf>`_.
.. [1] P.G. Martinsson, V. Rokhlin, Y. Shkolnisky, M. Tygert. "ID: a
software package for low-rank approximation of matrices via interpolative
decompositions, version 0.2." http://tygert.com/id_doc.4.pdf.
.. [2] H. Cheng, Z. Gimbutas, P.G. Martinsson, V. Rokhlin. "On the
compression of low rank matrices." *SIAM J. Sci. Comput.* 26 (4): 1389--1404,
2005. :doi:`10.1137/030602678`.
.. [3] E. Liberty, F. Woolfe, P.G. Martinsson, V. Rokhlin, M.
Tygert. "Randomized algorithms for the low-rank approximation of matrices."
*Proc. Natl. Acad. Sci. U.S.A.* 104 (51): 20167--20172, 2007.
:doi:`10.1073/pnas.0709640104`.
.. [4] P.G. Martinsson, V. Rokhlin, M. Tygert. "A randomized
algorithm for the decomposition of matrices." *Appl. Comput. Harmon. Anal.* 30
(1): 47--68, 2011. :doi:`10.1016/j.acha.2010.02.003`.
.. [5] F. Woolfe, E. Liberty, V. Rokhlin, M. Tygert. "A fast
randomized algorithm for the approximation of matrices." *Appl. Comput.
Harmon. Anal.* 25 (3): 335--366, 2008. :doi:`10.1016/j.acha.2007.12.002`.
Tutorial
========
Initializing
------------
The first step is to import :mod:`scipy.linalg.interpolative` by issuing the
command:
>>> import scipy.linalg.interpolative as sli
Now let's build a matrix. For this, we consider a Hilbert matrix, which is well
known to have low rank:
>>> from scipy.linalg import hilbert
>>> n = 1000
>>> A = hilbert(n)
We can also do this explicitly via:
>>> import numpy as np
>>> n = 1000
>>> A = np.empty((n, n), order='F')
>>> for j in range(n):
...     for i in range(n):
...         A[i,j] = 1. / (i + j + 1)
Note the use of the flag ``order='F'`` in :func:`numpy.empty`. This
instantiates the matrix in Fortran-contiguous order and is important for
avoiding data copying when passing to the backend.
We then define multiplication routines for the matrix by regarding it as a
:class:`scipy.sparse.linalg.LinearOperator`:
>>> from scipy.sparse.linalg import aslinearoperator
>>> L = aslinearoperator(A)
This automatically sets up methods describing the action of the matrix and its
adjoint on a vector.
Computing an ID
---------------
We have several choices of algorithm to compute an ID. These fall largely
according to two dichotomies:
1. how the matrix is represented, i.e., via its entries or via its action on a
vector; and
2. whether to approximate it to a fixed relative precision or to a fixed rank.
We step through each choice in turn below.
In all cases, the ID is represented by three parameters:
1. a rank ``k``;
2. an index array ``idx``; and
3. interpolation coefficients ``proj``.
The ID is specified by the relation
``np.dot(A[:,idx[:k]], proj) == A[:,idx[k:]]``.
From matrix entries
...................
We first consider a matrix given in terms of its entries.
To compute an ID to a fixed precision, type:
>>> k, idx, proj = sli.interp_decomp(A, eps)
where ``eps < 1`` is the desired precision.
To compute an ID to a fixed rank, use:
>>> idx, proj = sli.interp_decomp(A, k)
where ``k >= 1`` is the desired rank.
Both algorithms use random sampling and are usually faster than the
corresponding older, deterministic algorithms, which can be accessed via the
commands:
>>> k, idx, proj = sli.interp_decomp(A, eps, rand=False)
and:
>>> idx, proj = sli.interp_decomp(A, k, rand=False)
respectively.
From matrix action
..................
Now consider a matrix given in terms of its action on a vector as a
:class:`scipy.sparse.linalg.LinearOperator`.
To compute an ID to a fixed precision, type:
>>> k, idx, proj = sli.interp_decomp(L, eps)
To compute an ID to a fixed rank, use:
>>> idx, proj = sli.interp_decomp(L, k)
These algorithms are randomized.
Reconstructing an ID
--------------------
The ID routines above do not output the skeleton and interpolation matrices
explicitly but instead return the relevant information in a more compact (and
sometimes more useful) form. To build these matrices, write:
>>> B = sli.reconstruct_skel_matrix(A, k, idx)
for the skeleton matrix and:
>>> P = sli.reconstruct_interp_matrix(idx, proj)
for the interpolation matrix. The ID approximation can then be computed as:
>>> C = np.dot(B, P)
This can also be constructed directly using:
>>> C = sli.reconstruct_matrix_from_id(B, idx, proj)
without having to first compute ``P``.
Alternatively, this can be done explicitly as well using:
>>> B = A[:,idx[:k]]
>>> P = np.hstack([np.eye(k), proj])[:,np.argsort(idx)]
>>> C = np.dot(B, P)
Computing an SVD
----------------
An ID can be converted to an SVD via the command:
>>> U, S, V = sli.id_to_svd(B, idx, proj)
The SVD approximation is then:
>>> C = np.dot(U, np.dot(np.diag(S), V.conj().T))
The SVD can also be computed "fresh" by combining both the ID and conversion
steps into one command. Following the various ID algorithms above, there are
correspondingly various SVD algorithms that one can employ.
From matrix entries
...................
We consider first SVD algorithms for a matrix given in terms of its entries.
To compute an SVD to a fixed precision, type:
>>> U, S, V = sli.svd(A, eps)
To compute an SVD to a fixed rank, use:
>>> U, S, V = sli.svd(A, k)
Both algorithms use random sampling; for the deterministic versions, issue the
keyword ``rand=False`` as above.
From matrix action
..................
Now consider a matrix given in terms of its action on a vector.
To compute an SVD to a fixed precision, type:
>>> U, S, V = sli.svd(L, eps)
To compute an SVD to a fixed rank, use:
>>> U, S, V = sli.svd(L, k)
Utility routines
----------------
Several utility routines are also available.
To estimate the spectral norm of a matrix, use:
>>> snorm = sli.estimate_spectral_norm(A)
This algorithm is based on the randomized power method and thus requires only
matrix-vector products. The number of iterations to take can be set using the
keyword ``its`` (default: ``its=20``). The matrix is interpreted as a
:class:`scipy.sparse.linalg.LinearOperator`, but it is also valid to supply it
as a :class:`numpy.ndarray`, in which case it is trivially converted using
:func:`scipy.sparse.linalg.aslinearoperator`.
The same algorithm can also estimate the spectral norm of the difference of two
matrices ``A1`` and ``A2`` as follows:
>>> diff = sli.estimate_spectral_norm_diff(A1, A2)
This is often useful for checking the accuracy of a matrix approximation.
Some routines in :mod:`scipy.linalg.interpolative` require estimating the rank
of a matrix as well. This can be done with either:
>>> k = sli.estimate_rank(A, eps)
or:
>>> k = sli.estimate_rank(L, eps)
depending on the representation. The parameter ``eps`` controls the definition
of the numerical rank.
Finally, the random number generation required for all randomized routines can
be controlled via :func:`scipy.linalg.interpolative.seed`. To reset the seed
values to their original values, use:
>>> sli.seed('default')
To specify the seed values, use:
>>> sli.seed(s)
where ``s`` must be an integer or array of 55 floats. If an integer, the array
of floats is obtained by using ``numpy.random.rand`` with the given integer
seed.
To simply generate some random numbers, type:
>>> sli.rand(n)
where ``n`` is the number of random numbers to generate.
Remarks
-------
The above functions all automatically detect the appropriate interface and work
with both real and complex data types, passing input arguments to the proper
backend routine.
"""
import scipy.linalg._interpolative_backend as _backend
import numpy as np
import sys
# Public API of this module.
__all__ = [
    'estimate_rank',
    'estimate_spectral_norm',
    'estimate_spectral_norm_diff',
    'id_to_svd',
    'interp_decomp',
    'rand',
    'reconstruct_interp_matrix',
    'reconstruct_matrix_from_id',
    'reconstruct_skel_matrix',
    'seed',
    'svd',
]

# Pre-built exception instances shared by the dispatch helpers below.
_DTYPE_ERROR = ValueError("invalid input dtype (input must be float64 or complex128)")
_TYPE_ERROR = TypeError("invalid input type (must be array or LinearOperator)")
_32BIT_ERROR = ValueError("interpolative decomposition on 32-bit systems "
                          "with complex128 is buggy")
# True when the Python build has a 32-bit address space (sys.maxsize fits
# in 32 bits); the complex128 randomized backends are disabled there.
_IS_32BIT = (sys.maxsize < 2**32)
def _is_real(A):
try:
if A.dtype == np.complex128:
return False
elif A.dtype == np.float64:
return True
else:
raise _DTYPE_ERROR
except AttributeError as e:
raise _TYPE_ERROR from e
def seed(seed=None):
    """
    Seed the internal random number generator used in this ID package.

    The generator is a lagged Fibonacci method with 55-element internal state.

    Parameters
    ----------
    seed : int, sequence, 'default', optional
        If 'default', the random seed is reset to a default value.

        If `seed` is a sequence containing 55 floating-point numbers
        in range [0,1], these are used to set the internal state of
        the generator.

        If the value is an integer, the internal state is obtained
        from `numpy.random.RandomState` (MT19937) with the integer
        used as the initial seed.

        If `seed` is omitted (None), ``numpy.random.rand`` is used to
        initialize the generator.
    """
    # Dispatch on the form of `seed`; see :func:`_backend.id_srand`,
    # :func:`_backend.id_srandi`, and :func:`_backend.id_srando`.
    if isinstance(seed, str) and seed == 'default':
        # Restore the backend's built-in default state.
        _backend.id_srando()
        return
    if hasattr(seed, '__len__'):
        # Explicit 55-element state vector: validate, then install it.
        state = np.asfortranarray(seed, dtype=float)
        if state.shape != (55,):
            raise ValueError("invalid input size")
        if state.min() < 0 or state.max() > 1:
            raise ValueError("values not in range [0,1]")
        _backend.id_srandi(state)
    elif seed is None:
        # No seed given: draw the state from NumPy's global generator.
        _backend.id_srandi(np.random.rand(55))
    else:
        # Integer seed: derive the state from a dedicated RandomState.
        _backend.id_srandi(np.random.RandomState(seed).rand(55))
def rand(*shape):
    """
    Generate standard uniform pseudorandom numbers via a very efficient lagged
    Fibonacci method.

    This routine is used for all random number generation in this package and
    can affect ID and SVD results.

    Parameters
    ----------
    *shape
        Shape of output array
    """
    # Draw a flat buffer from the backend generator, then reshape.
    # See :func:`_backend.id_srand` and :func:`_backend.id_srando`.
    count = np.prod(shape)
    return _backend.id_srand(count).reshape(shape)
def interp_decomp(A, eps_or_k, rand=True):
    """
    Compute ID of a matrix.

    An ID of a matrix `A` is a factorization defined by a rank `k`, a column
    index array `idx`, and interpolation coefficients `proj` such that::

        numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]]

    The original matrix can then be reconstructed as::

        numpy.hstack([A[:,idx[:k]],
                      numpy.dot(A[:,idx[:k]], proj)]
                     )[:,numpy.argsort(idx)]

    or via the routine :func:`reconstruct_matrix_from_id`. This can
    equivalently be written as::

        numpy.dot(A[:,idx[:k]],
                  numpy.hstack([numpy.eye(k), proj])
                  )[:,np.argsort(idx)]

    in terms of the skeleton and interpolation matrices::

        B = A[:,idx[:k]]

    and::

        P = numpy.hstack([numpy.eye(k), proj])[:,np.argsort(idx)]

    respectively. See also :func:`reconstruct_interp_matrix` and
    :func:`reconstruct_skel_matrix`.

    The ID can be computed to any relative precision or rank (depending on the
    value of `eps_or_k`). If a precision is specified (`eps_or_k < 1`), then
    this function has the output signature::

        k, idx, proj = interp_decomp(A, eps_or_k)

    Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output
    signature is::

        idx, proj = interp_decomp(A, eps_or_k)

    ..  This function automatically detects the form of the input parameters
        and passes them to the appropriate backend. For details, see
        :func:`_backend.iddp_id`, :func:`_backend.iddp_aid`,
        :func:`_backend.iddp_rid`, :func:`_backend.iddr_id`,
        :func:`_backend.iddr_aid`, :func:`_backend.iddr_rid`,
        :func:`_backend.idzp_id`, :func:`_backend.idzp_aid`,
        :func:`_backend.idzp_rid`, :func:`_backend.idzr_id`,
        :func:`_backend.idzr_aid`, and :func:`_backend.idzr_rid`.

    Parameters
    ----------
    A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec`
        Matrix to be factored
    eps_or_k : float or int
        Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of
        approximation.
    rand : bool, optional
        Whether to use random sampling if `A` is of type :class:`numpy.ndarray`
        (randomized algorithms are always used if `A` is of type
        :class:`scipy.sparse.linalg.LinearOperator`).

    Returns
    -------
    k : int
        Rank required to achieve specified relative precision if
        `eps_or_k < 1`.
    idx : :class:`numpy.ndarray`
        Column index array.
    proj : :class:`numpy.ndarray`
        Interpolation coefficients.
    """
    from scipy.sparse.linalg import LinearOperator

    # Select the backend family: idd* (real) vs idz* (complex).
    real = _is_real(A)

    if isinstance(A, np.ndarray):
        if eps_or_k < 1:
            # Fixed-precision mode: the backend also computes the rank k.
            eps = eps_or_k
            if rand:
                if real:
                    k, idx, proj = _backend.iddp_aid(eps, A)
                else:
                    # The complex randomized routine is known-broken on
                    # 32-bit builds; refuse rather than return garbage.
                    if _IS_32BIT:
                        raise _32BIT_ERROR
                    k, idx, proj = _backend.idzp_aid(eps, A)
            else:
                if real:
                    k, idx, proj = _backend.iddp_id(eps, A)
                else:
                    k, idx, proj = _backend.idzp_id(eps, A)
            # Backend returns 1-based (Fortran) indices; convert to 0-based.
            return k, idx - 1, proj
        else:
            # Fixed-rank mode.
            k = int(eps_or_k)
            if rand:
                if real:
                    idx, proj = _backend.iddr_aid(A, k)
                else:
                    if _IS_32BIT:
                        raise _32BIT_ERROR
                    idx, proj = _backend.idzr_aid(A, k)
            else:
                if real:
                    idx, proj = _backend.iddr_id(A, k)
                else:
                    idx, proj = _backend.idzr_id(A, k)
            # 1-based -> 0-based index conversion, as above.
            return idx - 1, proj
    elif isinstance(A, LinearOperator):
        # Matrix given only by its action: randomized *_rid routines,
        # which need the adjoint action (rmatvec) of the operator.
        m, n = A.shape
        matveca = A.rmatvec
        if eps_or_k < 1:
            eps = eps_or_k
            if real:
                k, idx, proj = _backend.iddp_rid(eps, m, n, matveca)
            else:
                if _IS_32BIT:
                    raise _32BIT_ERROR
                k, idx, proj = _backend.idzp_rid(eps, m, n, matveca)
            return k, idx - 1, proj
        else:
            k = int(eps_or_k)
            if real:
                idx, proj = _backend.iddr_rid(m, n, matveca, k)
            else:
                if _IS_32BIT:
                    raise _32BIT_ERROR
                idx, proj = _backend.idzr_rid(m, n, matveca, k)
            return idx - 1, proj
    else:
        raise _TYPE_ERROR
def reconstruct_matrix_from_id(B, idx, proj):
    """
    Reconstruct matrix from its ID.

    A matrix `A` with skeleton matrix `B` and ID indices and coefficients `idx`
    and `proj`, respectively, can be reconstructed as::

        numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]

    See also :func:`reconstruct_interp_matrix` and
    :func:`reconstruct_skel_matrix`.

    Parameters
    ----------
    B : :class:`numpy.ndarray`
        Skeleton matrix.
    idx : :class:`numpy.ndarray`
        Column index array.
    proj : :class:`numpy.ndarray`
        Interpolation coefficients.

    Returns
    -------
    :class:`numpy.ndarray`
        Reconstructed matrix.
    """
    # Choose the real or complex backend from B's dtype; the backend
    # expects 1-based (Fortran) column indices.
    reconid = _backend.idd_reconid if _is_real(B) else _backend.idz_reconid
    return reconid(B, idx + 1, proj)
def reconstruct_interp_matrix(idx, proj):
    """
    Reconstruct interpolation matrix from ID.

    The interpolation matrix can be reconstructed from the ID indices and
    coefficients `idx` and `proj`, respectively, as::

        P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)]

    The original matrix can then be reconstructed from its skeleton matrix `B`
    via ``numpy.dot(B, P)``.

    See also :func:`reconstruct_matrix_from_id` and
    :func:`reconstruct_skel_matrix`.

    Parameters
    ----------
    idx : :class:`numpy.ndarray`
        Column index array.
    proj : :class:`numpy.ndarray`
        Interpolation coefficients.

    Returns
    -------
    :class:`numpy.ndarray`
        Interpolation matrix.
    """
    # Real vs complex backend selected from proj's dtype; indices are
    # converted to the 1-based convention the Fortran backend expects.
    if _is_real(proj):
        reconint = _backend.idd_reconint
    else:
        reconint = _backend.idz_reconint
    return reconint(idx + 1, proj)
def reconstruct_skel_matrix(A, k, idx):
    """
    Reconstruct skeleton matrix from ID.

    The skeleton matrix can be reconstructed from the original matrix `A` and
    its ID rank and indices `k` and `idx`, respectively, as::

        B = A[:,idx[:k]]

    The original matrix can then be reconstructed via::

        numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]

    See also :func:`reconstruct_matrix_from_id` and
    :func:`reconstruct_interp_matrix`.

    Parameters
    ----------
    A : :class:`numpy.ndarray`
        Original matrix.
    k : int
        Rank of ID.
    idx : :class:`numpy.ndarray`
        Column index array.

    Returns
    -------
    :class:`numpy.ndarray`
        Skeleton matrix.
    """
    # The backend copies the selected columns; it takes 1-based indices.
    copycols = _backend.idd_copycols if _is_real(A) else _backend.idz_copycols
    return copycols(A, k, idx + 1)
def id_to_svd(B, idx, proj):
    """
    Convert ID to SVD.

    The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices
    and coefficients `idx` and `proj`, respectively, is::

        U, S, V = id_to_svd(B, idx, proj)
        A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))

    See also :func:`svd`.

    Parameters
    ----------
    B : :class:`numpy.ndarray`
        Skeleton matrix.
    idx : :class:`numpy.ndarray`
        Column index array.
    proj : :class:`numpy.ndarray`
        Interpolation coefficients.

    Returns
    -------
    U : :class:`numpy.ndarray`
        Left singular vectors.
    S : :class:`numpy.ndarray`
        Singular values.
    V : :class:`numpy.ndarray`
        Right singular vectors.
    """
    # The backend returns (U, V, S); reorder to the conventional (U, S, V).
    # Indices are shifted to the backend's 1-based convention.
    id2svd = _backend.idd_id2svd if _is_real(B) else _backend.idz_id2svd
    U, V, S = id2svd(B, idx + 1, proj)
    return U, S, V
def estimate_spectral_norm(A, its=20):
    """
    Estimate spectral norm of a matrix by the randomized power method.

    Parameters
    ----------
    A : :class:`scipy.sparse.linalg.LinearOperator`
        Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the
        `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
        A plain :class:`numpy.ndarray` is also accepted and converted via
        :func:`scipy.sparse.linalg.aslinearoperator`.
    its : int, optional
        Number of power method iterations.

    Returns
    -------
    float
        Spectral norm estimate.
    """
    from scipy.sparse.linalg import aslinearoperator
    # Accept plain arrays too: wrap everything as a LinearOperator.
    op = aslinearoperator(A)
    m, n = op.shape
    # Pass the operator's adjoint and forward actions straight through.
    if _is_real(op):
        snorm = _backend.idd_snorm
    else:
        snorm = _backend.idz_snorm
    return snorm(m, n, op.rmatvec, op.matvec, its=its)
def estimate_spectral_norm_diff(A, B, its=20):
    """
    Estimate spectral norm of the difference of two matrices by the randomized
    power method.

    Parameters
    ----------
    A : :class:`scipy.sparse.linalg.LinearOperator`
        First matrix, given as a :class:`scipy.sparse.linalg.LinearOperator`
        with the `matvec` and `rmatvec` methods (to apply the matrix and its
        adjoint).
    B : :class:`scipy.sparse.linalg.LinearOperator`
        Second matrix, given in the same form as `A`.
    its : int, optional
        Number of power method iterations.

    Returns
    -------
    float
        Spectral norm estimate of matrix difference.
    """
    from scipy.sparse.linalg import aslinearoperator
    # Wrap both operands so plain arrays are accepted as well.
    opA = aslinearoperator(A)
    opB = aslinearoperator(B)
    m, n = opA.shape
    # Backend signature: (m, n, adjoint1, adjoint2, forward1, forward2).
    if _is_real(opA):
        diffsnorm = _backend.idd_diffsnorm
    else:
        diffsnorm = _backend.idz_diffsnorm
    return diffsnorm(m, n, opA.rmatvec, opB.rmatvec, opA.matvec, opB.matvec,
                     its=its)
def svd(A, eps_or_k, rand=True):
    """
    Compute SVD of a matrix via an ID.

    An SVD of a matrix `A` is a factorization::

        A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))

    where `U` and `V` have orthonormal columns and `S` is nonnegative.

    The SVD can be computed to any relative precision or rank (depending on the
    value of `eps_or_k`).

    See also :func:`interp_decomp` and :func:`id_to_svd`.

    ..  This function automatically detects the form of the input parameters and
        passes them to the appropriate backend. For details, see
        :func:`_backend.iddp_svd`, :func:`_backend.iddp_asvd`,
        :func:`_backend.iddp_rsvd`, :func:`_backend.iddr_svd`,
        :func:`_backend.iddr_asvd`, :func:`_backend.iddr_rsvd`,
        :func:`_backend.idzp_svd`, :func:`_backend.idzp_asvd`,
        :func:`_backend.idzp_rsvd`, :func:`_backend.idzr_svd`,
        :func:`_backend.idzr_asvd`, and :func:`_backend.idzr_rsvd`.

    Parameters
    ----------
    A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`
        Matrix to be factored, given as either a :class:`numpy.ndarray` or a
        :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and
        `rmatvec` methods (to apply the matrix and its adjoint).
    eps_or_k : float or int
        Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of
        approximation.
    rand : bool, optional
        Whether to use random sampling if `A` is of type :class:`numpy.ndarray`
        (randomized algorithms are always used if `A` is of type
        :class:`scipy.sparse.linalg.LinearOperator`).

    Returns
    -------
    U : :class:`numpy.ndarray`
        Left singular vectors.
    S : :class:`numpy.ndarray`
        Singular values.
    V : :class:`numpy.ndarray`
        Right singular vectors.
    """
    from scipy.sparse.linalg import LinearOperator

    # Select the backend family: idd* (real) vs idz* (complex).
    real = _is_real(A)

    if isinstance(A, np.ndarray):
        if eps_or_k < 1:
            # Fixed-precision mode.
            eps = eps_or_k
            if rand:
                if real:
                    U, V, S = _backend.iddp_asvd(eps, A)
                else:
                    # Complex randomized routine is broken on 32-bit builds.
                    if _IS_32BIT:
                        raise _32BIT_ERROR
                    U, V, S = _backend.idzp_asvd(eps, A)
            else:
                if real:
                    U, V, S = _backend.iddp_svd(eps, A)
                else:
                    U, V, S = _backend.idzp_svd(eps, A)
        else:
            # Fixed-rank mode: the rank cannot exceed min(m, n).
            k = int(eps_or_k)
            if k > min(A.shape):
                raise ValueError("Approximation rank {} exceeds min(A.shape) = "
                                 " {} ".format(k, min(A.shape)))
            if rand:
                if real:
                    U, V, S = _backend.iddr_asvd(A, k)
                else:
                    if _IS_32BIT:
                        raise _32BIT_ERROR
                    U, V, S = _backend.idzr_asvd(A, k)
            else:
                if real:
                    U, V, S = _backend.iddr_svd(A, k)
                else:
                    U, V, S = _backend.idzr_svd(A, k)
    elif isinstance(A, LinearOperator):
        # Matrix given by its action: randomized *_rsvd routines need both
        # the forward (matvec) and adjoint (rmatvec) actions.
        m, n = A.shape
        def matvec(x):
            return A.matvec(x)
        def matveca(x):
            return A.rmatvec(x)
        if eps_or_k < 1:
            eps = eps_or_k
            if real:
                U, V, S = _backend.iddp_rsvd(eps, m, n, matveca, matvec)
            else:
                if _IS_32BIT:
                    raise _32BIT_ERROR
                U, V, S = _backend.idzp_rsvd(eps, m, n, matveca, matvec)
        else:
            k = int(eps_or_k)
            if real:
                U, V, S = _backend.iddr_rsvd(m, n, matveca, matvec, k)
            else:
                if _IS_32BIT:
                    raise _32BIT_ERROR
                U, V, S = _backend.idzr_rsvd(m, n, matveca, matvec, k)
    else:
        raise _TYPE_ERROR
    # Backend returns (U, V, S); reorder to the conventional (U, S, V).
    return U, S, V
def estimate_rank(A, eps):
    """
    Estimate matrix rank to a specified relative precision using randomized
    methods.

    The matrix `A` can be given as either a :class:`numpy.ndarray` or a
    :class:`scipy.sparse.linalg.LinearOperator`, with different algorithms used
    for each case. If `A` is of type :class:`numpy.ndarray`, then the output
    rank is typically about 8 higher than the actual numerical rank.

    Parameters
    ----------
    A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`
        Matrix whose rank is to be estimated, given as either a
        :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator`
        with the `rmatvec` method (to apply the matrix adjoint).
    eps : float
        Relative error for numerical rank definition.

    Returns
    -------
    int
        Estimated matrix rank.
    """
    from scipy.sparse.linalg import LinearOperator

    real = _is_real(A)
    if isinstance(A, np.ndarray):
        estrank = _backend.idd_estrank if real else _backend.idz_estrank
        rank = estrank(eps, A)
        if rank == 0:
            # The backend returns 0 to signal "nearly full rank"; report the
            # maximum possible rank instead.
            rank = min(A.shape)
        return rank
    if isinstance(A, LinearOperator):
        # Operator form: only the adjoint action (rmatvec) is needed.
        m, n = A.shape
        findrank = _backend.idd_findrank if real else _backend.idz_findrank
        return findrank(eps, m, n, A.rmatvec)
    raise _TYPE_ERROR
| 32,188
| 30.775913
| 92
|
py
|
scipy
|
scipy-main/scipy/linalg/_decomp_qz.py
|
import warnings
import numpy as np
from numpy import asarray_chkfinite
from ._misc import LinAlgError, _datacopied, LinAlgWarning
from .lapack import get_lapack_funcs
# Public API of this module.
__all__ = ['qz', 'ordqz']

# dtype characters that should promote to double precision ('D') rather
# than single precision ('F') when a complex QZ output is requested.
_double_precision = ['i', 'l', 'd']
def _select_function(sort):
if callable(sort):
# assume the user knows what they're doing
sfunction = sort
elif sort == 'lhp':
sfunction = _lhp
elif sort == 'rhp':
sfunction = _rhp
elif sort == 'iuc':
sfunction = _iuc
elif sort == 'ouc':
sfunction = _ouc
else:
raise ValueError("sort parameter must be None, a callable, or "
"one of ('lhp','rhp','iuc','ouc')")
return sfunction
def _lhp(x, y):
out = np.empty_like(x, dtype=bool)
nonzero = (y != 0)
# handles (x, y) = (0, 0) too
out[~nonzero] = False
out[nonzero] = (np.real(x[nonzero]/y[nonzero]) < 0.0)
return out
def _rhp(x, y):
out = np.empty_like(x, dtype=bool)
nonzero = (y != 0)
# handles (x, y) = (0, 0) too
out[~nonzero] = False
out[nonzero] = (np.real(x[nonzero]/y[nonzero]) > 0.0)
return out
def _iuc(x, y):
out = np.empty_like(x, dtype=bool)
nonzero = (y != 0)
# handles (x, y) = (0, 0) too
out[~nonzero] = False
out[nonzero] = (abs(x[nonzero]/y[nonzero]) < 1.0)
return out
def _ouc(x, y):
out = np.empty_like(x, dtype=bool)
xzero = (x == 0)
yzero = (y == 0)
out[xzero & yzero] = False
out[~xzero & yzero] = True
out[~yzero] = (abs(x[~yzero]/y[~yzero]) > 1.0)
return out
def _qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False,
        overwrite_b=False, check_finite=True):
    # Shared implementation for qz()/ordqz(): validate inputs, promote dtypes,
    # and run an *unsorted* LAPACK ?gges call. Returns the raw gges result
    # tuple plus the LAPACK typecode used.
    if sort is not None:
        # Disabled due to segfaults on win32, see ticket 1717.
        raise ValueError("The 'sort' input of qz() has to be None and will be "
                         "removed in a future release. Use ordqz instead.")
    if output not in ['real', 'complex', 'r', 'c']:
        raise ValueError("argument must be 'real', or 'complex'")
    if check_finite:
        a1 = asarray_chkfinite(A)
        b1 = asarray_chkfinite(B)
    else:
        a1 = np.asarray(A)
        b1 = np.asarray(B)
    a_m, a_n = a1.shape
    b_m, b_n = b1.shape
    if not (a_m == a_n == b_m == b_n):
        raise ValueError("Array dimensions must be square and agree")
    # Promote each input to complex when complex output is requested:
    # double-precision-ish dtypes go to 'D', everything else to 'F'.
    typa = a1.dtype.char
    if output in ['complex', 'c'] and typa not in ['F', 'D']:
        if typa in _double_precision:
            a1 = a1.astype('D')
            typa = 'D'
        else:
            a1 = a1.astype('F')
            typa = 'F'
    typb = b1.dtype.char
    if output in ['complex', 'c'] and typb not in ['F', 'D']:
        if typb in _double_precision:
            b1 = b1.astype('D')
            typb = 'D'
        else:
            b1 = b1.astype('F')
            typb = 'F'
    # A dtype conversion above already produced a copy, so overwriting is safe.
    overwrite_a = overwrite_a or (_datacopied(a1, A))
    overwrite_b = overwrite_b or (_datacopied(b1, B))
    gges, = get_lapack_funcs(('gges',), (a1, b1))
    if lwork is None or lwork == -1:
        # LAPACK workspace query: lwork=-1 makes gges return the optimal
        # work array size in work[0] (second-to-last result entry).
        result = gges(lambda x: None, a1, b1, lwork=-1)
        lwork = result[-2][0].real.astype(np.int_)
    # Dummy selector: sorting is disabled (sort_t=0), but gges still
    # requires a callable argument.
    def sfunction(x):
        return None
    result = gges(sfunction, a1, b1, lwork=lwork, overwrite_a=overwrite_a,
                  overwrite_b=overwrite_b, sort_t=0)
    info = result[-1]
    if info < 0:
        raise ValueError(f"Illegal value in argument {-info} of gges")
    elif info > 0 and info <= a_n:
        warnings.warn("The QZ iteration failed. (a,b) are not in Schur "
                      "form, but ALPHAR(j), ALPHAI(j), and BETA(j) should be "
                      "correct for J={},...,N".format(info-1), LinAlgWarning,
                      stacklevel=3)
    elif info == a_n+1:
        raise LinAlgError("Something other than QZ iteration failed")
    elif info == a_n+2:
        raise LinAlgError("After reordering, roundoff changed values of some "
                          "complex eigenvalues so that leading eigenvalues "
                          "in the Generalized Schur form no longer satisfy "
                          "sort=True. This could also be due to scaling.")
    elif info == a_n+3:
        raise LinAlgError("Reordering failed in <s,d,c,z>tgsen")
    return result, gges.typecode
def qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False,
       overwrite_b=False, check_finite=True):
    """
    QZ decomposition for generalized eigenvalues of a pair of matrices.

    The QZ, or generalized Schur, decomposition for a pair of n-by-n
    matrices (A,B) is::

        (A,B) = (Q @ AA @ Z*, Q @ BB @ Z*)

    where AA, BB is in generalized Schur form if BB is upper-triangular
    with non-negative diagonal and AA is upper-triangular, or for real QZ
    decomposition (``output='real'``) block upper triangular with 1x1
    and 2x2 blocks. In this case, the 1x1 blocks correspond to real
    generalized eigenvalues and 2x2 blocks are 'standardized' by making
    the corresponding elements of BB have the form::

        [ a 0 ]
        [ 0 b ]

    and the pair of corresponding 2x2 blocks in AA and BB will have a complex
    conjugate pair of generalized eigenvalues. If (``output='complex'``) or
    A and B are complex matrices, Z' denotes the conjugate-transpose of Z.
    Q and Z are unitary matrices.

    Parameters
    ----------
    A : (N, N) array_like
        2-D array to decompose
    B : (N, N) array_like
        2-D array to decompose
    output : {'real', 'complex'}, optional
        Construct the real or complex QZ decomposition for real matrices.
        Default is 'real'.
    lwork : int, optional
        Work array size. If None or -1, it is automatically computed.
    sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
        NOTE: THIS INPUT IS DISABLED FOR NOW. Use ordqz instead.

        Specifies whether the upper eigenvalues should be sorted. A callable
        may be passed that, given a eigenvalue, returns a boolean denoting
        whether the eigenvalue should be sorted to the top-left (True). For
        real matrix pairs, the sort function takes three real arguments
        (alphar, alphai, beta). The eigenvalue
        ``x = (alphar + alphai*1j)/beta``. For complex matrix pairs or
        output='complex', the sort function takes two complex arguments
        (alpha, beta). The eigenvalue ``x = (alpha/beta)``. Alternatively,
        string parameters may be used:

            - 'lhp'   Left-hand plane (x.real < 0.0)
            - 'rhp'   Right-hand plane (x.real > 0.0)
            - 'iuc'   Inside the unit circle (x*x.conjugate() < 1.0)
            - 'ouc'   Outside the unit circle (x*x.conjugate() > 1.0)

        Defaults to None (no sorting).
    overwrite_a : bool, optional
        Whether to overwrite data in a (may improve performance)
    overwrite_b : bool, optional
        Whether to overwrite data in b (may improve performance)
    check_finite : bool, optional
        If true checks the elements of `A` and `B` are finite numbers. If
        false does no checking and passes matrix through to
        underlying algorithm.

    Returns
    -------
    AA : (N, N) ndarray
        Generalized Schur form of A.
    BB : (N, N) ndarray
        Generalized Schur form of B.
    Q : (N, N) ndarray
        The left Schur vectors.
    Z : (N, N) ndarray
        The right Schur vectors.

    See Also
    --------
    ordqz

    Notes
    -----
    Q is transposed versus the equivalent function in Matlab.

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import qz

    >>> A = np.array([[1, 2, -1], [5, 5, 5], [2, 4, -8]])
    >>> B = np.array([[1, 1, -3], [3, 1, -1], [5, 6, -2]])

    Compute the decomposition.  The QZ decomposition is not unique, so
    depending on the underlying library that is used, there may be
    differences in the signs of coefficients in the following output.

    >>> AA, BB, Q, Z = qz(A, B)
    >>> AA
    array([[-1.36949157, -4.05459025,  7.44389431],
           [ 0.        ,  7.65653432,  5.13476017],
           [ 0.        , -0.65978437,  2.4186015 ]])  # may vary
    >>> BB
    array([[ 1.71890633, -1.64723705, -0.72696385],
           [ 0.        ,  8.6965692 , -0.        ],
           [ 0.        ,  0.        ,  2.27446233]])  # may vary
    >>> Q
    array([[-0.37048362,  0.1903278 ,  0.90912992],
           [-0.90073232,  0.16534124, -0.40167593],
           [ 0.22676676,  0.96769706, -0.11017818]])  # may vary
    >>> Z
    array([[-0.67660785,  0.63528924, -0.37230283],
           [ 0.70243299,  0.70853819, -0.06753907],
           [ 0.22088393, -0.30721526, -0.92565062]])  # may vary

    Verify the QZ decomposition.  With real output, we only need the
    transpose of ``Z`` in the following expressions.

    >>> Q @ AA @ Z.T  # Should be A
    array([[ 1.,  2., -1.],
           [ 5.,  5.,  5.],
           [ 2.,  4., -8.]])
    >>> Q @ BB @ Z.T  # Should be B
    array([[ 1.,  1., -3.],
           [ 3.,  1., -1.],
           [ 5.,  6., -2.]])

    Repeat the decomposition, but with ``output='complex'``.

    >>> AA, BB, Q, Z = qz(A, B, output='complex')

    For conciseness in the output, we use ``np.set_printoptions()`` to set
    the output precision of NumPy arrays to 3 and display tiny values as 0.

    >>> np.set_printoptions(precision=3, suppress=True)
    >>> AA
    array([[-1.369+0.j   ,  2.248+4.237j,  4.861-5.022j],
           [ 0.   +0.j   ,  7.037+2.922j,  0.794+4.932j],
           [ 0.   +0.j   ,  0.   +0.j   ,  2.655-1.103j]])  # may vary
    >>> BB
    array([[ 1.719+0.j   , -1.115+1.j   , -0.763-0.646j],
           [ 0.   +0.j   ,  7.24 +0.j   , -3.144+3.322j],
           [ 0.   +0.j   ,  0.   +0.j   ,  2.732+0.j   ]])  # may vary
    >>> Q
    array([[ 0.326+0.175j, -0.273-0.029j, -0.886-0.052j],
           [ 0.794+0.426j, -0.093+0.134j,  0.402-0.02j ],
           [-0.2  -0.107j, -0.816+0.482j,  0.151-0.167j]])  # may vary
    >>> Z
    array([[ 0.596+0.32j , -0.31 +0.414j,  0.393-0.347j],
           [-0.619-0.332j, -0.479+0.314j,  0.154-0.393j],
           [-0.195-0.104j,  0.576+0.27j ,  0.715+0.187j]])  # may vary

    With complex arrays, we must use ``Z.conj().T`` in the following
    expressions to verify the decomposition.

    >>> Q @ AA @ Z.conj().T  # Should be A
    array([[ 1.-0.j,  2.-0.j, -1.-0.j],
           [ 5.+0.j,  5.+0.j,  5.-0.j],
           [ 2.+0.j,  4.+0.j, -8.+0.j]])
    >>> Q @ BB @ Z.conj().T  # Should be B
    array([[ 1.+0.j,  1.+0.j, -3.+0.j],
           [ 3.-0.j,  1.-0.j, -1.+0.j],
           [ 5.+0.j,  6.+0.j, -2.+0.j]])

    """
    # output for real
    # AA, BB, sdim, alphar, alphai, beta, vsl, vsr, work, info
    # output for complex
    # AA, BB, sdim, alpha, beta, vsl, vsr, work, info
    result, _ = _qz(A, B, output=output, lwork=lwork, sort=sort,
                    overwrite_a=overwrite_a, overwrite_b=overwrite_b,
                    check_finite=check_finite)
    # Extract AA, BB, and the Schur vectors Q (vsl) and Z (vsr); negative
    # indices work for both the real and complex result layouts above.
    return result[0], result[1], result[-4], result[-3]
def ordqz(A, B, sort='lhp', output='real', overwrite_a=False,
          overwrite_b=False, check_finite=True):
    """QZ decomposition for a pair of matrices with reordering.
    Parameters
    ----------
    A : (N, N) array_like
        2-D array to decompose
    B : (N, N) array_like
        2-D array to decompose
    sort : {callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
        Specifies whether the upper eigenvalues should be sorted. A
        callable may be passed that, given an ordered pair ``(alpha,
        beta)`` representing the eigenvalue ``x = (alpha/beta)``,
        returns a boolean denoting whether the eigenvalue should be
        sorted to the top-left (True). For the real matrix pairs
        ``beta`` is real while ``alpha`` can be complex, and for
        complex matrix pairs both ``alpha`` and ``beta`` can be
        complex. The callable must be able to accept a NumPy
        array. Alternatively, string parameters may be used:
        - 'lhp' Left-hand plane (x.real < 0.0)
        - 'rhp' Right-hand plane (x.real > 0.0)
        - 'iuc' Inside the unit circle (x*x.conjugate() < 1.0)
        - 'ouc' Outside the unit circle (x*x.conjugate() > 1.0)
        With the predefined sorting functions, an infinite eigenvalue
        (i.e., ``alpha != 0`` and ``beta = 0``) is considered to lie in
        neither the left-hand nor the right-hand plane, but it is
        considered to lie outside the unit circle. For the eigenvalue
        ``(alpha, beta) = (0, 0)``, the predefined sorting functions
        all return `False`.
    output : str {'real','complex'}, optional
        Construct the real or complex QZ decomposition for real matrices.
        Default is 'real'.
    overwrite_a : bool, optional
        If True, the contents of A are overwritten.
    overwrite_b : bool, optional
        If True, the contents of B are overwritten.
    check_finite : bool, optional
        If true checks the elements of `A` and `B` are finite numbers. If
        false does no checking and passes matrix through to
        underlying algorithm.
    Returns
    -------
    AA : (N, N) ndarray
        Generalized Schur form of A.
    BB : (N, N) ndarray
        Generalized Schur form of B.
    alpha : (N,) ndarray
        alpha = alphar + alphai * 1j. See notes.
    beta : (N,) ndarray
        See notes.
    Q : (N, N) ndarray
        The left Schur vectors.
    Z : (N, N) ndarray
        The right Schur vectors.
    See Also
    --------
    qz
    Notes
    -----
    On exit, ``(ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N``, will be the
    generalized eigenvalues. ``ALPHAR(j) + ALPHAI(j)*i`` and
    ``BETA(j),j=1,...,N`` are the diagonals of the complex Schur form (S,T)
    that would result if the 2-by-2 diagonal blocks of the real generalized
    Schur form of (A,B) were further reduced to triangular form using complex
    unitary transformations. If ALPHAI(j) is zero, then the jth eigenvalue is
    real; if positive, then the ``j``th and ``(j+1)``st eigenvalues are a
    complex conjugate pair, with ``ALPHAI(j+1)`` negative.
    .. versionadded:: 0.17.0
    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import ordqz
    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
    >>> B = np.array([[0, 6, 0, 0], [5, 0, 2, 1], [5, 2, 6, 6], [4, 7, 7, 7]])
    >>> AA, BB, alpha, beta, Q, Z = ordqz(A, B, sort='lhp')
    Since we have sorted for left half plane eigenvalues, negatives come first
    >>> (alpha/beta).real < 0
    array([ True, True, False, False], dtype=bool)
    """
    # First compute an unsorted QZ decomposition. _qz returns a 10-tuple for
    # the real routines (eigenvalues split as alphar, alphai, beta) and a
    # 9-tuple for the complex ones (alpha, beta); ``*ab`` absorbs whichever
    # eigenvalue components are present.
    (AA, BB, _, *ab, Q, Z, _, _), typ = _qz(A, B, output=output, sort=None,
                                            overwrite_a=overwrite_a,
                                            overwrite_b=overwrite_b,
                                            check_finite=check_finite)
    # Assemble complex eigenvalue numerators/denominators from the LAPACK
    # output; 's'/'d' are the single/double real routines, everything else
    # ('c'/'z') already returns complex alpha.
    if typ == 's':
        alpha, beta = ab[0] + ab[1]*np.complex64(1j), ab[2]
    elif typ == 'd':
        alpha, beta = ab[0] + ab[1]*1.j, ab[2]
    else:
        alpha, beta = ab
    # Turn the `sort` argument into a vectorized predicate and evaluate it on
    # the eigenvalues to obtain the LAPACK SELECT mask for reordering.
    sfunction = _select_function(sort)
    select = sfunction(alpha, beta)
    tgsen = get_lapack_funcs('tgsen', (AA, BB))
    # the real case needs 4n + 16 lwork
    lwork = 4*AA.shape[0] + 16 if typ in 'sd' else 1
    # ijob=0: reorder only, no condition-number estimates.
    AAA, BBB, *ab, QQ, ZZ, _, _, _, _, info = tgsen(select, AA, BB, Q, Z,
                                                    ijob=0,
                                                    lwork=lwork, liwork=1)
    # Once more for tgsen output
    if typ == 's':
        alpha, beta = ab[0] + ab[1]*np.complex64(1j), ab[2]
    elif typ == 'd':
        alpha, beta = ab[0] + ab[1]*1.j, ab[2]
    else:
        alpha, beta = ab
    if info < 0:
        raise ValueError(f"Illegal value in argument {-info} of tgsen")
    elif info == 1:
        raise ValueError("Reordering of (A, B) failed because the transformed"
                         " matrix pair (A, B) would be too far from "
                         "generalized Schur form; the problem is very "
                         "ill-conditioned. (A, B) may have been partially "
                         "reordered.")
    return AAA, BBB, alpha, beta, QQ, ZZ
| 16,336
| 35.304444
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/_misc.py
|
import numpy as np
from numpy.linalg import LinAlgError
from .blas import get_blas_funcs
from .lapack import get_lapack_funcs
__all__ = ['LinAlgError', 'LinAlgWarning', 'norm']
class LinAlgWarning(RuntimeWarning):
    """
    Warning emitted when a linear-algebra operation is close to the
    failure conditions of its algorithm, or when a loss of accuracy
    is expected.
    """
def norm(a, ord=None, axis=None, keepdims=False, check_finite=True):
    """
    Matrix or vector norm.
    This function is able to return one of eight different matrix norms,
    or one of an infinite number of vector norms (described below), depending
    on the value of the ``ord`` parameter. For tensors with rank different from
    1 or 2, only `ord=None` is supported.
    Parameters
    ----------
    a : array_like
        Input array. If `axis` is None, `a` must be 1-D or 2-D, unless `ord`
        is None. If both `axis` and `ord` are None, the 2-norm of
        ``a.ravel`` will be returned.
    ord : {int, inf, -inf, 'fro', 'nuc', None}, optional
        Order of the norm (see table under ``Notes``). inf means NumPy's
        `inf` object.
    axis : {int, 2-tuple of ints, None}, optional
        If `axis` is an integer, it specifies the axis of `a` along which to
        compute the vector norms. If `axis` is a 2-tuple, it specifies the
        axes that hold 2-D matrices, and the matrix norms of these matrices
        are computed. If `axis` is None then either a vector norm (when `a`
        is 1-D) or a matrix norm (when `a` is 2-D) is returned.
    keepdims : bool, optional
        If this is set to True, the axes which are normed over are left in the
        result as dimensions with size one. With this option the result will
        broadcast correctly against the original `a`.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    Returns
    -------
    n : float or ndarray
        Norm of the matrix or vector(s).
    Notes
    -----
    For values of ``ord <= 0``, the result is, strictly speaking, not a
    mathematical 'norm', but it may still be useful for various numerical
    purposes.
    The following norms can be calculated:
    =====  ============================  ==========================
    ord    norm for matrices             norm for vectors
    =====  ============================  ==========================
    None   Frobenius norm                2-norm
    'fro'  Frobenius norm                --
    'nuc'  nuclear norm                  --
    inf    max(sum(abs(a), axis=1))      max(abs(a))
    -inf   min(sum(abs(a), axis=1))      min(abs(a))
    0      --                            sum(a != 0)
    1      max(sum(abs(a), axis=0))      as below
    -1     min(sum(abs(a), axis=0))      as below
    2      2-norm (largest sing. value)  as below
    -2     smallest singular value       as below
    other  --                            sum(abs(a)**ord)**(1./ord)
    =====  ============================  ==========================
    The Frobenius norm is given by [1]_:
        :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
    The nuclear norm is the sum of the singular values.
    Both the Frobenius and nuclear norm orders are only defined for
    matrices.
    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
           Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import norm
    >>> a = np.arange(9) - 4.0
    >>> a
    array([-4., -3., -2., -1., 0., 1., 2., 3., 4.])
    >>> b = a.reshape((3, 3))
    >>> b
    array([[-4., -3., -2.],
           [-1., 0., 1.],
           [ 2., 3., 4.]])
    >>> norm(a)
    7.745966692414834
    >>> norm(b)
    7.745966692414834
    >>> norm(b, 'fro')
    7.745966692414834
    >>> norm(a, np.inf)
    4
    >>> norm(b, np.inf)
    9
    >>> norm(a, -np.inf)
    0
    >>> norm(b, -np.inf)
    2
    >>> norm(a, 1)
    20
    >>> norm(b, 1)
    7
    >>> norm(a, -1)
    -4.6566128774142013e-010
    >>> norm(b, -1)
    6
    >>> norm(a, 2)
    7.745966692414834
    >>> norm(b, 2)
    7.3484692283495345
    >>> norm(a, -2)
    0
    >>> norm(b, -2)
    1.8570331885190563e-016
    >>> norm(a, 3)
    5.8480354764257312
    >>> norm(a, -3)
    0
    """
    # Differs from numpy only in non-finite handling and the use of blas.
    if check_finite:
        a = np.asarray_chkfinite(a)
    else:
        a = np.asarray(a)
    # The fast paths below only apply to BLAS/LAPACK-native dtypes
    # (single/double real and complex) with default axis/keepdims handling.
    if a.size and a.dtype.char in 'fdFD' and axis is None and not keepdims:
        if ord in (None, 2) and (a.ndim == 1):
            # use blas for fast and stable euclidean norm
            nrm2 = get_blas_funcs('nrm2', dtype=a.dtype, ilp64='preferred')
            return nrm2(a)
        if a.ndim == 2:
            # Use lapack for a couple fast matrix norms.
            # For some reason the *lange frobenius norm is slow.
            lange_args = None
            # Make sure this works if the user uses the axis keywords
            # to apply the norm to the transpose.
            # *lange wants Fortran-ordered data; the 1-norm of an array is
            # the inf-norm of its transpose, so for C-ordered input we pass
            # the (Fortran-ordered) transpose with the swapped norm code.
            if ord == 1:
                if np.isfortran(a):
                    lange_args = '1', a
                elif np.isfortran(a.T):
                    lange_args = 'i', a.T
            elif ord == np.inf:
                if np.isfortran(a):
                    lange_args = 'i', a
                elif np.isfortran(a.T):
                    lange_args = '1', a.T
            if lange_args:
                lange = get_lapack_funcs('lange', dtype=a.dtype, ilp64='preferred')
                return lange(*lange_args)
    # fall back to numpy in every other case
    return np.linalg.norm(a, ord=ord, axis=axis, keepdims=keepdims)
def _datacopied(arr, original):
    """
    Return True only when `arr` is guaranteed not to share data with
    `original`, under the assumption that ``arr = asarray(original)``.
    """
    if arr is original:
        return False
    # An object exposing __array__ may have handed back its own buffer,
    # so a copy cannot be guaranteed in that case.
    wraps_own_buffer = (not isinstance(original, np.ndarray)
                        and hasattr(original, '__array__'))
    return False if wraps_own_buffer else arr.base is None
| 6,283
| 31.729167
| 83
|
py
|
scipy
|
scipy-main/scipy/linalg/decomp_lu.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.linalg` namespace for importing the functions
# included below.
import warnings
from . import _decomp_lu
# Names re-exported (with a deprecation warning) from scipy.linalg.
__all__ = ['lu', 'lu_solve', 'lu_factor', 'asarray_chkfinite',
           'LinAlgWarning', 'get_lapack_funcs',
           'get_flinalg_funcs']  # noqa: F822


def __dir__():
    """Expose only the deprecated public names (PEP 562)."""
    return __all__
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): lazily forward the legacy public
    # names to the private `_decomp_lu` module while warning callers that
    # this namespace is deprecated.
    if name not in __all__:
        raise AttributeError(
            "scipy.linalg.decomp_lu is deprecated and has no attribute "
            f"{name}. Try looking in scipy.linalg instead.")
    # stacklevel=2 attributes the warning to the caller's attribute access.
    warnings.warn(f"Please use `{name}` from the `scipy.linalg` namespace, "
                  "the `scipy.linalg.decomp_lu` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_decomp_lu, name)
| 856
| 26.645161
| 76
|
py
|
scipy
|
scipy-main/scipy/linalg/_interpolative_backend.py
|
#******************************************************************************
# Copyright (C) 2013 Kenneth L. Ho
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# None of the names of the copyright holders may be used to endorse or
# promote products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#******************************************************************************
"""
Direct wrappers for Fortran `id_dist` backend.
"""
import scipy.linalg._interpolative as _id
import numpy as np
_RETCODE_ERROR = RuntimeError("nonzero return code")


def _asfortranarray_copy(A):
    """
    Like np.asfortranarray, but always return a fresh buffer.
    """
    A = np.asarray(A)
    # np.asfortranarray is a no-op on already-F-contiguous input, so an
    # explicit copy is needed in that case to guarantee fresh storage.
    return A.copy(order="F") if A.flags.f_contiguous else np.asfortranarray(A)
#------------------------------------------------------------------------------
# id_rand.f
#------------------------------------------------------------------------------
def id_srand(n):
    """
    Generate `n` standard uniform pseudorandom numbers via a very efficient
    lagged Fibonacci method.

    :param n: Number of pseudorandom numbers to generate.
    :type n: int
    :return: Pseudorandom numbers.
    :rtype: :class:`numpy.ndarray`
    """
    return _id.id_srand(n)


def id_srandi(t):
    """
    Seed :func:`id_srand` with the 55 values in `t` (any appropriately
    random numbers will do).

    :param t: Array of 55 seed values.
    :type t: :class:`numpy.ndarray`
    """
    _id.id_srandi(np.asfortranarray(t))


def id_srando():
    """
    Reset the :func:`id_srand` seed values to their original values.
    """
    _id.id_srando()
#------------------------------------------------------------------------------
# idd_frm.f
#------------------------------------------------------------------------------
def idd_frm(n, w, x):
    """
    Transform the real vector `x` via a composition of Rokhlin's random
    transform, random subselection, and an FFT; the output is randomly
    permuted and has length `n`.

    `n` is the greatest power-of-two integer with ``n <= x.size`` and `w`
    is the initialization array, both as produced by :func:`idd_frmi`.

    :return: Transformed vector.
    :rtype: :class:`numpy.ndarray`
    """
    return _id.idd_frm(n, w, x)


def idd_sfrm(l, n, w, x):
    """
    Like :func:`idd_frm`, but tuned for a transformed length ``l <= n``
    known a priori; `n` and `w` come from :func:`idd_sfrmi`.

    :return: Transformed vector of length `l`.
    :rtype: :class:`numpy.ndarray`
    """
    return _id.idd_sfrm(l, n, w, x)


def idd_frmi(m):
    """
    Initialize data for :func:`idd_frm` for input vectors of length `m`.

    :return: Greatest power-of-two integer ``n <= m``, and the
        initialization array for :func:`idd_frm`.
    """
    return _id.idd_frmi(m)


def idd_sfrmi(l, m):
    """
    Initialize data for :func:`idd_sfrm` for output length `l` and input
    length `m`.

    :return: Greatest power-of-two integer ``n <= m``, and the
        initialization array for :func:`idd_sfrm`.
    """
    return _id.idd_sfrmi(l, m)
#------------------------------------------------------------------------------
# idd_id.f
#------------------------------------------------------------------------------
def iddp_id(eps, A):
    """
    Compute an ID of the real matrix `A` to relative precision `eps`.

    :return: Rank `k` of the ID, the column index array, and the
        interpolation coefficients as a ``(k, n-k)`` array.
    """
    A = _asfortranarray_copy(A)
    k, idx, _ = _id.iddp_id(eps, A)
    n = A.shape[1]
    # The backend writes the coefficients into A's storage; extract them.
    coefs = A.T.ravel()[:k*(n-k)]
    return k, idx, coefs.reshape((k, n-k), order='F')


def iddr_id(A, k):
    """
    Compute a rank-`k` ID of the real matrix `A`.

    :return: Column index array and ``(k, n-k)`` interpolation
        coefficients.
    """
    A = _asfortranarray_copy(A)
    idx, _ = _id.iddr_id(A, k)
    n = A.shape[1]
    coefs = A.T.ravel()[:k*(n-k)]
    return idx, coefs.reshape((k, n-k), order='F')


def idd_reconid(B, idx, proj):
    """
    Reconstruct the matrix approximated by a real ID with skeleton matrix
    `B`, column index array `idx`, and interpolation coefficients `proj`.

    :return: Reconstructed matrix.
    :rtype: :class:`numpy.ndarray`
    """
    B = np.asfortranarray(B)
    if proj.size == 0:
        # Full-rank ID: reconstruction is just a column permutation.
        return B[:, np.argsort(idx)]
    return _id.idd_reconid(B, idx, proj)


def idd_reconint(idx, proj):
    """
    Build the interpolation matrix of a real ID from its column index
    array `idx` and coefficients `proj`.

    :rtype: :class:`numpy.ndarray`
    """
    return _id.idd_reconint(idx, proj)


def idd_copycols(A, k, idx):
    """
    Extract the rank-`k` skeleton matrix of a real ID of `A`, i.e. the
    columns selected by `idx`.

    :rtype: :class:`numpy.ndarray`
    """
    return _id.idd_copycols(np.asfortranarray(A), k, idx)
#------------------------------------------------------------------------------
# idd_id2svd.f
#------------------------------------------------------------------------------
def idd_id2svd(B, idx, proj):
    """
    Convert a real ID (skeleton matrix `B`, column index array `idx`,
    interpolation coefficients `proj`) into an SVD.

    :return: Left singular vectors, right singular vectors, and singular
        values ``(U, V, S)``.
    :raises RuntimeError: on a nonzero backend return code.
    """
    U, V, S, ier = _id.idd_id2svd(np.asfortranarray(B), idx, proj)
    if ier:
        raise _RETCODE_ERROR
    return U, V, S
#------------------------------------------------------------------------------
# idd_snorm.f
#------------------------------------------------------------------------------
def idd_snorm(m, n, matvect, matvec, its=20):
    """
    Estimate the spectral norm of an `m`-by-`n` real matrix with `its`
    iterations of the randomized power method.

    `matvect` and `matvec` are callables applying the matrix transpose and
    the matrix, respectively, to a vector (``y = f(x)``).

    :return: Spectral norm estimate.
    :rtype: float
    """
    estimate, _ = _id.idd_snorm(m, n, matvect, matvec, its)
    return estimate


def idd_diffsnorm(m, n, matvect, matvect2, matvec, matvec2, its=20):
    """
    Estimate the spectral norm of the difference of two `m`-by-`n` real
    matrices by the randomized power method with `its` iterations.

    `matvect`/`matvect2` apply the transposes of the first/second matrix
    to a vector; `matvec`/`matvec2` apply the matrices themselves.

    :return: Spectral norm estimate of the matrix difference.
    :rtype: float
    """
    return _id.idd_diffsnorm(m, n, matvect, matvect2, matvec, matvec2, its)
#------------------------------------------------------------------------------
# idd_svd.f
#------------------------------------------------------------------------------
def iddr_svd(A, k):
    """
    Compute a rank-`k` SVD of the real matrix `A`.

    :return: ``(U, V, S)`` — left singular vectors, right singular
        vectors, singular values.
    :raises RuntimeError: on a nonzero backend return code.
    """
    U, V, S, ier = _id.iddr_svd(np.asfortranarray(A), k)
    if ier:
        raise _RETCODE_ERROR
    return U, V, S


def iddp_svd(eps, A):
    """
    Compute an SVD of the real matrix `A` to relative precision `eps`.

    :return: ``(U, V, S)`` — left singular vectors, right singular
        vectors, singular values.
    :raises RuntimeError: on a nonzero backend return code.
    """
    A = np.asfortranarray(A)
    m, n = A.shape
    k, iU, iV, iS, w, ier = _id.iddp_svd(eps, A)
    if ier:
        raise _RETCODE_ERROR
    # The factors are packed in the workspace w at 1-based offsets iU/iV/iS.
    U = w[iU-1:iU-1 + m*k].reshape((m, k), order='F')
    V = w[iV-1:iV-1 + n*k].reshape((n, k), order='F')
    return U, V, w[iS-1:iS-1 + k]
#------------------------------------------------------------------------------
# iddp_aid.f
#------------------------------------------------------------------------------
def iddp_aid(eps, A):
    """
    Compute an ID of the real matrix `A` to relative precision `eps` using
    random sampling.

    :return: Rank `k`, column index array, and ``(k, n-k)`` interpolation
        coefficients.
    """
    A = np.asfortranarray(A)
    m, n = A.shape
    n2, w = idd_frmi(m)
    # Workspace sized for the backend's iddp_aid routine.
    proj = np.empty(n*(2*n2 + 1) + n2 + 1, order='F')
    k, idx, proj = _id.iddp_aid(eps, A, w, proj)
    return k, idx, proj[:k*(n-k)].reshape((k, n-k), order='F')


def idd_estrank(eps, A):
    """
    Estimate the rank of the real matrix `A` to relative precision `eps`
    using random sampling.

    The output rank is typically about 8 higher than the actual rank.

    :return: Rank estimate.
    :rtype: int
    """
    A = np.asfortranarray(A)
    m, n = A.shape
    n2, w = idd_frmi(m)
    scratch = np.empty(n*n2 + (n + 1)*(n2 + 1), order='F')
    k, _ = _id.idd_estrank(eps, A, w, scratch)
    return k
#------------------------------------------------------------------------------
# iddp_asvd.f
#------------------------------------------------------------------------------
def iddp_asvd(eps, A):
    """
    Compute an SVD of the real matrix `A` to relative precision `eps` using
    random sampling.

    :return: ``(U, V, S)`` — left singular vectors, right singular
        vectors, singular values.
    :raises RuntimeError: on a nonzero backend return code.
    """
    A = np.asfortranarray(A)
    m, n = A.shape
    n2, winit = _id.idd_frmi(m)
    # Workspace sized for the backend's iddp_asvd routine.
    size = max((min(m, n) + 1)*(3*m + 5*n + 1) + 25*min(m, n)**2,
               (2*n + 1)*(n2 + 1))
    w = np.empty(size, order='F')
    k, iU, iV, iS, w, ier = _id.iddp_asvd(eps, A, winit, w)
    if ier:
        raise _RETCODE_ERROR
    # The factors are packed in w at 1-based offsets iU/iV/iS.
    U = w[iU-1:iU-1 + m*k].reshape((m, k), order='F')
    V = w[iV-1:iV-1 + n*k].reshape((n, k), order='F')
    return U, V, w[iS-1:iS-1 + k]
#------------------------------------------------------------------------------
# iddp_rid.f
#------------------------------------------------------------------------------
def iddp_rid(eps, m, n, matvect):
    """
    Compute an ID of an `m`-by-`n` real matrix to relative precision `eps`
    using random matrix-vector multiplication.

    `matvect` applies the matrix transpose to a vector (``y = matvect(x)``).

    :return: Rank `k`, column index array, and ``(k, n-k)`` interpolation
        coefficients.
    :raises RuntimeError: on a nonzero backend return code.
    """
    scratch = np.empty(m + 1 + 2*n*(min(m, n) + 1), order='F')
    k, idx, proj, ier = _id.iddp_rid(eps, m, n, matvect, scratch)
    if ier != 0:
        raise _RETCODE_ERROR
    return k, idx, proj[:k*(n-k)].reshape((k, n-k), order='F')


def idd_findrank(eps, m, n, matvect):
    """
    Estimate the rank of an `m`-by-`n` real matrix to relative precision
    `eps` using random matrix-vector multiplication with `matvect` (the
    transpose-apply callback).

    :return: Rank estimate.
    :rtype: int
    :raises RuntimeError: on a nonzero backend return code.
    """
    k, _, ier = _id.idd_findrank(eps, m, n, matvect)
    if ier:
        raise _RETCODE_ERROR
    return k
#------------------------------------------------------------------------------
# iddp_rsvd.f
#------------------------------------------------------------------------------
def iddp_rsvd(eps, m, n, matvect, matvec):
    """
    Compute an SVD of an `m`-by-`n` real matrix to relative precision `eps`
    using random matrix-vector multiplication.

    `matvect` and `matvec` apply the matrix transpose and the matrix,
    respectively, to a vector.

    :return: ``(U, V, S)`` — left singular vectors, right singular
        vectors, singular values.
    :raises RuntimeError: on a nonzero backend return code.
    """
    k, iU, iV, iS, w, ier = _id.iddp_rsvd(eps, m, n, matvect, matvec)
    if ier:
        raise _RETCODE_ERROR
    # The factors are packed in w at 1-based offsets iU/iV/iS.
    U = w[iU-1:iU-1 + m*k].reshape((m, k), order='F')
    V = w[iV-1:iV-1 + n*k].reshape((n, k), order='F')
    return U, V, w[iS-1:iS-1 + k]
#------------------------------------------------------------------------------
# iddr_aid.f
#------------------------------------------------------------------------------
def iddr_aid(A, k):
    """
    Compute a rank-`k` ID of the real matrix `A` using random sampling.

    :return: Column index array and ``(k, n-k)`` interpolation
        coefficients.
    """
    A = np.asfortranarray(A)
    m, n = A.shape
    w = iddr_aidi(m, n, k)
    idx, proj = _id.iddr_aid(A, k, w)
    if k == n:
        # Full-rank ID: there are no interpolation coefficients.
        proj = np.empty((k, n-k), dtype='float64', order='F')
    else:
        proj = proj.reshape((k, n-k), order='F')
    return idx, proj


def iddr_aidi(m, n, k):
    """
    Build the initialization array used by :func:`iddr_aid` for an
    `m`-by-`n` matrix and rank `k`.

    :rtype: :class:`numpy.ndarray`
    """
    return _id.iddr_aidi(m, n, k)
#------------------------------------------------------------------------------
# iddr_asvd.f
#------------------------------------------------------------------------------
def iddr_asvd(A, k):
    """
    Compute a rank-`k` SVD of the real matrix `A` using random sampling.

    :return: ``(U, V, S)`` — left singular vectors, right singular
        vectors, singular values.
    :raises RuntimeError: on a nonzero backend return code.
    """
    A = np.asfortranarray(A)
    m, n = A.shape
    # Workspace sized for the backend; its head holds the init data.
    w = np.empty((2*k + 28)*m + (6*k + 21)*n + 25*k**2 + 100, order='F')
    winit = iddr_aidi(m, n, k)
    w[:winit.size] = winit
    U, V, S, ier = _id.iddr_asvd(A, k, w)
    if ier != 0:
        raise _RETCODE_ERROR
    return U, V, S
#------------------------------------------------------------------------------
# iddr_rid.f
#------------------------------------------------------------------------------
def iddr_rid(m, n, matvect, k):
    """
    Compute a rank-`k` ID of an `m`-by-`n` real matrix using random
    matrix-vector multiplication with `matvect` (the transpose-apply
    callback, ``y = matvect(x)``).

    :return: Column index array and ``(k, n-k)`` interpolation
        coefficients.
    """
    idx, proj = _id.iddr_rid(m, n, matvect, k)
    return idx, proj[:k*(n-k)].reshape((k, n-k), order='F')
#------------------------------------------------------------------------------
# iddr_rsvd.f
#------------------------------------------------------------------------------
def iddr_rsvd(m, n, matvect, matvec, k):
    """
    Compute a rank-`k` SVD of an `m`-by-`n` real matrix using random
    matrix-vector multiplication.

    `matvect` and `matvec` apply the matrix transpose and the matrix,
    respectively, to a vector.

    :return: ``(U, V, S)`` — left singular vectors, right singular
        vectors, singular values.
    :raises RuntimeError: on a nonzero backend return code.
    """
    U, V, S, ier = _id.iddr_rsvd(m, n, matvect, matvec, k)
    if ier != 0:
        raise _RETCODE_ERROR
    return U, V, S
#------------------------------------------------------------------------------
# idz_frm.f
#------------------------------------------------------------------------------
def idz_frm(n, w, x):
"""
Transform complex vector via a composition of Rokhlin's random transform,
random subselection, and an FFT.
In contrast to :func:`idz_sfrm`, this routine works best when the length of
the transformed vector is the power-of-two integer output by
:func:`idz_frmi`, or when the length is not specified but instead
determined a posteriori from the output. The returned transformed vector is
randomly permuted.
:param n:
Greatest power-of-two integer satisfying `n <= x.size` as obtained from
:func:`idz_frmi`; `n` is also the length of the output vector.
:type n: int
:param w:
Initialization array constructed by :func:`idz_frmi`.
:type w: :class:`numpy.ndarray`
:param x:
Vector to be transformed.
:type x: :class:`numpy.ndarray`
:return:
Transformed vector.
:rtype: :class:`numpy.ndarray`
"""
return _id.idz_frm(n, w, x)
def idz_sfrm(l, n, w, x):
    """
    Apply a randomized transform (Rokhlin random transform, random
    subselection, and an FFT) to a complex vector.

    Unlike :func:`idz_frm`, this routine works best when the length of
    the transformed vector is known a priori.

    :param l:
        Length of transformed vector, satisfying ``l <= n``.
    :type l: int
    :param n:
        Greatest power-of-two integer satisfying ``n <= x.size``, as
        obtained from :func:`idz_sfrmi`.
    :type n: int
    :param w:
        Initialization array constructed by :func:`idz_sfrmi`.
    :type w: :class:`numpy.ndarray`
    :param x:
        Vector to be transformed.
    :type x: :class:`numpy.ndarray`
    :return:
        Transformed vector.
    :rtype: :class:`numpy.ndarray`
    """
    transformed = _id.idz_sfrm(l, n, w, x)
    return transformed
def idz_frmi(m):
    """
    Build initialization data for :func:`idz_frm`.

    :param m:
        Length of the vector to be transformed.
    :type m: int
    :return:
        Greatest power-of-two integer ``n`` satisfying ``n <= m``.
    :rtype: int
    :return:
        Initialization array to be used by :func:`idz_frm`.
    :rtype: :class:`numpy.ndarray`
    """
    n, w = _id.idz_frmi(m)
    return n, w
def idz_sfrmi(l, m):
    """
    Build initialization data for :func:`idz_sfrm`.

    :param l:
        Length of the output transformed vector.
    :type l: int
    :param m:
        Length of the vector to be transformed.
    :type m: int
    :return:
        Greatest power-of-two integer ``n`` satisfying ``n <= m``.
    :rtype: int
    :return:
        Initialization array to be used by :func:`idz_sfrm`.
    :rtype: :class:`numpy.ndarray`
    """
    n, w = _id.idz_sfrmi(l, m)
    return n, w
#------------------------------------------------------------------------------
# idz_id.f
#------------------------------------------------------------------------------
def idzp_id(eps, A):
    """
    Compute an ID of a complex matrix to a specified relative precision.

    :param eps:
        Relative precision.
    :type eps: float
    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`
    :return:
        Rank of ID.
    :rtype: int
    :return:
        Column index array.
    :rtype: :class:`numpy.ndarray`
    :return:
        Interpolation coefficients.
    :rtype: :class:`numpy.ndarray`
    """
    A = _asfortranarray_copy(A)
    k, idx, rnorms = _id.idzp_id(eps, A)
    ncols = A.shape[1]
    # The Fortran routine overwrites A; the k*(ncols-k) interpolation
    # coefficients are packed at the start of the transposed buffer.
    proj = A.T.ravel()[:k*(ncols-k)].reshape((k, ncols-k), order='F')
    return k, idx, proj
def idzr_id(A, k):
    """
    Compute an ID of a complex matrix to a specified rank.

    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`
    :param k:
        Rank of ID.
    :type k: int
    :return:
        Column index array.
    :rtype: :class:`numpy.ndarray`
    :return:
        Interpolation coefficients.
    :rtype: :class:`numpy.ndarray`
    """
    A = _asfortranarray_copy(A)
    idx, rnorms = _id.idzr_id(A, k)
    ncols = A.shape[1]
    # Interpolation coefficients are packed at the start of the
    # (overwritten) transposed buffer.
    proj = A.T.ravel()[:k*(ncols-k)].reshape((k, ncols-k), order='F')
    return idx, proj
def idz_reconid(B, idx, proj):
    """
    Reconstruct a matrix from its complex ID.

    :param B:
        Skeleton matrix.
    :type B: :class:`numpy.ndarray`
    :param idx:
        Column index array.
    :type idx: :class:`numpy.ndarray`
    :param proj:
        Interpolation coefficients.
    :type proj: :class:`numpy.ndarray`
    :return:
        Reconstructed matrix.
    :rtype: :class:`numpy.ndarray`
    """
    B = np.asfortranarray(B)
    # An empty projection means the ID is full-rank: the original matrix
    # is just the skeleton columns put back in their original order.
    if proj.size == 0:
        return B[:, np.argsort(idx)]
    return _id.idz_reconid(B, idx, proj)
def idz_reconint(idx, proj):
    """
    Reconstruct the interpolation matrix from a complex ID.

    :param idx:
        Column index array.
    :type idx: :class:`numpy.ndarray`
    :param proj:
        Interpolation coefficients.
    :type proj: :class:`numpy.ndarray`
    :return:
        Interpolation matrix.
    :rtype: :class:`numpy.ndarray`
    """
    interp = _id.idz_reconint(idx, proj)
    return interp
def idz_copycols(A, k, idx):
    """
    Reconstruct the skeleton matrix from a complex ID.

    :param A:
        Original matrix.
    :type A: :class:`numpy.ndarray`
    :param k:
        Rank of ID.
    :type k: int
    :param idx:
        Column index array.
    :type idx: :class:`numpy.ndarray`
    :return:
        Skeleton matrix.
    :rtype: :class:`numpy.ndarray`
    """
    A_f = np.asfortranarray(A)
    return _id.idz_copycols(A_f, k, idx)
#------------------------------------------------------------------------------
# idz_id2svd.f
#------------------------------------------------------------------------------
def idz_id2svd(B, idx, proj):
    """
    Convert a complex ID to an SVD.

    :param B:
        Skeleton matrix.
    :type B: :class:`numpy.ndarray`
    :param idx:
        Column index array.
    :type idx: :class:`numpy.ndarray`
    :param proj:
        Interpolation coefficients.
    :type proj: :class:`numpy.ndarray`
    :return:
        Left singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return:
        Right singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return:
        Singular values.
    :rtype: :class:`numpy.ndarray`
    """
    B = np.asfortranarray(B)
    U, V, S, ier = _id.idz_id2svd(B, idx, proj)
    # Nonzero return code indicates the Fortran routine failed.
    if ier != 0:
        raise _RETCODE_ERROR
    return U, V, S
#------------------------------------------------------------------------------
# idz_snorm.f
#------------------------------------------------------------------------------
def idz_snorm(m, n, matveca, matvec, its=20):
    """
    Estimate the spectral norm of a complex matrix by the randomized
    power method.

    :param m:
        Matrix row dimension.
    :type m: int
    :param n:
        Matrix column dimension.
    :type n: int
    :param matveca:
        Callable ``y = matveca(x)`` applying the matrix adjoint to a vector.
    :type matveca: function
    :param matvec:
        Callable ``y = matvec(x)`` applying the matrix to a vector.
    :type matvec: function
    :param its:
        Number of power method iterations.
    :type its: int
    :return:
        Spectral norm estimate.
    :rtype: float
    """
    # The Fortran routine also returns the final iteration vector,
    # which is not part of this wrapper's interface.
    estimate, _v = _id.idz_snorm(m, n, matveca, matvec, its)
    return estimate
def idz_diffsnorm(m, n, matveca, matveca2, matvec, matvec2, its=20):
    """
    Estimate the spectral norm of the difference of two complex matrices
    by the randomized power method.

    :param m:
        Matrix row dimension.
    :type m: int
    :param n:
        Matrix column dimension.
    :type n: int
    :param matveca:
        Callable ``y = matveca(x)`` applying the adjoint of the first
        matrix to a vector.
    :type matveca: function
    :param matveca2:
        Callable ``y = matveca2(x)`` applying the adjoint of the second
        matrix to a vector.
    :type matveca2: function
    :param matvec:
        Callable ``y = matvec(x)`` applying the first matrix to a vector.
    :type matvec: function
    :param matvec2:
        Callable ``y = matvec2(x)`` applying the second matrix to a vector.
    :type matvec2: function
    :param its:
        Number of power method iterations.
    :type its: int
    :return:
        Spectral norm estimate of the matrix difference.
    :rtype: float
    """
    estimate = _id.idz_diffsnorm(m, n, matveca, matveca2, matvec, matvec2, its)
    return estimate
#------------------------------------------------------------------------------
# idz_svd.f
#------------------------------------------------------------------------------
def idzr_svd(A, k):
    """
    Compute an SVD of a complex matrix to a specified rank.

    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`
    :param k:
        Rank of SVD.
    :type k: int
    :return:
        Left singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return:
        Right singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return:
        Singular values.
    :rtype: :class:`numpy.ndarray`
    """
    A = np.asfortranarray(A)
    U, V, S, ier = _id.idzr_svd(A, k)
    # Nonzero return code indicates the Fortran routine failed.
    if ier != 0:
        raise _RETCODE_ERROR
    return U, V, S
def idzp_svd(eps, A):
    """
    Compute an SVD of a complex matrix to a specified relative precision.

    :param eps:
        Relative precision.
    :type eps: float
    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`
    :return:
        Left singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return:
        Right singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return:
        Singular values.
    :rtype: :class:`numpy.ndarray`
    """
    A = np.asfortranarray(A)
    m, n = A.shape
    k, iU, iV, iS, w, ier = _id.idzp_svd(eps, A)
    if ier:
        raise _RETCODE_ERROR
    # The factors are packed contiguously into the workspace ``w``
    # at the 1-based offsets iU, iV, iS.
    U = w[iU-1:iU-1+m*k].reshape((m, k), order='F')
    V = w[iV-1:iV-1+n*k].reshape((n, k), order='F')
    S = w[iS-1:iS-1+k]
    return U, V, S
#------------------------------------------------------------------------------
# idzp_aid.f
#------------------------------------------------------------------------------
def idzp_aid(eps, A):
    """
    Compute an ID of a complex matrix to a specified relative precision
    using random sampling.

    :param eps:
        Relative precision.
    :type eps: float
    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`
    :return:
        Rank of ID.
    :rtype: int
    :return:
        Column index array.
    :rtype: :class:`numpy.ndarray`
    :return:
        Interpolation coefficients.
    :rtype: :class:`numpy.ndarray`
    """
    A = np.asfortranarray(A)
    m, n = A.shape
    n2, init = idz_frmi(m)
    # Workspace sized as the Fortran routine requires.
    work = np.empty(n*(2*n2 + 1) + n2 + 1, dtype='complex128', order='F')
    k, idx, work = _id.idzp_aid(eps, A, init, work)
    proj = work[:k*(n-k)].reshape((k, n-k), order='F')
    return k, idx, proj
def idz_estrank(eps, A):
    """
    Estimate the rank of a complex matrix to a specified relative
    precision using random sampling.

    The output rank is typically about 8 higher than the actual rank.

    :param eps:
        Relative precision.
    :type eps: float
    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`
    :return:
        Rank estimate.
    :rtype: int
    """
    A = np.asfortranarray(A)
    m, n = A.shape
    n2, init = idz_frmi(m)
    # Scratch workspace sized as the Fortran routine requires.
    scratch = np.empty(n*n2 + (n + 1)*(n2 + 1), dtype='complex128', order='F')
    k, _ra = _id.idz_estrank(eps, A, init, scratch)
    return k
#------------------------------------------------------------------------------
# idzp_asvd.f
#------------------------------------------------------------------------------
def idzp_asvd(eps, A):
    """
    Compute an SVD of a complex matrix to a specified relative precision
    using random sampling.

    :param eps:
        Relative precision.
    :type eps: float
    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`
    :return:
        Left singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return:
        Right singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return:
        Singular values.
    :rtype: :class:`numpy.ndarray`
    """
    A = np.asfortranarray(A)
    m, n = A.shape
    n2, winit = _id.idz_frmi(m)
    # Workspace must accommodate both the SVD stage and the randomized
    # transform stage; take the larger of the two requirements.
    size_svd = (min(m, n) + 1)*(3*m + 5*n + 11) + 8*min(m, n)**2
    size_frm = (2*n + 1)*(n2 + 1)
    w = np.empty(max(size_svd, size_frm), dtype=np.complex128, order='F')
    k, iU, iV, iS, w, ier = _id.idzp_asvd(eps, A, winit, w)
    if ier:
        raise _RETCODE_ERROR
    # Factors are packed into ``w`` at the 1-based offsets iU, iV, iS.
    U = w[iU-1:iU-1+m*k].reshape((m, k), order='F')
    V = w[iV-1:iV-1+n*k].reshape((n, k), order='F')
    S = w[iS-1:iS-1+k]
    return U, V, S
#------------------------------------------------------------------------------
# idzp_rid.f
#------------------------------------------------------------------------------
def idzp_rid(eps, m, n, matveca):
    """
    Compute an ID of a complex matrix to a specified relative precision
    using random matrix-vector multiplication.

    :param eps:
        Relative precision.
    :type eps: float
    :param m:
        Matrix row dimension.
    :type m: int
    :param n:
        Matrix column dimension.
    :type n: int
    :param matveca:
        Callable ``y = matveca(x)`` applying the matrix adjoint to a vector.
    :type matveca: function
    :return:
        Rank of ID.
    :rtype: int
    :return:
        Column index array.
    :rtype: :class:`numpy.ndarray`
    :return:
        Interpolation coefficients.
    :rtype: :class:`numpy.ndarray`
    """
    # Workspace sized as the Fortran routine requires.
    lproj = m + 1 + 2*n*(min(m, n) + 1)
    proj = np.empty(lproj, dtype=np.complex128, order='F')
    k, idx, proj, ier = _id.idzp_rid(eps, m, n, matveca, proj)
    if ier != 0:
        raise _RETCODE_ERROR
    return k, idx, proj[:k*(n-k)].reshape((k, n-k), order='F')
def idz_findrank(eps, m, n, matveca):
    """
    Estimate the rank of a complex matrix to a specified relative
    precision using random matrix-vector multiplication.

    :param eps:
        Relative precision.
    :type eps: float
    :param m:
        Matrix row dimension.
    :type m: int
    :param n:
        Matrix column dimension.
    :type n: int
    :param matveca:
        Callable ``y = matveca(x)`` applying the matrix adjoint to a vector.
    :type matveca: function
    :return:
        Rank estimate.
    :rtype: int
    """
    k, _ra, ier = _id.idz_findrank(eps, m, n, matveca)
    if ier != 0:
        raise _RETCODE_ERROR
    return k
#------------------------------------------------------------------------------
# idzp_rsvd.f
#------------------------------------------------------------------------------
def idzp_rsvd(eps, m, n, matveca, matvec):
    """
    Compute an SVD of a complex matrix to a specified relative precision
    using random matrix-vector multiplication.

    :param eps:
        Relative precision.
    :type eps: float
    :param m:
        Matrix row dimension.
    :type m: int
    :param n:
        Matrix column dimension.
    :type n: int
    :param matveca:
        Callable ``y = matveca(x)`` applying the matrix adjoint to a vector.
    :type matveca: function
    :param matvec:
        Callable ``y = matvec(x)`` applying the matrix to a vector.
    :type matvec: function
    :return:
        Left singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return:
        Right singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return:
        Singular values.
    :rtype: :class:`numpy.ndarray`
    """
    k, iU, iV, iS, w, ier = _id.idzp_rsvd(eps, m, n, matveca, matvec)
    if ier != 0:
        raise _RETCODE_ERROR
    # Factors are packed into ``w`` at the 1-based offsets iU, iV, iS.
    U = w[iU-1:iU-1+m*k].reshape((m, k), order='F')
    V = w[iV-1:iV-1+n*k].reshape((n, k), order='F')
    S = w[iS-1:iS-1+k]
    return U, V, S
#------------------------------------------------------------------------------
# idzr_aid.f
#------------------------------------------------------------------------------
def idzr_aid(A, k):
    """
    Compute an ID of a complex matrix to a specified rank using random
    sampling.

    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`
    :param k:
        Rank of ID.
    :type k: int
    :return:
        Column index array.
    :rtype: :class:`numpy.ndarray`
    :return:
        Interpolation coefficients.
    :rtype: :class:`numpy.ndarray`
    """
    A = np.asfortranarray(A)
    m, n = A.shape
    init = idzr_aidi(m, n, k)
    idx, proj = _id.idzr_aid(A, k, init)
    # A full-rank ID (k == n) has no non-skeleton columns, so the
    # interpolation matrix is empty.
    proj = (np.empty((k, n-k), dtype='complex128', order='F')
            if k == n else proj.reshape((k, n-k), order='F'))
    return idx, proj
def idzr_aidi(m, n, k):
    """
    Build the initialization array for :func:`idzr_aid`.

    :param m:
        Matrix row dimension.
    :type m: int
    :param n:
        Matrix column dimension.
    :type n: int
    :param k:
        Rank of ID.
    :type k: int
    :return:
        Initialization array to be used by :func:`idzr_aid`.
    :rtype: :class:`numpy.ndarray`
    """
    init = _id.idzr_aidi(m, n, k)
    return init
#------------------------------------------------------------------------------
# idzr_asvd.f
#------------------------------------------------------------------------------
def idzr_asvd(A, k):
    """
    Compute an SVD of a complex matrix to a specified rank using random
    sampling.

    :param A:
        Matrix.
    :type A: :class:`numpy.ndarray`
    :param k:
        Rank of SVD.
    :type k: int
    :return:
        Left singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return:
        Right singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return:
        Singular values.
    :rtype: :class:`numpy.ndarray`
    """
    A = np.asfortranarray(A)
    m, n = A.shape
    # Workspace sized as the Fortran routine requires; its head is
    # seeded with the idzr_aid initialization data.
    lw = (2*k + 22)*m + (6*k + 21)*n + 8*k**2 + 10*k + 90
    w = np.empty(lw, dtype='complex128', order='F')
    init = idzr_aidi(m, n, k)
    w[:init.size] = init
    U, V, S, ier = _id.idzr_asvd(A, k, w)
    if ier != 0:
        raise _RETCODE_ERROR
    return U, V, S
#------------------------------------------------------------------------------
# idzr_rid.f
#------------------------------------------------------------------------------
def idzr_rid(m, n, matveca, k):
    """
    Compute an ID of a complex matrix to a specified rank using random
    matrix-vector multiplication.

    :param m:
        Matrix row dimension.
    :type m: int
    :param n:
        Matrix column dimension.
    :type n: int
    :param matveca:
        Callable ``y = matveca(x)`` applying the matrix adjoint to a vector.
    :type matveca: function
    :param k:
        Rank of ID.
    :type k: int
    :return:
        Column index array.
    :rtype: :class:`numpy.ndarray`
    :return:
        Interpolation coefficients.
    :rtype: :class:`numpy.ndarray`
    """
    idx, proj = _id.idzr_rid(m, n, matveca, k)
    # Only the first k*(n-k) entries of the returned buffer are
    # meaningful interpolation coefficients.
    return idx, proj[:k*(n-k)].reshape((k, n-k), order='F')
#------------------------------------------------------------------------------
# idzr_rsvd.f
#------------------------------------------------------------------------------
def idzr_rsvd(m, n, matveca, matvec, k):
    """
    Compute an SVD of a complex matrix to a specified rank using random
    matrix-vector multiplication.

    :param m:
        Matrix row dimension.
    :type m: int
    :param n:
        Matrix column dimension.
    :type n: int
    :param matveca:
        Callable ``y = matveca(x)`` applying the matrix adjoint to a vector.
    :type matveca: function
    :param matvec:
        Callable ``y = matvec(x)`` applying the matrix to a vector.
    :type matvec: function
    :param k:
        Rank of SVD.
    :type k: int
    :return:
        Left singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return:
        Right singular vectors.
    :rtype: :class:`numpy.ndarray`
    :return:
        Singular values.
    :rtype: :class:`numpy.ndarray`
    """
    U, V, S, ier = _id.idzr_rsvd(m, n, matveca, matvec, k)
    if ier != 0:
        raise _RETCODE_ERROR
    return U, V, S
| 45,192
| 25.868609
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/decomp.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.linalg` namespace for importing the functions
# included below.
import warnings
from . import _decomp
# Public names forwarded by the deprecation shim below.  NOTE(review):
# several entries (e.g. 'array', 'isfinite', 'einsum') look like NumPy
# names that historically leaked into this namespace — presumably kept
# for backward compatibility; confirm before pruning.
__all__ = [ # noqa: F822
    'eig', 'eigvals', 'eigh', 'eigvalsh',
    'eig_banded', 'eigvals_banded',
    'eigh_tridiagonal', 'eigvalsh_tridiagonal', 'hessenberg', 'cdf2rdf',
    'array', 'isfinite', 'inexact', 'nonzero', 'iscomplexobj', 'cast',
    'flatnonzero', 'argsort', 'iscomplex', 'einsum', 'eye', 'inf',
    'LinAlgError', 'norm', 'get_lapack_funcs'
]
def __dir__():
    """Restrict introspection of this deprecated module to ``__all__``."""
    return __all__
def __getattr__(name):
    """Forward deprecated attribute access to ``scipy.linalg._decomp``.

    Emits a DeprecationWarning for known names and raises AttributeError
    for everything else.
    """
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.linalg` namespace, "
                      "the `scipy.linalg.decomp` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_decomp, name)
    raise AttributeError(
        "scipy.linalg.decomp is deprecated and has no attribute "
        f"{name}. Try looking in scipy.linalg instead.")
| 1,057
| 31.060606
| 76
|
py
|
scipy
|
scipy-main/scipy/linalg/setup.py
|
from os.path import join
import os
def configuration(parent_package='', top_path=None):
    """Assemble the numpy.distutils build configuration for scipy.linalg.

    Declares the f2py-wrapped BLAS/LAPACK extensions (including optional
    64-bit ILP64 variants), the Cython BLAS/LAPACK API, and assorted
    helper extension modules and data files.
    """
    from distutils.sysconfig import get_python_inc
    from numpy.distutils.system_info import get_info, numpy_info
    from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
    from scipy._build_utils import (get_g77_abi_wrappers,
                                    gfortran_legacy_flag_hook,
                                    blas_ilp64_pre_build_hook,
                                    get_f2py_int64_options,
                                    uses_blas64)
    config = Configuration('linalg', parent_package, top_path)
    lapack_opt = get_info('lapack_opt')
    # Extract the ATLAS version string (if any) from the define_macros
    # entries; strips the surrounding quote characters.
    atlas_version = ([v[3:-3] for k, v in lapack_opt.get('define_macros', [])
                      if k == 'ATLAS_INFO']+[None])[0]
    if atlas_version:
        print('ATLAS version: %s' % atlas_version)
    if uses_blas64():
        lapack_ilp64_opt = get_info('lapack_ilp64_opt', 2)
    # fblas:
    sources = ['fblas.pyf.src']
    sources += get_g77_abi_wrappers(lapack_opt)
    depends = ['fblas_l?.pyf.src']
    config.add_extension('_fblas',
                         sources=sources,
                         depends=depends,
                         extra_info=lapack_opt
                         )
    if uses_blas64():
        # ILP64 (64-bit integer) variant of the same BLAS wrappers.
        sources = ['fblas_64.pyf.src'] + sources[1:]
        ext = config.add_extension('_fblas_64',
                                   sources=sources,
                                   depends=depends,
                                   f2py_options=get_f2py_int64_options(),
                                   extra_info=lapack_ilp64_opt)
        ext._pre_build_hook = blas_ilp64_pre_build_hook(lapack_ilp64_opt)
    # flapack:
    sources = ['flapack.pyf.src']
    sources += get_g77_abi_wrappers(lapack_opt)
    depends = ['flapack_gen.pyf.src',
               'flapack_gen_banded.pyf.src',
               'flapack_gen_tri.pyf.src',
               'flapack_pos_def.pyf.src',
               'flapack_pos_def_tri.pyf.src',
               'flapack_sym_herm.pyf.src',
               'flapack_other.pyf.src',
               'flapack_user.pyf.src']
    config.add_extension('_flapack',
                         sources=sources,
                         depends=depends,
                         extra_info=lapack_opt
                         )
    if uses_blas64():
        # ILP64 (64-bit integer) variant of the same LAPACK wrappers.
        sources = ['flapack_64.pyf.src'] + sources[1:]
        ext = config.add_extension('_flapack_64',
                                   sources=sources,
                                   depends=depends,
                                   f2py_options=get_f2py_int64_options(),
                                   extra_info=lapack_ilp64_opt)
        ext._pre_build_hook = blas_ilp64_pre_build_hook(lapack_ilp64_opt)
    # cblas/clapack are only built when an ATLAS install was detected.
    if atlas_version is not None:
        # cblas:
        config.add_extension('_cblas',
                             sources=['cblas.pyf.src'],
                             depends=['cblas.pyf.src', 'cblas_l1.pyf.src'],
                             extra_info=lapack_opt
                             )
        # clapack:
        config.add_extension('_clapack',
                             sources=['clapack.pyf.src'],
                             depends=['clapack.pyf.src'],
                             extra_info=lapack_opt
                             )
    # _flinalg:
    config.add_extension('_flinalg',
                         sources=[join('src', 'det.f'), join('src', 'lu.f')],
                         extra_info=lapack_opt
                         )
    # _interpolative:
    ext = config.add_extension('_interpolative',
                               sources=[join('src', 'id_dist', 'src', '*.f'),
                                        "interpolative.pyf"],
                               extra_info=lapack_opt
                               )
    ext._pre_build_hook = gfortran_legacy_flag_hook
    # _solve_toeplitz:
    config.add_extension('_solve_toeplitz',
                         sources=[('_solve_toeplitz.c')],
                         include_dirs=[get_numpy_include_dirs()])
    # _matfuncs_sqrtm_triu: built with Pythran when enabled (default),
    # otherwise falls back to the pre-generated C extension.
    if int(os.environ.get('SCIPY_USE_PYTHRAN', 1)):
        import pythran
        ext = pythran.dist.PythranExtension(
            'scipy.linalg._matfuncs_sqrtm_triu',
            sources=["scipy/linalg/_matfuncs_sqrtm_triu.py"],
            config=['compiler.blas=none'])
        config.ext_modules.append(ext)
    else:
        config.add_extension('_matfuncs_sqrtm_triu',
                             sources=[('_matfuncs_sqrtm_triu.c')],
                             include_dirs=[get_numpy_include_dirs()])
    config.add_data_dir('tests')
    # Cython BLAS/LAPACK
    config.add_data_files('cython_blas.pxd')
    config.add_data_files('cython_lapack.pxd')
    sources = ['_blas_subroutine_wrappers.f', '_lapack_subroutine_wrappers.f']
    sources += get_g77_abi_wrappers(lapack_opt)
    includes = numpy_info().get_include_dirs() + [get_python_inc()]
    config.add_library('fwrappers', sources=sources, include_dirs=includes)
    config.add_extension('cython_blas',
                         sources=['cython_blas.c'],
                         depends=['cython_blas.pyx', 'cython_blas.pxd',
                                  'fortran_defs.h', '_blas_subroutines.h'],
                         include_dirs=['.'],
                         libraries=['fwrappers'],
                         extra_info=lapack_opt)
    config.add_extension('cython_lapack',
                         sources=['cython_lapack.c'],
                         depends=['cython_lapack.pyx', 'cython_lapack.pxd',
                                  'fortran_defs.h', '_lapack_subroutines.h'],
                         include_dirs=['.'],
                         libraries=['fwrappers'],
                         extra_info=lapack_opt)
    config.add_extension('_cythonized_array_utils',
                         sources=['_cythonized_array_utils.c'],
                         depends=['_cythonized_array_utils.pyx',
                                  '_cythonized_array_utils.pxd'],
                         include_dirs=['.']
                         )
    config.add_data_files('_cythonized_array_utils.pxd')
    config.add_extension('_decomp_update', sources=['_decomp_update.c'])
    config.add_extension('_decomp_lu_cython', sources=['_decomp_lu_cython.c'])
    config.add_extension('_matfuncs_expm', sources=['_matfuncs_expm.c'])
    # Add any license files
    config.add_data_files('src/id_dist/doc/doc.tex')
    config.add_data_files('src/lapack_deprecations/LICENSE')
    # Type stubs
    config.add_data_files('*.pyi')
    return config
# Allow building this subpackage standalone via numpy.distutils.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 6,804
| 38.109195
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/_flinalg_py.py
|
#
# Author: Pearu Peterson, March 2002
#
__all__ = ['get_flinalg_funcs']
# The compiled _flinalg extension may be missing (e.g. in a partial
# build).  Import it lazily-tolerantly: fall back to None here, so an
# exception is only raised at the first attempt to use the resources.
try:
    from . import _flinalg
except ImportError:
    _flinalg = None
def has_column_major_storage(arr):
    """Return True when *arr* is stored in Fortran (column-major) order."""
    flags = arr.flags
    return flags['FORTRAN']
_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} # 'd' will be default for 'i',..
def get_flinalg_funcs(names, arrays=(), debug=0):
    """Return optimal available _flinalg function objects with
    names. Arrays are used to determine optimal prefix."""
    # Tag each array with its BLAS-style type char ('d' fallback) and
    # position, then pick the prefix of the lexicographically smallest.
    tagged = []
    for pos, arr in enumerate(arrays):
        char = arr.dtype.char
        tagged.append((char if char in _type_conv else 'd', pos))
    if tagged:
        tagged.sort()
        prefix = _type_conv[tagged[0][0]]
        column_major = has_column_major_storage(arrays[tagged[0][1]])
    else:
        prefix = 'd'
        column_major = False
    # Prefer the routine variant matching the storage order of the
    # chosen array; fall back to the other variant, then to None.
    suffixes = ('_c', '_r') if column_major else ('_r', '_c')
    funcs = []
    for name in names:
        base = prefix + name
        func = getattr(_flinalg, base + suffixes[0],
                       getattr(_flinalg, base + suffixes[1], None))
        funcs.append(func)
    return tuple(funcs)
| 1,489
| 27.113208
| 83
|
py
|
scipy
|
scipy-main/scipy/linalg/_matfuncs_inv_ssq.py
|
"""
Matrix functions that use Pade approximation with inverse scaling and squaring.
"""
import warnings
import numpy as np
from scipy.linalg._matfuncs_sqrtm import SqrtmError, _sqrtm_triu
from scipy.linalg._decomp_schur import schur, rsf2csf
from scipy.linalg._matfuncs import funm
from scipy.linalg import svdvals, solve_triangular
from scipy.sparse.linalg._interface import LinearOperator
from scipy.sparse.linalg import onenormest
import scipy.special
class LogmRankWarning(UserWarning):
    """Base category for the logm-related rank warnings defined below."""
    pass
class LogmExactlySingularWarning(LogmRankWarning):
    """Warning category for an exactly singular input to logm."""
    pass
class LogmNearlySingularWarning(LogmRankWarning):
    """Warning category for a nearly singular input to logm."""
    pass
class LogmError(np.linalg.LinAlgError):
    """LinAlgError subclass raised for matrix-logarithm failures."""
    pass
class FractionalMatrixPowerError(np.linalg.LinAlgError):
    """LinAlgError subclass raised for fractional-matrix-power failures."""
    pass
#TODO renovate or move this class when scipy operators are more mature
class _MatrixM1PowerOperator(LinearOperator):
"""
A representation of the linear operator (A - I)^p.
"""
def __init__(self, A, p):
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if p < 0 or p != int(p):
raise ValueError('expected p to be a non-negative integer')
self._A = A
self._p = p
self.ndim = A.ndim
self.shape = A.shape
def _matvec(self, x):
for i in range(self._p):
x = self._A.dot(x) - x
return x
def _rmatvec(self, x):
for i in range(self._p):
x = x.dot(self._A) - x
return x
def _matmat(self, X):
for i in range(self._p):
X = self._A.dot(X) - X
return X
def _adjoint(self):
return _MatrixM1PowerOperator(self._A.T, self._p)
#TODO renovate or move this function when SciPy operators are more mature
def _onenormest_m1_power(A, p,
        t=2, itmax=5, compute_v=False, compute_w=False):
    """
    Efficiently estimate the 1-norm of (A - I)^p.

    Parameters
    ----------
    A : ndarray
        Matrix whose 1-norm of a power is to be computed.
    p : int
        Non-negative integer power.
    t : int, optional
        Positive parameter trading accuracy against time and memory:
        larger values are slower and use more memory but are more
        accurate.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        Vector with ``||Av||_1 == est*||v||_1``; an input to the linear
        operator producing a particularly large output norm.
    w : ndarray, optional
        The vector ``Av``, relatively large in 1-norm compared to the
        input.
    """
    # Wrap (A - I)^p as a LinearOperator and delegate the estimate.
    operator = _MatrixM1PowerOperator(A, p)
    return onenormest(operator, t=t, itmax=itmax,
                      compute_v=compute_v, compute_w=compute_w)
def _unwindk(z):
"""
Compute the scalar unwinding number.
Uses Eq. (5.3) in [1]_, and should be equal to (z - log(exp(z)) / (2 pi i).
Note that this definition differs in sign from the original definition
in equations (5, 6) in [2]_. The sign convention is justified in [3]_.
Parameters
----------
z : complex
A complex number.
Returns
-------
unwinding_number : integer
The scalar unwinding number of z.
References
----------
.. [1] Nicholas J. Higham and Lijing lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
.. [2] Robert M. Corless and David J. Jeffrey,
"The unwinding number." Newsletter ACM SIGSAM Bulletin
Volume 30, Issue 2, June 1996, Pages 28-35.
.. [3] Russell Bradford and Robert M. Corless and James H. Davenport and
David J. Jeffrey and Stephen M. Watt,
"Reasoning about the elementary functions of complex analysis"
Annals of Mathematics and Artificial Intelligence,
36: 303-318, 2002.
"""
return int(np.ceil((z.imag - np.pi) / (2*np.pi)))
def _briggs_helper_function(a, k):
"""
Computes r = a^(1 / (2^k)) - 1.
This is algorithm (2) of [1]_.
The purpose is to avoid a danger of subtractive cancellation.
For more computational efficiency it should probably be cythonized.
Parameters
----------
a : complex
A complex number.
k : integer
A nonnegative integer.
Returns
-------
r : complex
The value r = a^(1 / (2^k)) - 1 computed with less cancellation.
Notes
-----
The algorithm as formulated in the reference does not handle k=0 or k=1
correctly, so these are special-cased in this implementation.
This function is intended to not allow `a` to belong to the closed
negative real axis, but this constraint is relaxed.
References
----------
.. [1] Awad H. Al-Mohy (2012)
"A more accurate Briggs method for the logarithm",
Numerical Algorithms, 59 : 393--402.
"""
if k < 0 or int(k) != k:
raise ValueError('expected a nonnegative integer k')
if k == 0:
return a - 1
elif k == 1:
return np.sqrt(a) - 1
else:
k_hat = k
if np.angle(a) >= np.pi / 2:
a = np.sqrt(a)
k_hat = k - 1
z0 = a - 1
a = np.sqrt(a)
r = 1 + a
for j in range(1, k_hat):
a = np.sqrt(a)
r = r * (1 + a)
r = z0 / r
return r
def _fractional_power_superdiag_entry(l1, l2, t12, p):
"""
Compute a superdiagonal entry of a fractional matrix power.
This is Eq. (5.6) in [1]_.
Parameters
----------
l1 : complex
A diagonal entry of the matrix.
l2 : complex
A diagonal entry of the matrix.
t12 : complex
A superdiagonal entry of the matrix.
p : float
A fractional power.
Returns
-------
f12 : complex
A superdiagonal entry of the fractional matrix power.
Notes
-----
Care has been taken to return a real number if possible when
all of the inputs are real numbers.
References
----------
.. [1] Nicholas J. Higham and Lijing lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
if l1 == l2:
f12 = t12 * p * l1**(p-1)
elif abs(l2 - l1) > abs(l1 + l2) / 2:
f12 = t12 * ((l2**p) - (l1**p)) / (l2 - l1)
else:
# This is Eq. (5.5) in [1].
z = (l2 - l1) / (l2 + l1)
log_l1 = np.log(l1)
log_l2 = np.log(l2)
arctanh_z = np.arctanh(z)
tmp_a = t12 * np.exp((p/2)*(log_l2 + log_l1))
tmp_u = _unwindk(log_l2 - log_l1)
if tmp_u:
tmp_b = p * (arctanh_z + np.pi * 1j * tmp_u)
else:
tmp_b = p * arctanh_z
tmp_c = 2 * np.sinh(tmp_b) / (l2 - l1)
f12 = tmp_a * tmp_c
return f12
def _logm_superdiag_entry(l1, l2, t12):
"""
Compute a superdiagonal entry of a matrix logarithm.
This is like Eq. (11.28) in [1]_, except the determination of whether
l1 and l2 are sufficiently far apart has been modified.
Parameters
----------
l1 : complex
A diagonal entry of the matrix.
l2 : complex
A diagonal entry of the matrix.
t12 : complex
A superdiagonal entry of the matrix.
Returns
-------
f12 : complex
A superdiagonal entry of the matrix logarithm.
Notes
-----
Care has been taken to return a real number if possible when
all of the inputs are real numbers.
References
----------
.. [1] Nicholas J. Higham (2008)
"Functions of Matrices: Theory and Computation"
ISBN 978-0-898716-46-7
"""
if l1 == l2:
f12 = t12 / l1
elif abs(l2 - l1) > abs(l1 + l2) / 2:
f12 = t12 * (np.log(l2) - np.log(l1)) / (l2 - l1)
else:
z = (l2 - l1) / (l2 + l1)
u = _unwindk(np.log(l2) - np.log(l1))
if u:
f12 = t12 * 2 * (np.arctanh(z) + np.pi*1j*u) / (l2 - l1)
else:
f12 = t12 * 2 * np.arctanh(z) / (l2 - l1)
return f12
def _inverse_squaring_helper(T0, theta):
    """
    A helper function for inverse scaling and squaring for Pade approximation.

    Parameters
    ----------
    T0 : (N, N) array_like upper triangular
        Matrix involved in inverse scaling and squaring.
    theta : indexable
        The values theta[1] .. theta[7] must be available.
        They represent bounds related to Pade approximation, and they depend
        on the matrix function which is being computed.
        For example, different values of theta are required for
        matrix logarithm than for fractional matrix power.

    Returns
    -------
    R : (N, N) array_like upper triangular
        Composition of zero or more matrix square roots of T0, minus I.
    s : non-negative integer
        Number of square roots taken.
    m : positive integer
        The degree of the Pade approximation.

    Notes
    -----
    This subroutine appears as a chunk of lines within
    a couple of published algorithms; for example it appears
    as lines 4--35 in algorithm (3.1) of [1]_, and
    as lines 3--34 in algorithm (4.1) of [2]_.
    The instances of 'goto line 38' in algorithm (3.1) of [1]_
    probably mean 'goto line 36' and have been intepreted accordingly.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing Lin (2013)
           "An Improved Schur-Pade Algorithm for Fractional Powers
           of a Matrix and their Frechet Derivatives."
    .. [2] Awad H. Al-Mohy and Nicholas J. Higham (2012)
           "Improved Inverse Scaling and Squaring Algorithms
           for the Matrix Logarithm."
           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
           ISSN 1095-7197

    """
    if len(T0.shape) != 2 or T0.shape[0] != T0.shape[1]:
        raise ValueError('expected an upper triangular square matrix')
    n, n = T0.shape
    T = T0
    # Find s0, the smallest s such that the spectral radius
    # of a certain diagonal matrix is at most theta[7].
    # Note that because theta[7] < 1,
    # this search will not terminate if any diagonal entry of T is zero.
    s0 = 0
    tmp_diag = np.diag(T)
    if np.count_nonzero(tmp_diag) != n:
        raise Exception('Diagonal entries of T must be nonzero')
    # Repeated elementwise sqrt of the diagonal (the eigenvalues of T)
    # cheaply predicts how many matrix square roots are needed to bring
    # every eigenvalue within theta[7] of 1.
    while np.max(np.absolute(tmp_diag - 1)) > theta[7]:
        tmp_diag = np.sqrt(tmp_diag)
        s0 += 1
    # Take matrix square roots of T.
    for i in range(s0):
        T = _sqrtm_triu(T)
    # Flow control in this section is a little odd.
    # This is because I am translating algorithm descriptions
    # which have GOTOs in the publication.
    s = s0
    k = 0
    # d_p estimates norm(T - I)^p ** (1/p) via one-norm estimation;
    # small values permit a low Pade degree immediately.
    d2 = _onenormest_m1_power(T, 2) ** (1/2)
    d3 = _onenormest_m1_power(T, 3) ** (1/3)
    a2 = max(d2, d3)
    m = None
    for i in (1, 2):
        if a2 <= theta[i]:
            m = i
            break
    while m is None:
        if s > s0:
            # d3 only needs refreshing after an additional square root.
            d3 = _onenormest_m1_power(T, 3) ** (1/3)
        d4 = _onenormest_m1_power(T, 4) ** (1/4)
        a3 = max(d3, d4)
        if a3 <= theta[7]:
            j1 = min(i for i in (3, 4, 5, 6, 7) if a3 <= theta[i])
            if j1 <= 6:
                m = j1
                break
            elif a3 / 2 <= theta[5] and k < 2:
                # Allow at most two speculative extra square roots here
                # before settling for a larger Pade degree.
                k += 1
                T = _sqrtm_triu(T)
                s += 1
                continue
        d5 = _onenormest_m1_power(T, 5) ** (1/5)
        a4 = max(d4, d5)
        eta = min(a3, a4)
        for i in (6, 7):
            if eta <= theta[i]:
                m = i
                break
        if m is not None:
            break
        T = _sqrtm_triu(T)
        s += 1
    # The subtraction of the identity is redundant here,
    # because the diagonal will be replaced for improved numerical accuracy,
    # but this formulation should help clarify the meaning of R.
    R = T - np.identity(n)
    # Replace the diagonal and first superdiagonal of T0^(1/(2^s)) - I
    # using formulas that have less subtractive cancellation.
    # Skip this step if the principal branch
    # does not exist at T0; this happens when a diagonal entry of T0
    # is negative with imaginary part 0.
    has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0))
    if has_principal_branch:
        for j in range(n):
            a = T0[j, j]
            r = _briggs_helper_function(a, s)
            R[j, j] = r
        p = np.exp2(-s)
        for j in range(n-1):
            l1 = T0[j, j]
            l2 = T0[j+1, j+1]
            t12 = T0[j, j+1]
            f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)
            R[j, j+1] = f12
    # Return the T-I matrix, the number of square roots, and the Pade degree.
    if not np.array_equal(R, np.triu(R)):
        raise Exception('R is not upper triangular')
    return R, s, m
def _fractional_power_pade_constant(i, t):
# A helper function for matrix fractional power.
if i < 1:
raise ValueError('expected a positive integer i')
if not (-1 < t < 1):
raise ValueError('expected -1 < t < 1')
if i == 1:
return -t
elif i % 2 == 0:
j = i // 2
return (-j + t) / (2 * (2*j - 1))
elif i % 2 == 1:
j = (i - 1) // 2
return (-j - t) / (2 * (2*j + 1))
else:
raise Exception(f'unnexpected value of i, i = {i}')
def _fractional_power_pade(R, t, m):
    """
    Evaluate the Pade approximation of a fractional matrix power.

    The degree-m Pade approximant of R to the fractional power t is
    evaluated through its continued fraction representation, working
    bottom-up as in algorithm (4.1) of [1]_.

    Parameters
    ----------
    R : (N, N) array_like
        Upper triangular matrix whose fractional power to evaluate.
    t : float
        Fractional power between -1 and 1 exclusive.
    m : positive integer
        Degree of Pade approximation.

    Returns
    -------
    U : (N, N) array_like
        The degree-m Pade approximation of R to the fractional power t.
        This matrix will be upper triangular.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    """
    if m < 1 or int(m) != m:
        raise ValueError('expected a positive integer m')
    if not (-1 < t < 1):
        raise ValueError('expected -1 < t < 1')
    R = np.asarray(R)
    if len(R.shape) != 2 or R.shape[0] != R.shape[1]:
        raise ValueError('expected an upper triangular square matrix')
    n = R.shape[0]
    ident = np.identity(n)
    # Start at the innermost term of the continued fraction (index 2m)
    # and fold in one level per iteration, down to index 1.
    Y = R * _fractional_power_pade_constant(2*m, t)
    for level in reversed(range(1, 2*m)):
        numer = R * _fractional_power_pade_constant(level, t)
        Y = solve_triangular(ident + Y, numer)
    U = ident + Y
    if not np.array_equal(U, np.triu(U)):
        raise Exception('U is not upper triangular')
    return U
def _remainder_matrix_power_triu(T, t):
    """
    Compute a fractional power of an upper triangular matrix.

    The fractional power is restricted to fractions -1 < t < 1.
    This uses algorithm (3.1) of [1]_.
    The Pade approximation itself uses algorithm (4.1) of [2]_.

    Parameters
    ----------
    T : (N, N) array_like
        Upper triangular matrix whose fractional power to evaluate.
    t : float
        Fractional power between -1 and 1 exclusive.

    Returns
    -------
    X : (N, N) array_like
        The fractional power of the matrix.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing Lin (2013)
           "An Improved Schur-Pade Algorithm for Fractional Powers
           of a Matrix and their Frechet Derivatives."
    .. [2] Nicholas J. Higham and Lijing lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    """
    # Pade-degree-dependent bounds, indexed 1..7, passed to the
    # inverse scaling and squaring helper.
    m_to_theta = {
        1: 1.51e-5,
        2: 2.24e-3,
        3: 1.88e-2,
        4: 6.04e-2,
        5: 1.24e-1,
        6: 2.00e-1,
        7: 2.79e-1,
        }
    n, n = T.shape
    T0 = T
    T0_diag = np.diag(T0)
    if np.array_equal(T0, np.diag(T0_diag)):
        # Diagonal input: act elementwise on the eigenvalues.
        U = np.diag(T0_diag ** t)
    else:
        R, s, m = _inverse_squaring_helper(T0, m_to_theta)
        # Evaluate the Pade approximation.
        # Note that this function expects the negative of the matrix
        # returned by the inverse squaring helper.
        U = _fractional_power_pade(-R, t, m)
        # Undo the inverse scaling and squaring.
        # Be less clever about this
        # if the principal branch does not exist at T0;
        # this happens when a diagonal entry of T0
        # is negative with imaginary part 0.
        eivals = np.diag(T0)
        has_principal_branch = all(x.real > 0 or x.imag != 0 for x in eivals)
        for i in range(s, -1, -1):
            if i < s:
                U = U.dot(U)
            else:
                # First pass (i == s): rewrite the diagonal and first
                # superdiagonal with less cancellation-prone formulas;
                # the subsequent squarings propagate these entries.
                if has_principal_branch:
                    p = t * np.exp2(-i)
                    U[np.diag_indices(n)] = T0_diag ** p
                    for j in range(n-1):
                        l1 = T0[j, j]
                        l2 = T0[j+1, j+1]
                        t12 = T0[j, j+1]
                        f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)
                        U[j, j+1] = f12
    if not np.array_equal(U, np.triu(U)):
        raise Exception('U is not upper triangular')
    return U
def _remainder_matrix_power(A, t):
    """
    Compute the fractional power of a matrix, for fractions -1 < t < 1.

    This uses algorithm (3.1) of [1]_.
    The Pade approximation itself uses algorithm (4.1) of [2]_.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose fractional power to evaluate.
    t : float
        Fractional power between -1 and 1 exclusive.

    Returns
    -------
    X : (N, N) array_like
        The fractional power of the matrix.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing Lin (2013)
           "An Improved Schur-Pade Algorithm for Fractional Powers
           of a Matrix and their Frechet Derivatives."
    .. [2] Nicholas J. Higham and Lijing lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    """
    # Validate the input shape (mirrors numpy.matrix_power's checks).
    A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('input must be a square array')
    n = A.shape[0]
    # Obtain an upper triangular matrix similar to A, keeping the
    # similarity transform Z only when one is actually needed.
    if np.array_equal(A, np.triu(A)):
        T, Z = A, None
    elif np.isrealobj(A):
        T, Z = schur(A)
        if not np.array_equal(T, np.triu(T)):
            T, Z = rsf2csf(T, Z)
    else:
        T, Z = schur(A, output='complex')
    # Zeros on the diagonal of the triangular matrix are forbidden,
    # because the inverse scaling and squaring cannot deal with it.
    T_diag = np.diag(T)
    if np.count_nonzero(T_diag) != n:
        raise FractionalMatrixPowerError(
            'cannot use inverse scaling and squaring to find '
            'the fractional matrix power of a singular matrix')
    # A real triangular factor with a negative diagonal entry requires
    # a complex dtype downstream; force it here.
    if np.isrealobj(T) and np.min(T_diag) < 0:
        T = T.astype(complex)
    # Take the fractional power of the triangular factor, then undo the
    # similarity transform when one was applied.
    U = _remainder_matrix_power_triu(T, t)
    if Z is None:
        return U
    return Z.dot(U).dot(np.conjugate(Z).T)
def _fractional_matrix_power(A, p):
"""
Compute the fractional power of a matrix.
See the fractional_matrix_power docstring in matfuncs.py for more info.
"""
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
if p == int(p):
return np.linalg.matrix_power(A, int(p))
# Compute singular values.
s = svdvals(A)
# Inverse scaling and squaring cannot deal with a singular matrix,
# because the process of repeatedly taking square roots
# would not converge to the identity matrix.
if s[-1]:
# Compute the condition number relative to matrix inversion,
# and use this to decide between floor(p) and ceil(p).
k2 = s[0] / s[-1]
p1 = p - np.floor(p)
p2 = p - np.ceil(p)
if p1 * k2 ** (1 - p1) <= -p2 * k2:
a = int(np.floor(p))
b = p1
else:
a = int(np.ceil(p))
b = p2
try:
R = _remainder_matrix_power(A, b)
Q = np.linalg.matrix_power(A, a)
return Q.dot(R)
except np.linalg.LinAlgError:
pass
# If p is negative then we are going to give up.
# If p is non-negative then we can fall back to generic funm.
if p < 0:
X = np.empty_like(A)
X.fill(np.nan)
return X
else:
p1 = p - np.floor(p)
a = int(np.floor(p))
b = p1
R, info = funm(A, lambda x: pow(x, b), disp=False)
Q = np.linalg.matrix_power(A, a)
return Q.dot(R)
def _logm_triu(T):
    """
    Compute matrix logarithm of an upper triangular matrix.

    The matrix logarithm is the inverse of
    expm: expm(logm(`T`)) == `T`

    Parameters
    ----------
    T : (N, N) array_like
        Upper triangular matrix whose logarithm to evaluate

    Returns
    -------
    logm : (N, N) ndarray
        Matrix logarithm of `T`

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
           "Improved Inverse Scaling and Squaring Algorithms
           for the Matrix Logarithm."
           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
           ISSN 1095-7197
    .. [2] Nicholas J. Higham (2008)
           "Functions of Matrices: Theory and Computation"
           ISBN 978-0-898716-46-7
    .. [3] Nicholas J. Higham and Lijing lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    """
    T = np.asarray(T)
    if len(T.shape) != 2 or T.shape[0] != T.shape[1]:
        raise ValueError('expected an upper triangular square matrix')
    n, n = T.shape
    # Construct T0 with the appropriate type,
    # depending on the dtype and the spectrum of T.
    # A real matrix with any negative eigenvalue needs a complex logarithm.
    T_diag = np.diag(T)
    keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0
    if keep_it_real:
        T0 = T
    else:
        T0 = T.astype(complex)
    # Define bounds given in Table (2.1).
    theta = (None,
             1.59e-5, 2.31e-3, 1.94e-2, 6.21e-2,
             1.28e-1, 2.06e-1, 2.88e-1, 3.67e-1,
             4.39e-1, 5.03e-1, 5.60e-1, 6.09e-1,
             6.52e-1, 6.89e-1, 7.21e-1, 7.49e-1)
    R, s, m = _inverse_squaring_helper(T0, theta)
    # Evaluate U = 2**s r_m(T - I) using the partial fraction expansion (1.1).
    # This requires the nodes and weights
    # corresponding to degree-m Gauss-Legendre quadrature.
    # These quadrature arrays need to be transformed from the [-1, 1] interval
    # to the [0, 1] interval.
    nodes, weights = scipy.special.p_roots(m)
    nodes = nodes.real
    if nodes.shape != (m,) or weights.shape != (m,):
        raise Exception('internal error')
    nodes = 0.5 + 0.5 * nodes
    weights = 0.5 * weights
    ident = np.identity(n)
    U = np.zeros_like(R)
    # Each partial fraction term solves one triangular system:
    # (I + beta*R) X = alpha*R.
    for alpha, beta in zip(weights, nodes):
        U += solve_triangular(ident + beta*R, alpha*R)
    # Undo the s square roots: log(T) = 2**s * log(T^(1/2**s)).
    U *= np.exp2(s)
    # Skip this step if the principal branch
    # does not exist at T0; this happens when a diagonal entry of T0
    # is negative with imaginary part 0.
    has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0))
    if has_principal_branch:
        # Recompute diagonal entries of U.
        U[np.diag_indices(n)] = np.log(np.diag(T0))
        # Recompute superdiagonal entries of U.
        # This indexing of this code should be renovated
        # when newer np.diagonal() becomes available.
        for i in range(n-1):
            l1 = T0[i, i]
            l2 = T0[i+1, i+1]
            t12 = T0[i, i+1]
            U[i, i+1] = _logm_superdiag_entry(l1, l2, t12)
    # Return the logm of the upper triangular matrix.
    if not np.array_equal(U, np.triu(U)):
        raise Exception('U is not upper triangular')
    return U
def _logm_force_nonsingular_triangular_matrix(T, inplace=False):
# The input matrix should be upper triangular.
# The eps is ad hoc and is not meant to be machine precision.
tri_eps = 1e-20
abs_diag = np.absolute(np.diag(T))
if np.any(abs_diag == 0):
exact_singularity_msg = 'The logm input matrix is exactly singular.'
warnings.warn(exact_singularity_msg, LogmExactlySingularWarning)
if not inplace:
T = T.copy()
n = T.shape[0]
for i in range(n):
if not T[i, i]:
T[i, i] = tri_eps
elif np.any(abs_diag < tri_eps):
near_singularity_msg = 'The logm input matrix may be nearly singular.'
warnings.warn(near_singularity_msg, LogmNearlySingularWarning)
return T
def _logm(A):
    """
    Compute the matrix logarithm.

    See the logm docstring in matfuncs.py for more info.

    Notes
    -----
    Triangular matrices similar to the input are inspected here.  An
    exactly zero diagonal entry of such a triangular matrix means the
    original matrix is singular and its logarithm does not exist; in
    that case the zero entries are treated as slightly positive by an
    ad-hoc amount (with a warning), in the interest of returning
    something more useful than NaN.
    """
    A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')
    # Promote integer input to a float dtype before factorization.
    if issubclass(A.dtype.type, np.integer):
        A = np.asarray(A, dtype=float)
    keep_it_real = np.isrealobj(A)
    try:
        if np.array_equal(A, np.triu(A)):
            # Already triangular: no Schur decomposition required.
            A = _logm_force_nonsingular_triangular_matrix(A)
            if np.min(np.diag(A)) < 0:
                A = A.astype(complex)
            return _logm_triu(A)
        # General case: triangularize via a (possibly complex) Schur form.
        if keep_it_real:
            T, Z = schur(A)
            if not np.array_equal(T, np.triu(T)):
                T, Z = rsf2csf(T, Z)
        else:
            T, Z = schur(A, output='complex')
        T = _logm_force_nonsingular_triangular_matrix(T, inplace=True)
        U = _logm_triu(T)
        return Z.dot(U).dot(np.conjugate(Z).T)
    except (SqrtmError, LogmError):
        # The triangular machinery failed; report NaNs of matching shape.
        X = np.empty_like(A)
        X.fill(np.nan)
        return X
| 28,030
| 30.602029
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/_decomp_qr.py
|
"""QR decomposition functions."""
import numpy
# Local imports
from .lapack import get_lapack_funcs
from ._misc import _datacopied
__all__ = ['qr', 'qr_multiply', 'rq']
def safecall(f, name, *args, **kwargs):
    """Call a LAPACK routine, determining lwork automatically and handling
    error return values"""
    # When lwork is absent or the -1 sentinel, perform LAPACK's standard
    # workspace query: a first call with lwork=-1 reports the optimal
    # size in the second-to-last output, then the real call follows.
    if kwargs.get("lwork") in (None, -1):
        kwargs['lwork'] = -1
        query = f(*args, **kwargs)
        kwargs['lwork'] = query[-2][0].real.astype(numpy.int_)
    ret = f(*args, **kwargs)
    info = ret[-1]
    if info < 0:
        # Negative info identifies which argument LAPACK rejected.
        raise ValueError("illegal value in %dth argument of internal %s"
                         % (-info, name))
    # Strip the trailing (work, info) pair from the result tuple.
    return ret[:-2]
def qr(a, overwrite_a=False, lwork=None, mode='full', pivoting=False,
       check_finite=True):
    """
    Compute QR decomposition of a matrix.

    Calculate the decomposition ``A = Q R`` where Q is unitary/orthogonal
    and R upper triangular.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to be decomposed
    overwrite_a : bool, optional
        Whether data in `a` is overwritten (may improve performance if
        `overwrite_a` is set to True by reusing the existing input data
        structure rather than creating a new one.)
    lwork : int, optional
        Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
        is computed.
    mode : {'full', 'r', 'economic', 'raw'}, optional
        Determines what information is to be returned: either both Q and R
        ('full', default), only R ('r') or both Q and R but computed in
        economy-size ('economic', see Notes). The final option 'raw'
        (added in SciPy 0.11) makes the function return two matrices
        (Q, TAU) in the internal format used by LAPACK.
    pivoting : bool, optional
        Whether or not factorization should include pivoting for rank-revealing
        qr decomposition. If pivoting, compute the decomposition
        ``A P = Q R`` as above, but where P is chosen such that the diagonal
        of R is non-increasing.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    Q : float or complex ndarray
        Of shape (M, M), or (M, K) for ``mode='economic'``. Not returned
        if ``mode='r'``.
    R : float or complex ndarray
        Of shape (M, N), or (K, N) for ``mode='economic'``. ``K = min(M, N)``.
    P : int ndarray
        Of shape (N,) for ``pivoting=True``. Not returned if
        ``pivoting=False``.

    Raises
    ------
    LinAlgError
        Raised if decomposition fails

    Notes
    -----
    This is an interface to the LAPACK routines dgeqrf, zgeqrf,
    dorgqr, zungqr, dgeqp3, and zgeqp3.

    If ``mode=economic``, the shapes of Q and R are (M, K) and (K, N) instead
    of (M,M) and (M,N), with ``K=min(M,N)``.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import linalg
    >>> rng = np.random.default_rng()
    >>> a = rng.standard_normal((9, 6))

    >>> q, r = linalg.qr(a)
    >>> np.allclose(a, np.dot(q, r))
    True
    >>> q.shape, r.shape
    ((9, 9), (9, 6))

    >>> r2 = linalg.qr(a, mode='r')
    >>> np.allclose(r, r2)
    True

    >>> q3, r3 = linalg.qr(a, mode='economic')
    >>> q3.shape, r3.shape
    ((9, 6), (6, 6))

    >>> q4, r4, p4 = linalg.qr(a, pivoting=True)
    >>> d = np.abs(np.diag(r4))
    >>> np.all(d[1:] <= d[:-1])
    True
    >>> np.allclose(a[:, p4], np.dot(q4, r4))
    True
    >>> q4.shape, r4.shape, p4.shape
    ((9, 9), (9, 6), (6,))

    >>> q5, r5, p5 = linalg.qr(a, mode='economic', pivoting=True)
    >>> q5.shape, r5.shape, p5.shape
    ((9, 6), (6, 6), (6,))

    """
    # 'qr' was the old default, equivalent to 'full'. Neither 'full' nor
    # 'qr' are used below.
    # 'raw' is used internally by qr_multiply
    if mode not in ['full', 'qr', 'r', 'economic', 'raw']:
        raise ValueError("Mode argument should be one of ['full', 'r',"
                         "'economic', 'raw']")
    if check_finite:
        a1 = numpy.asarray_chkfinite(a)
    else:
        a1 = numpy.asarray(a)
    if len(a1.shape) != 2:
        raise ValueError("expected a 2-D array")
    M, N = a1.shape
    # If asarray above made a fresh copy of the user's data, it is safe
    # to let LAPACK overwrite it regardless of the caller's request.
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    if pivoting:
        geqp3, = get_lapack_funcs(('geqp3',), (a1,))
        qr, jpvt, tau = safecall(geqp3, "geqp3", a1, overwrite_a=overwrite_a)
        jpvt -= 1  # geqp3 returns a 1-based index array, so subtract 1
    else:
        geqrf, = get_lapack_funcs(('geqrf',), (a1,))
        qr, tau = safecall(geqrf, "geqrf", a1, lwork=lwork,
                           overwrite_a=overwrite_a)
    # geqrf/geqp3 pack R into the upper triangle of their output; the
    # economy/raw shapes keep only the leading N rows when M >= N.
    if mode not in ['economic', 'raw'] or M < N:
        R = numpy.triu(qr)
    else:
        R = numpy.triu(qr[:N, :])
    # Bundle R (and the pivot array, if any) for all return paths.
    if pivoting:
        Rj = R, jpvt
    else:
        Rj = R,
    if mode == 'r':
        return Rj
    elif mode == 'raw':
        # Hand back the packed Householder reflectors (qr, tau) so a
        # caller such as qr_multiply can apply Q without forming it.
        return ((qr, tau),) + Rj
    gor_un_gqr, = get_lapack_funcs(('orgqr',), (qr,))
    if M < N:
        # Wide matrix: Q is M x M, generated from the first M columns
        # of reflectors.
        Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr[:, :M], tau,
                      lwork=lwork, overwrite_a=1)
    elif mode == 'economic':
        Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr, tau, lwork=lwork,
                      overwrite_a=1)
    else:
        # Full mode with M >= N: embed the reflectors into an M x M
        # array before generating the square Q.
        t = qr.dtype.char
        qqr = numpy.empty((M, M), dtype=t)
        qqr[:, :N] = qr
        Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qqr, tau, lwork=lwork,
                      overwrite_a=1)
    return (Q,) + Rj
def qr_multiply(a, c, mode='right', pivoting=False, conjugate=False,
                overwrite_a=False, overwrite_c=False):
    """
    Calculate the QR decomposition and multiply Q with a matrix.

    Calculate the decomposition ``A = Q R`` where Q is unitary/orthogonal
    and R upper triangular. Multiply Q with a vector or a matrix c.

    Parameters
    ----------
    a : (M, N), array_like
        Input array
    c : array_like
        Input array to be multiplied by ``q``.
    mode : {'left', 'right'}, optional
        ``Q @ c`` is returned if mode is 'left', ``c @ Q`` is returned if
        mode is 'right'.
        The shape of c must be appropriate for the matrix multiplications,
        if mode is 'left', ``min(a.shape) == c.shape[0]``,
        if mode is 'right', ``a.shape[0] == c.shape[1]``.
    pivoting : bool, optional
        Whether or not factorization should include pivoting for rank-revealing
        qr decomposition, see the documentation of qr.
    conjugate : bool, optional
        Whether Q should be complex-conjugated. This might be faster
        than explicit conjugation.
    overwrite_a : bool, optional
        Whether data in a is overwritten (may improve performance)
    overwrite_c : bool, optional
        Whether data in c is overwritten (may improve performance).
        If this is used, c must be big enough to keep the result,
        i.e. ``c.shape[0]`` = ``a.shape[0]`` if mode is 'left'.

    Returns
    -------
    CQ : ndarray
        The product of ``Q`` and ``c``.
    R : (K, N), ndarray
        R array of the resulting QR factorization where ``K = min(M, N)``.
    P : (N,) ndarray
        Integer pivot array. Only returned when ``pivoting=True``.

    Raises
    ------
    LinAlgError
        Raised if QR decomposition fails.

    Notes
    -----
    This is an interface to the LAPACK routines ``?GEQRF``, ``?ORMQR``,
    ``?UNMQR``, and ``?GEQP3``.

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import qr_multiply, qr
    >>> A = np.array([[1, 3, 3], [2, 3, 2], [2, 3, 3], [1, 3, 2]])
    >>> qc, r1, piv1 = qr_multiply(A, 2*np.eye(4), pivoting=1)
    >>> qc
    array([[-1.,  1., -1.],
           [-1., -1.,  1.],
           [-1., -1., -1.],
           [-1.,  1.,  1.]])
    >>> r1
    array([[-6., -3., -5.            ],
           [ 0., -1., -1.11022302e-16],
           [ 0.,  0., -1.            ]])
    >>> piv1
    array([1, 0, 2], dtype=int32)
    >>> q2, r2, piv2 = qr(A, mode='economic', pivoting=1)
    >>> np.allclose(2*q2 - qc, np.zeros((4, 3)))
    True

    """
    if mode not in ['left', 'right']:
        raise ValueError("Mode argument can only be 'left' or 'right' but "
                         "not '{}'".format(mode))
    c = numpy.asarray_chkfinite(c)
    # Promote a 1-D c to 2-D; remember so the result is flattened again.
    if c.ndim < 2:
        onedim = True
        c = numpy.atleast_2d(c)
        if mode == "left":
            c = c.T
    else:
        onedim = False
    a = numpy.atleast_2d(numpy.asarray(a))  # chkfinite done in qr
    M, N = a.shape
    if mode == 'left':
        if c.shape[0] != min(M, N + overwrite_c*(M-N)):
            raise ValueError('Array shapes are not compatible for Q @ c'
                             ' operation: {} vs {}'.format(a.shape, c.shape))
    else:
        if M != c.shape[1]:
            raise ValueError('Array shapes are not compatible for c @ Q'
                             ' operation: {} vs {}'.format(c.shape, a.shape))
    # 'raw' mode returns the packed Householder reflectors (Q, tau) so
    # that Q can be applied via ormqr/unmqr without forming it.
    raw = qr(a, overwrite_a, None, "raw", pivoting)
    Q, tau = raw[0]
    gor_un_mqr, = get_lapack_funcs(('ormqr',), (Q,))
    # Real flavors apply Q^T, complex flavors Q^H.
    if gor_un_mqr.typecode in ('s', 'd'):
        trans = "T"
    else:
        trans = "C"
    Q = Q[:, :min(M, N)]
    if M > N and mode == "left" and not overwrite_c:
        # Tall-and-skinny left multiply: pad c from N to M rows (or
        # columns, when conjugating) in a Fortran-ordered workspace that
        # LAPACK can overwrite in place.
        if conjugate:
            cc = numpy.zeros((c.shape[1], M), dtype=c.dtype, order="F")
            cc[:, :N] = c.T
        else:
            cc = numpy.zeros((M, c.shape[1]), dtype=c.dtype, order="F")
            cc[:N, :] = c
            trans = "N"
        if conjugate:
            lr = "R"
        else:
            lr = "L"
        overwrite_c = True
    elif c.flags["C_CONTIGUOUS"] and trans == "T" or conjugate:
        # Row-major c with a transpose pending (or explicit conjugation):
        # operate on c.T so LAPACK sees a Fortran-ordered array, and flip
        # the multiplication side accordingly.
        cc = c.T
        if mode == "left":
            lr = "R"
        else:
            lr = "L"
    else:
        trans = "N"
        cc = c
        if mode == "left":
            lr = "L"
        else:
            lr = "R"
    cQ, = safecall(gor_un_mqr, "gormqr/gunmqr", lr, trans, Q, tau, cc,
                   overwrite_c=overwrite_c)
    # Undo the transpose trick used above, if any.
    if trans != "N":
        cQ = cQ.T
    if mode == "right":
        cQ = cQ[:, :min(M, N)]
    if onedim:
        cQ = cQ.ravel()
    return (cQ,) + raw[1:]
def rq(a, overwrite_a=False, lwork=None, mode='full', check_finite=True):
    """
    Compute RQ decomposition of a matrix.

    Calculate the decomposition ``A = R Q`` where Q is unitary/orthogonal
    and R upper triangular.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to be decomposed
    overwrite_a : bool, optional
        Whether data in a is overwritten (may improve performance)
    lwork : int, optional
        Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
        is computed.
    mode : {'full', 'r', 'economic'}, optional
        Determines what information is to be returned: either both Q and R
        ('full', default), only R ('r') or both Q and R but computed in
        economy-size ('economic', see Notes).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    R : float or complex ndarray
        Of shape (M, N) or (M, K) for ``mode='economic'``. ``K = min(M, N)``.
    Q : float or complex ndarray
        Of shape (N, N) or (K, N) for ``mode='economic'``. Not returned
        if ``mode='r'``.

    Raises
    ------
    LinAlgError
        If decomposition fails.

    Notes
    -----
    This is an interface to the LAPACK routines sgerqf, dgerqf, cgerqf, zgerqf,
    sorgrq, dorgrq, cungrq and zungrq.

    If ``mode=economic``, the shapes of Q and R are (K, N) and (M, K) instead
    of (N,N) and (M,N), with ``K=min(M,N)``.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import linalg
    >>> rng = np.random.default_rng()
    >>> a = rng.standard_normal((6, 9))
    >>> r, q = linalg.rq(a)
    >>> np.allclose(a, r @ q)
    True
    >>> r.shape, q.shape
    ((6, 9), (9, 9))
    >>> r2 = linalg.rq(a, mode='r')
    >>> np.allclose(r, r2)
    True
    >>> r3, q3 = linalg.rq(a, mode='economic')
    >>> r3.shape, q3.shape
    ((6, 6), (6, 9))

    """
    if mode not in ['full', 'r', 'economic']:
        raise ValueError(
            "Mode argument should be one of ['full', 'r', 'economic']")
    if check_finite:
        a1 = numpy.asarray_chkfinite(a)
    else:
        a1 = numpy.asarray(a)
    if len(a1.shape) != 2:
        raise ValueError('expected matrix')
    M, N = a1.shape
    # If asarray above made a fresh copy, LAPACK may safely overwrite it.
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    gerqf, = get_lapack_funcs(('gerqf',), (a1,))
    rq, tau = safecall(gerqf, 'gerqf', a1, lwork=lwork,
                       overwrite_a=overwrite_a)
    # gerqf packs R against the right edge of its output; the N-M offset
    # selects that shifted upper triangle in the full-size case.
    if not mode == 'economic' or N < M:
        R = numpy.triu(rq, N-M)
    else:
        R = numpy.triu(rq[-M:, -M:])
    if mode == 'r':
        return R
    gor_un_grq, = get_lapack_funcs(('orgrq',), (rq,))
    if N < M:
        # Tall matrix: Q is N x N, generated from the last N rows of
        # reflectors.
        Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq[-N:], tau, lwork=lwork,
                      overwrite_a=1)
    elif mode == 'economic':
        Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq, tau, lwork=lwork,
                      overwrite_a=1)
    else:
        # Full mode with N >= M: embed the reflectors in the bottom rows
        # of an N x N array before generating the square Q.
        rq1 = numpy.empty((N, N), dtype=rq.dtype)
        rq1[-M:] = rq
        Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq1, tau, lwork=lwork,
                      overwrite_a=1)
    return R, Q
| 13,727
| 30.925581
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/misc.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.linalg` namespace for importing the functions
# included below.
import warnings
from . import _misc
__all__ = [ # noqa: F822
'LinAlgError', 'LinAlgWarning', 'norm', 'get_blas_funcs',
'get_lapack_funcs'
]
def __dir__():
    # Restrict introspection/tab-completion on this deprecated shim
    # module to the re-exported public names.
    return __all__
def __getattr__(name):
    """Forward deprecated attribute access to ``scipy.linalg._misc``.

    Known names emit a DeprecationWarning and are resolved against the
    private module; anything else raises AttributeError.
    """
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.linalg` namespace, "
                      "the `scipy.linalg.misc` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_misc, name)
    raise AttributeError(
        "scipy.linalg.misc is deprecated and has no attribute "
        f"{name}. Try looking in scipy.linalg instead.")
| 799
| 26.586207
| 76
|
py
|
scipy
|
scipy-main/scipy/linalg/_basic.py
|
#
# Author: Pearu Peterson, March 2002
#
# w/ additions by Travis Oliphant, March 2002
# and Jake Vanderplas, August 2012
from warnings import warn
from itertools import product
import numpy as np
from numpy import atleast_1d, atleast_2d
from .lapack import get_lapack_funcs, _compute_lwork
from ._misc import LinAlgError, _datacopied, LinAlgWarning
from ._decomp import _asarray_validated
from . import _decomp, _decomp_svd
from ._solve_toeplitz import levinson
from ._cythonized_array_utils import find_det_from_lu
from scipy._lib.deprecation import _NoValue
__all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded',
'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq',
'pinv', 'pinvh', 'matrix_balance', 'matmul_toeplitz']
# The numpy facilities for type-casting checks are too slow for small sized
# arrays and eat away the time budget for the checkups. Here we set a
# precomputed dict container of the numpy.can_cast() table.
# It can be used to determine quickly what a dtype can be cast to LAPACK
# compatible types, i.e., 'float32, float64, complex64, complex128'.
# Then it can be checked via "casting_dict[arr.dtype.char]"
# For every dtype character, the string of LAPACK-compatible flavors
# ('f', 'd', 'F', 'D') it can be safely cast to.
lapack_cast_dict = {
    type_char: ''.join(flavor for flavor in 'fdFD'
                       if np.can_cast(type_char, flavor))
    for type_char in np.typecodes['All']
}
# Linear equations
def _solve_check(n, info, lamch=None, rcond=None):
""" Check arguments during the different steps of the solution phase """
if info < 0:
raise ValueError('LAPACK reported an illegal value in {}-th argument'
'.'.format(-info))
elif 0 < info:
raise LinAlgError('Matrix is singular.')
if lamch is None:
return
E = lamch('E')
if rcond < E:
warn('Ill-conditioned matrix (rcond={:.6g}): '
'result may not be accurate.'.format(rcond),
LinAlgWarning, stacklevel=3)
def solve(a, b, lower=False, overwrite_a=False,
          overwrite_b=False, check_finite=True, assume_a='gen',
          transposed=False):
    """
    Solves the linear equation set ``a @ x == b`` for the unknown ``x``
    for square `a` matrix.
    If the data matrix is known to be a particular type then supplying the
    corresponding string to ``assume_a`` key chooses the dedicated solver.
    The available options are
    =================== ========
    generic matrix 'gen'
    symmetric 'sym'
    hermitian 'her'
    positive definite 'pos'
    =================== ========
    If omitted, ``'gen'`` is the default structure.
    The datatype of the arrays define which solver is called regardless
    of the values. In other words, even when the complex array entries have
    precisely zero imaginary parts, the complex solver will be called based
    on the data type of the array.
    Parameters
    ----------
    a : (N, N) array_like
        Square input data
    b : (N, NRHS) array_like
        Input data for the right hand side.
    lower : bool, default: False
        Ignored if ``assume_a == 'gen'`` (the default). If True, the
        calculation uses only the data in the lower triangle of `a`;
        entries above the diagonal are ignored. If False (default), the
        calculation uses only the data in the upper triangle of `a`; entries
        below the diagonal are ignored.
    overwrite_a : bool, default: False
        Allow overwriting data in `a` (may enhance performance).
    overwrite_b : bool, default: False
        Allow overwriting data in `b` (may enhance performance).
    check_finite : bool, default: True
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    assume_a : str, {'gen', 'sym', 'her', 'pos'}
        Valid entries are explained above.
    transposed : bool, default: False
        If True, solve ``a.T @ x == b``. Raises `NotImplementedError`
        for complex `a`.
    Returns
    -------
    x : (N, NRHS) ndarray
        The solution array.
    Raises
    ------
    ValueError
        If size mismatches detected or input a is not square.
    LinAlgError
        If the matrix is singular.
    LinAlgWarning
        If an ill-conditioned input a is detected.
    NotImplementedError
        If transposed is True and input a is a complex matrix.
    Notes
    -----
    If the input b matrix is a 1-D array with N elements, when supplied
    together with an NxN input a, it is assumed as a valid column vector
    despite the apparent size mismatch. This is compatible with the
    numpy.dot() behavior and the returned result is still 1-D array.
    The generic, symmetric, Hermitian and positive definite solutions are
    obtained via calling ?GESV, ?SYSV, ?HESV, and ?POSV routines of
    LAPACK respectively.
    Examples
    --------
    Given `a` and `b`, solve for `x`:
    >>> import numpy as np
    >>> a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
    >>> b = np.array([2, 4, -1])
    >>> from scipy import linalg
    >>> x = linalg.solve(a, b)
    >>> x
    array([ 2., -2., 9.])
    >>> np.dot(a, x) == b
    array([ True, True, True], dtype=bool)
    """
    # Flags for 1-D or N-D right-hand side
    b_is_1D = False
    a1 = atleast_2d(_asarray_validated(a, check_finite=check_finite))
    b1 = atleast_1d(_asarray_validated(b, check_finite=check_finite))
    n = a1.shape[0]
    # If validation made a fresh copy, the copy is free to be overwritten.
    overwrite_a = overwrite_a or _datacopied(a1, a)
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if a1.shape[0] != a1.shape[1]:
        raise ValueError('Input a needs to be a square matrix.')
    if n != b1.shape[0]:
        # Last chance to catch 1x1 scalar a and 1-D b arrays
        if not (n == 1 and b1.size != 0):
            raise ValueError('Input b has to have same number of rows as '
                             'input a')
    # accommodate empty arrays
    if b1.size == 0:
        return np.asfortranarray(b1.copy())
    # regularize 1-D b arrays to 2D
    if b1.ndim == 1:
        if n == 1:
            b1 = b1[None, :]
        else:
            b1 = b1[:, None]
        b_is_1D = True
    if assume_a not in ('gen', 'sym', 'her', 'pos'):
        raise ValueError('{} is not a recognized matrix structure'
                         ''.format(assume_a))
    # for a real matrix, describe it as "symmetric", not "hermitian"
    # (lapack doesn't know what to do with real hermitian matrices)
    if assume_a == 'her' and not np.iscomplexobj(a1):
        assume_a = 'sym'
    # Get the correct lamch function.
    # The LAMCH functions only exists for S and D
    # So for complex values we have to convert to real/double.
    if a1.dtype.char in 'fF': # single precision
        lamch = get_lapack_funcs('lamch', dtype='f')
    else:
        lamch = get_lapack_funcs('lamch', dtype='d')
    # Currently we do not have the other forms of the norm calculators
    # lansy, lanpo, lanhe.
    # However, in any case they only reduce computations slightly...
    lange = get_lapack_funcs('lange', (a1,))
    # Since the I-norm and 1-norm are the same for symmetric matrices
    # we can collect them all in this one call
    # Note however, that when issuing 'gen' and form!='none', then
    # the I-norm should be used
    if transposed:
        trans = 1
        norm = 'I'
        if np.iscomplexobj(a1):
            raise NotImplementedError('scipy.linalg.solve can currently '
                                      'not solve a^T x = b or a^H x = b '
                                      'for complex matrices.')
    else:
        trans = 0
        norm = '1'
    # Matrix norm feeds the ?GECON/?SYCON/?HECON/?POCON condition estimate.
    anorm = lange(norm, a1)
    # Generalized case 'gesv'
    if assume_a == 'gen':
        # LU-factorize, solve, then estimate the reciprocal condition number
        # from the factorization.
        gecon, getrf, getrs = get_lapack_funcs(('gecon', 'getrf', 'getrs'),
                                               (a1, b1))
        lu, ipvt, info = getrf(a1, overwrite_a=overwrite_a)
        _solve_check(n, info)
        x, info = getrs(lu, ipvt, b1,
                        trans=trans, overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = gecon(lu, anorm, norm=norm)
    # Hermitian case 'hesv'
    elif assume_a == 'her':
        # ?HESV factorizes (Bunch-Kaufman) and solves in a single call;
        # the workspace size is queried first.
        hecon, hesv, hesv_lw = get_lapack_funcs(('hecon', 'hesv',
                                                 'hesv_lwork'), (a1, b1))
        lwork = _compute_lwork(hesv_lw, n, lower)
        lu, ipvt, x, info = hesv(a1, b1, lwork=lwork,
                                 lower=lower,
                                 overwrite_a=overwrite_a,
                                 overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = hecon(lu, ipvt, anorm)
    # Symmetric case 'sysv'
    elif assume_a == 'sym':
        sycon, sysv, sysv_lw = get_lapack_funcs(('sycon', 'sysv',
                                                 'sysv_lwork'), (a1, b1))
        lwork = _compute_lwork(sysv_lw, n, lower)
        lu, ipvt, x, info = sysv(a1, b1, lwork=lwork,
                                 lower=lower,
                                 overwrite_a=overwrite_a,
                                 overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = sycon(lu, ipvt, anorm)
    # Positive definite case 'posv'
    else:
        # Cholesky-based ?POSV; no pivot array is produced.
        pocon, posv = get_lapack_funcs(('pocon', 'posv'),
                                       (a1, b1))
        lu, x, info = posv(a1, b1, lower=lower,
                           overwrite_a=overwrite_a,
                           overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = pocon(lu, anorm)
    # Final check also warns (LinAlgWarning) if rcond is below machine eps.
    _solve_check(n, info, lamch, rcond)
    if b_is_1D:
        x = x.ravel()
    return x
def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,
                     overwrite_b=False, check_finite=True):
    """
    Solve the equation ``a x = b`` for `x`, where `a` is a triangular matrix.

    Parameters
    ----------
    a : (M, M) array_like
        A triangular matrix.
    b : (M,) or (M, N) array_like
        Right-hand side matrix in ``a x = b``.
    trans : {0, 1, 2, 'N', 'T', 'C'}, optional
        Type of system to solve: ``0``/``'N'`` solves ``a x = b``,
        ``1``/``'T'`` solves ``a^T x = b``, and ``2``/``'C'`` solves
        ``a^H x = b``.
    lower : bool, optional
        Use only the data contained in the lower triangle of `a`.
        Default is to use the upper triangle.
    unit_diagonal : bool, optional
        If True, diagonal elements of `a` are assumed to be 1 and
        will not be referenced.
    overwrite_b : bool, optional
        Allow overwriting data in `b` (may enhance performance).
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or
        NaNs.

    Returns
    -------
    x : (M,) or (M, N) ndarray
        Solution to the system ``a x = b``. Shape of return matches `b`.

    Raises
    ------
    LinAlgError
        If `a` is singular.

    Notes
    -----
    .. versionadded:: 0.9.0

    Examples
    --------
    Solve the lower triangular system ``a x = b``:

    >>> import numpy as np
    >>> from scipy.linalg import solve_triangular
    >>> a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
    >>> b = np.array([4, 2, 4, 2])
    >>> x = solve_triangular(a, b, lower=True)
    >>> x
    array([ 1.33333333, -0.66666667,  2.66666667, -1.33333333])
    >>> a.dot(x)  # Check the result
    array([ 4.,  2.,  4.,  2.])
    """
    a_arr = _asarray_validated(a, check_finite=check_finite)
    b_arr = _asarray_validated(b, check_finite=check_finite)
    if a_arr.ndim != 2 or a_arr.shape[0] != a_arr.shape[1]:
        raise ValueError('expected square matrix')
    if a_arr.shape[0] != b_arr.shape[0]:
        raise ValueError(f'shapes of a {a_arr.shape} and b {b_arr.shape} '
                         'are incompatible')
    overwrite_b = overwrite_b or _datacopied(b_arr, b)
    # Accept the string spellings of ``trans`` as well.
    trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans)
    trtrs, = get_lapack_funcs(('trtrs',), (a_arr, b_arr))
    if a_arr.flags.f_contiguous or trans == 2:
        x, info = trtrs(a_arr, b_arr, overwrite_b=overwrite_b, lower=lower,
                        trans=trans, unitdiag=unit_diagonal)
    else:
        # ?trtrs expects Fortran ordering; for a C-contiguous `a` pass the
        # (Fortran-contiguous) transpose instead, flipping the triangle and
        # transpose flags so the same system is solved.
        x, info = trtrs(a_arr.T, b_arr, overwrite_b=overwrite_b,
                        lower=not lower, trans=not trans,
                        unitdiag=unit_diagonal)
    if info == 0:
        return x
    if info > 0:
        raise LinAlgError(
            f"singular matrix: resolution failed at diagonal {info - 1}")
    raise ValueError(f'illegal value in {-info}th argument of internal trtrs')
def solve_banded(l_and_u, ab, b, overwrite_ab=False, overwrite_b=False,
                 check_finite=True):
    """
    Solve the equation a x = b for x, assuming a is banded matrix.

    The matrix a is stored in `ab` using the matrix diagonal ordered form::

        ab[u + i - j, j] == a[i,j]

    Example of `ab` (shape of a is (6,6), `u` =1, `l` =2)::

        *    a01  a12  a23  a34  a45
        a00  a11  a22  a33  a44  a55
        a10  a21  a32  a43  a54   *
        a20  a31  a42  a53   *    *

    Parameters
    ----------
    (l, u) : (integer, integer)
        Number of non-zero lower and upper diagonals
    ab : (`l` + `u` + 1, M) array_like
        Banded matrix
    b : (M,) or (M, K) array_like
        Right-hand side
    overwrite_ab : bool, optional
        Discard data in `ab` (may enhance performance)
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or
        NaNs.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        The solution to the system a x = b. Returned shape depends on the
        shape of `b`.

    Raises
    ------
    LinAlgError
        If the matrix is singular.
    ValueError
        If the shapes of `ab` and `b`, or `l_and_u` and ``ab.shape[0]``,
        are inconsistent.

    Examples
    --------
    Solve the banded system a x = b, where::

            [5  2 -1  0  0]       [0]
            [1  4  2 -1  0]       [1]
        a = [0  1  3  2 -1]   b = [2]
            [0  0  1  2  2]       [2]
            [0  0  0  1  1]       [3]

    There is one nonzero diagonal below the main diagonal (l = 1), and
    two above (u = 2). The diagonal banded form of the matrix is::

             [*  * -1 -1 -1]
        ab = [*  2  2  2  2]
             [5  4  3  2  1]
             [1  1  1  1  *]

    >>> import numpy as np
    >>> from scipy.linalg import solve_banded
    >>> ab = np.array([[0,  0, -1, -1, -1],
    ...                [0,  2,  2,  2,  2],
    ...                [5,  4,  3,  2,  1],
    ...                [1,  1,  1,  1,  0]])
    >>> b = np.array([0, 1, 2, 2, 3])
    >>> x = solve_banded((1, 2), ab, b)
    >>> x
    array([-2.37288136,  3.93220339, -4.        ,  4.3559322 , -1.3559322 ])
    """
    a1 = _asarray_validated(ab, check_finite=check_finite, as_inexact=True)
    b1 = _asarray_validated(b, check_finite=check_finite, as_inexact=True)
    # Validate shapes.
    if a1.shape[-1] != b1.shape[0]:
        raise ValueError("shapes of ab and b are not compatible.")
    (nlower, nupper) = l_and_u
    if nlower + nupper + 1 != a1.shape[0]:
        raise ValueError("invalid values for the number of lower and upper "
                         "diagonals: l+u+1 (%d) does not equal ab.shape[0] "
                         "(%d)" % (nlower + nupper + 1, ab.shape[0]))
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if a1.shape[-1] == 1:
        # 1x1 system: in diagonal ordered form the single diagonal entry
        # a[0, 0] is stored at ab[u, 0] (row ``nupper``). A fixed row index
        # (the former ``a1[1, 0]``) is only correct when u == 1: it steps
        # out of bounds for u == 0 and reads an unused padding cell for
        # u > 1.
        b2 = np.array(b1, copy=(not overwrite_b))
        b2 /= a1[nupper, 0]
        return b2
    if nlower == nupper == 1:
        # Tridiagonal systems have a dedicated, faster LAPACK driver ?gtsv.
        overwrite_ab = overwrite_ab or _datacopied(a1, ab)
        gtsv, = get_lapack_funcs(('gtsv',), (a1, b1))
        du = a1[0, 1:]
        d = a1[1, :]
        dl = a1[2, :-1]
        du2, d, du, x, info = gtsv(dl, d, du, b1, overwrite_ab, overwrite_ab,
                                   overwrite_ab, overwrite_b)
    else:
        gbsv, = get_lapack_funcs(('gbsv',), (a1, b1))
        # ?gbsv needs ``nlower`` extra rows above the band to hold LU
        # fill-in, so copy the band into a taller scratch array.
        a2 = np.zeros((2*nlower + nupper + 1, a1.shape[1]), dtype=gbsv.dtype)
        a2[nlower:, :] = a1
        lu, piv, x, info = gbsv(nlower, nupper, a2, b1, overwrite_ab=True,
                                overwrite_b=overwrite_b)
    if info == 0:
        return x
    if info > 0:
        raise LinAlgError("singular matrix")
    raise ValueError('illegal value in %d-th argument of internal '
                     'gbsv/gtsv' % -info)
def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False,
                  check_finite=True):
    """
    Solve the equation ``a x = b`` where ``a`` is a Hermitian
    positive-definite banded matrix.

    Uses Thomas' Algorithm, which is more efficient than standard LU
    factorization, but should only be used for Hermitian positive-definite
    matrices.

    The matrix ``a`` is stored in `ab` either in lower-diagonal or
    upper-diagonal ordered form::

        ab[u + i - j, j] == a[i, j]   (upper form; i <= j)
        ab[    i - j, j] == a[i, j]   (lower form; i >= j)

    For example, with ``a`` of shape (6, 6) and two upper diagonals
    (``u`` = 2), the upper form is::

        *   *   a02 a13 a24 a35
        *   a01 a12 a23 a34 a45
        a00 a11 a22 a33 a44 a55

    and the lower form is::

        a00 a11 a22 a33 a44 a55
        a10 a21 a32 a43 a54 *
        a20 a31 a42 a53 *   *

    Cells marked with * are not used.

    Parameters
    ----------
    ab : (``u`` + 1, M) array_like
        Banded matrix
    b : (M,) or (M, K) array_like
        Right-hand side
    overwrite_ab : bool, optional
        Discard data in `ab` (may enhance performance)
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance)
    lower : bool, optional
        Is the matrix in the lower form. (Default is upper form)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or
        NaNs.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        The solution to the system ``a x = b``. Shape of return matches
        shape of `b`.

    Raises
    ------
    LinAlgError
        If a leading minor of ``a`` is not positive definite.

    Notes
    -----
    In the case of a non-positive definite matrix ``a``, the solver
    `solve_banded` may be used.

    Examples
    --------
    Solve the banded system ``A x = b``, with ``ab`` holding the main
    diagonal and the diagonal below it (lower form):

    >>> import numpy as np
    >>> from scipy.linalg import solveh_banded
    >>> ab = np.array([[ 4,  5,  6,  7,  8,  9],
    ...                [ 2,  2,  2,  2,  2,  0],
    ...                [-1, -1, -1, -1,  0,  0]])
    >>> b = np.array([1, 2, 2, 3, 3, 3])
    >>> x = solveh_banded(ab, b, lower=True)
    >>> x
    array([ 0.03431373,  0.45938375,  0.05602241,  0.47759104,  0.17577031,
            0.34733894])
    """
    ab_arr = _asarray_validated(ab, check_finite=check_finite)
    b_arr = _asarray_validated(b, check_finite=check_finite)
    # Validate shapes.
    if ab_arr.shape[-1] != b_arr.shape[0]:
        raise ValueError("shapes of ab and b are not compatible.")
    overwrite_b = overwrite_b or _datacopied(b_arr, b)
    overwrite_ab = overwrite_ab or _datacopied(ab_arr, ab)
    if ab_arr.shape[0] == 2:
        # Tridiagonal: feed the real diagonal and the single off-diagonal
        # to the dedicated ?ptsv driver.
        ptsv, = get_lapack_funcs(('ptsv',), (ab_arr, b_arr))
        if lower:
            diag = ab_arr[0, :].real
            off_diag = ab_arr[1, :-1]
        else:
            diag = ab_arr[1, :].real
            # ?ptsv takes the subdiagonal, so conjugate the stored
            # superdiagonal of the Hermitian matrix.
            off_diag = ab_arr[0, 1:].conj()
        _, _, x, info = ptsv(diag, off_diag, b_arr, overwrite_ab,
                             overwrite_ab, overwrite_b)
    else:
        pbsv, = get_lapack_funcs(('pbsv',), (ab_arr, b_arr))
        _, x, info = pbsv(ab_arr, b_arr, lower=lower,
                          overwrite_ab=overwrite_ab,
                          overwrite_b=overwrite_b)
    if info > 0:
        raise LinAlgError("%dth leading minor not positive definite" % info)
    if info < 0:
        raise ValueError('illegal value in %dth argument of internal '
                         'pbsv' % -info)
    return x
def solve_toeplitz(c_or_cr, b, check_finite=True):
    """Solve a Toeplitz system ``T x = b`` using Levinson recursion.

    The Toeplitz matrix has constant diagonals, with ``c`` as its first
    column and ``r`` as its first row. If ``r`` is not given,
    ``r == conjugate(c)`` is assumed.

    Parameters
    ----------
    c_or_cr : array_like or tuple of (array_like, array_like)
        The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
        actual shape of ``c``, it will be converted to a 1-D array. If not
        supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
        real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first
        row of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual
        shape of ``r``, it will be converted to a 1-D array.
    b : (M,) or (M, K) array_like
        Right-hand side in ``T x = b``.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers. Disabling may give a performance gain, but may result in
        problems (result entirely NaNs) if the inputs do contain infinities
        or NaNs.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        The solution to the system ``T x = b``. Shape of return matches
        shape of `b`.

    See Also
    --------
    toeplitz : Toeplitz matrix

    Notes
    -----
    The solution is computed using Levinson-Durbin recursion, which is
    faster than generic least-squares methods, but can be less numerically
    stable.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import solve_toeplitz, toeplitz
    >>> c = np.array([1, 3, 6, 10])    # First column of T
    >>> r = np.array([1, -1, -2, -3])  # First row of T
    >>> b = np.array([1, 2, 2, 5])
    >>> x = solve_toeplitz((c, r), b)
    >>> x
    array([ 1.66666667, -1.        , -2.66666667,  2.33333333])
    >>> toeplitz(c, r).dot(x)
    array([ 1.,  2.,  2.,  5.])
    """
    # If numerical stability of this algorithm is a problem, a future
    # developer might consider implementing other O(N^2) Toeplitz solvers,
    # such as GKO (https://www.jstor.org/stable/2153371) or Bareiss.
    r, c, b, dtype, b_shape = _validate_args_for_toeplitz_ops(
        c_or_cr, b, check_finite, keep_b_shape=True)
    if b is None:
        raise ValueError('illegal value, `b` is a required argument')
    # The Levinson kernel consumes the matrix entries as a single vector:
    # a reversed copy of r[1:] followed by c.
    vals = np.concatenate((r[-1:0:-1], c))
    if b.ndim == 1:
        x, _ = levinson(vals, np.ascontiguousarray(b))
    else:
        columns = [levinson(vals, np.ascontiguousarray(b[:, k]))[0]
                   for k in range(b.shape[1])]
        x = np.column_stack(columns).reshape(*b_shape)
    return x
def _get_axis_len(aname, a, axis):
    """Return the length of array `a` along `axis` (negative axes allowed).

    Raises a ValueError naming ``'<aname>axis'`` when the axis is out of
    bounds for `a`.
    """
    resolved = axis + a.ndim if axis < 0 else axis
    if not 0 <= resolved < a.ndim:
        raise ValueError(f"'{aname}axis' entry is out of bounds")
    return a.shape[resolved]
def solve_circulant(c, b, singular='raise', tol=None,
                    caxis=-1, baxis=0, outaxis=0):
    """Solve ``C x = b`` for `x`, where ``C`` is a circulant matrix.

    ``C`` is the circulant matrix associated with the vector `c`. The
    system is solved by division in Fourier space::

        x = ifft(fft(b) / fft(c))

    which, for a large vector `c`, is *much* faster than solving with the
    full circulant matrix.

    Parameters
    ----------
    c : array_like
        The coefficients of the circulant matrix.
    b : array_like
        Right-hand side matrix in ``a x = b``.
    singular : str, optional
        Controls how a near singular circulant matrix is handled. If
        "raise" (default), a `LinAlgError` is raised; if "lstsq", the least
        squares solution is returned.
    tol : float, optional
        If any eigenvalue of the circulant matrix has an absolute value at
        most `tol`, the matrix is considered near singular. If not given,
        `tol` defaults to
        ``abs_eigs.max() * abs_eigs.size * np.finfo(np.float64).eps``,
        where ``abs_eigs`` are the absolute values of the eigenvalues of
        the circulant matrix.
    caxis : int
        When `c` has dimension greater than 1, the axis of `c` holding the
        vectors of circulant coefficients.
    baxis : int
        When `b` has dimension greater than 1, the axis of `b` holding the
        right-hand side vectors.
    outaxis : int
        When `c` or `b` are multidimensional, the axis of the result
        holding the solution vectors.

    Returns
    -------
    x : ndarray
        Solution to the system ``C x = b``.

    Raises
    ------
    LinAlgError
        If the circulant matrix associated with `c` is near singular.

    See Also
    --------
    circulant : circulant matrix

    Notes
    -----
    For a 1-D vector `c` of length ``m`` and an array `b` of shape
    ``(m, ...)``, ``solve_circulant(c, b)`` returns the same result as
    ``solve(circulant(c), b)``.

    .. versionadded:: 0.16.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import solve_circulant
    >>> solve_circulant(np.array([2, 2, 4]), np.array([1, 2, 3]))
    array([ 0.75, -0.25,  0.25])

    For a singular matrix, the least squares solution can be requested:

    >>> c = np.array([1, 1, 0, 0])
    >>> b = np.array([1, 2, 3, 4])
    >>> solve_circulant(c, b, singular='lstsq')
    array([ 0.25,  1.25,  2.25,  1.25])
    """
    c = np.atleast_1d(c)
    nc = _get_axis_len("c", c, caxis)
    b = np.atleast_1d(b)
    nb = _get_axis_len("b", b, baxis)
    if nc != nb:
        raise ValueError(f'Shapes of c {c.shape} and b {b.shape} '
                         'are incompatible')
    # The eigenvalues of a circulant matrix are the DFT of its first column.
    eigs = np.fft.fft(np.moveaxis(c, caxis, -1), axis=-1)
    abs_eigs = np.abs(eigs)
    if tol is None:
        # Same default tolerance as np.linalg.matrix_rank.
        tol = abs_eigs.max(axis=-1) * nc * np.finfo(np.float64).eps
        if tol.shape != ():
            tol.shape = tol.shape + (1,)
        else:
            tol = np.atleast_1d(tol)
    near_zeros = abs_eigs <= tol
    is_near_singular = np.any(near_zeros)
    if is_near_singular:
        if singular == 'raise':
            raise LinAlgError("near singular circulant matrix.")
        # Temporarily replace the tiny eigenvalues with 1 so the division
        # below is well defined; the corresponding entries of the quotient
        # are zeroed out afterwards (least squares solution).
        eigs[near_zeros] = 1
    fb = np.fft.fft(np.moveaxis(b, baxis, -1), axis=-1)
    quotient = fb / eigs
    if is_near_singular:
        # ``near_zeros`` has the shape of `c`; broadcasting it against an
        # all-True array shaped like `b` yields a mask with the shape of
        # the broadcast quotient.
        mask = np.ones_like(b, dtype=bool) & near_zeros
        quotient[mask] = 0
    x = np.fft.ifft(quotient, axis=-1)
    if not (np.iscomplexobj(c) or np.iscomplexobj(b)):
        x = x.real
    if outaxis != -1:
        x = np.moveaxis(x, -1, outaxis)
    return x
# matrix inversion
def inv(a, overwrite_a=False, check_finite=True):
    """
    Compute the inverse of a matrix.

    Parameters
    ----------
    a : array_like
        Square matrix to be inverted.
    overwrite_a : bool, optional
        Discard data in `a` (may improve performance). Default is False.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or
        NaNs.

    Returns
    -------
    ainv : ndarray
        Inverse of the matrix `a`.

    Raises
    ------
    LinAlgError
        If `a` is singular.
    ValueError
        If `a` is not square, or not 2D.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import linalg
    >>> a = np.array([[1., 2.], [3., 4.]])
    >>> linalg.inv(a)
    array([[-2. ,  1. ],
           [ 1.5, -0.5]])
    >>> np.dot(a, linalg.inv(a))
    array([[ 1.,  0.],
           [ 0.,  1.]])
    """
    a_arr = _asarray_validated(a, check_finite=check_finite)
    if a_arr.ndim != 2 or a_arr.shape[0] != a_arr.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or _datacopied(a_arr, a)
    getrf, getri, getri_lwork = get_lapack_funcs(
        ('getrf', 'getri', 'getri_lwork'), (a_arr,))
    # LU factorization first; the inverse is then formed from the factors.
    lu, piv, info = getrf(a_arr, overwrite_a=overwrite_a)
    if info == 0:
        lwork = _compute_lwork(getri_lwork, a_arr.shape[0])
        # A workspace ~1% larger than the size reported by LAPACK works
        # around a segfault observed in ?getri with the minimal lwork on
        # large (e.g. 500x500) inputs; likely a bug in the LAPACK routine.
        lwork = int(1.01 * lwork)
        inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1)
    if info > 0:
        raise LinAlgError("singular matrix")
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal '
                         'getrf|getri' % -info)
    return inv_a
# Determinant
def det(a, overwrite_a=False, check_finite=True):
    """
    Compute the determinant of a matrix
    The determinant is a scalar that is a function of the associated square
    matrix coefficients. The determinant value is zero for singular matrices.
    Parameters
    ----------
    a : (..., M, M) array_like
        Input array to compute determinants for.
    overwrite_a : bool, optional
        Allow overwriting data in a (may enhance performance).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    Returns
    -------
    det : (...) float or complex
        Determinant of `a`. For stacked arrays, a scalar is returned for each
        (m, m) slice in the last two dimensions of the input. For example, an
        input of shape (p, q, m, m) will produce a result of shape (p, q). If
        all dimensions are 1 a scalar is returned regardless of ndim.
    Notes
    -----
    The determinant is computed by performing an LU factorization of the
    input with LAPACK routine 'getrf', and then calculating the product of
    diagonal entries of the U factor.
    Even if the input array is single precision (float32 or complex64), the
    result will be returned in double precision (float64 or complex128) to
    prevent overflows.
    Examples
    --------
    >>> import numpy as np
    >>> from scipy import linalg
    >>> a = np.array([[1,2,3], [4,5,6], [7,8,9]]) # A singular matrix
    >>> linalg.det(a)
    0.0
    >>> b = np.array([[0,2,3], [4,5,6], [7,8,9]])
    >>> linalg.det(b)
    3.0
    >>> # An array with the shape (3, 2, 2, 2)
    >>> c = np.array([[[[1., 2.], [3., 4.]],
    ... [[5., 6.], [7., 8.]]],
    ... [[[9., 10.], [11., 12.]],
    ... [[13., 14.], [15., 16.]]],
    ... [[[17., 18.], [19., 20.]],
    ... [[21., 22.], [23., 24.]]]])
    >>> linalg.det(c) # The resulting shape is (3, 2)
    array([[-2., -2.],
           [-2., -2.],
           [-2., -2.]])
    >>> linalg.det(c[0, 0]) # Confirm the (0, 0) slice, [[1, 2], [3, 4]]
    -2.0
    """
    # The goal is to end up with a writable contiguous array to pass to Cython
    # First we check and make arrays.
    a1 = np.asarray_chkfinite(a) if check_finite else np.asarray(a)
    if a1.ndim < 2:
        raise ValueError('The input array must be at least two-dimensional.')
    if a1.shape[-1] != a1.shape[-2]:
        raise ValueError('Last 2 dimensions of the array must be square'
                         f' but received shape {a1.shape}.')
    # Also check if dtype is LAPACK compatible
    if a1.dtype.char not in 'fdFD':
        # lapack_cast_dict maps each NumPy typecode to the LAPACK-compatible
        # typecodes it can safely be cast to (may be empty).
        dtype_char = lapack_cast_dict[a1.dtype.char]
        if not dtype_char: # No casting possible
            raise TypeError(f'The dtype "{a1.dtype.name}" cannot be cast '
                            'to float(32, 64) or complex(64, 128).')
        a1 = a1.astype(dtype_char[0]) # makes a copy, free to scratch
        overwrite_a = True
    # Empty array has determinant 1 because math.
    if min(*a1.shape) == 0:
        if a1.ndim == 2:
            return np.float64(1.)
        else:
            # One scalar per stacked (0, 0) slice.
            return np.ones(shape=a1.shape[:-2], dtype=np.float64)
    # Scalar case: det of a 1x1 matrix is its single entry.
    if a1.shape[-2:] == (1, 1):
        # Either ndarray with spurious singletons or a single element
        if max(*a1.shape) > 1:
            temp = np.squeeze(a1)
            if a1.dtype.char in 'dD':
                return temp
            else:
                # Promote single precision to double to match the
                # documented return dtype.
                return (temp.astype('d') if a1.dtype.char == 'f' else
                        temp.astype('D'))
        else:
            return (np.float64(a1.item()) if a1.dtype.char in 'fd' else
                    np.complex128(a1.item()))
    # Then check overwrite permission
    if not _datacopied(a1, a): # "a" still alive through "a1"
        if not overwrite_a:
            # Data belongs to "a" so make a copy
            a1 = a1.copy(order='C')
        # else: Do nothing we'll use "a" if possible
    # else: a1 has its own data thus free to scratch
    # Then layout checks, might happen that overwrite is allowed but original
    # array was read-only or non-C-contiguous.
    if not (a1.flags['C_CONTIGUOUS'] and a1.flags['WRITEABLE']):
        a1 = a1.copy(order='C')
    if a1.ndim == 2:
        det = find_det_from_lu(a1)
        # Convert float/complex to NumPy scalars
        return (np.float64(det) if np.isrealobj(det) else np.complex128(det))
    # loop over the stacked array, and avoid overflows for single precision
    # Cf. np.linalg.det(np.diag([1e+38, 1e+38]).astype(np.float32))
    dtype_char = a1.dtype.char
    if dtype_char in 'fF':
        dtype_char = 'd' if dtype_char.islower() else 'D'
    det = np.empty(a1.shape[:-2], dtype=dtype_char)
    for ind in product(*[range(x) for x in a1.shape[:-2]]):
        det[ind] = find_det_from_lu(a1[ind])
    return det
# Linear Least Squares
def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False,
          check_finite=True, lapack_driver=None):
    """
    Compute least-squares solution to equation Ax = b.

    Compute a vector x such that the 2-norm ``|b - A x|`` is minimized.

    Parameters
    ----------
    a : (M, N) array_like
        Left-hand side array
    b : (M,) or (M, K) array_like
        Right hand side array
    cond : float, optional
        Cutoff for 'small' singular values; used to determine effective
        rank of a. Singular values smaller than
        ``cond * largest_singular_value`` are considered zero.
    overwrite_a : bool, optional
        Discard data in `a` (may enhance performance). Default is False.
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance). Default is False.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    lapack_driver : str, optional
        Which LAPACK driver is used to solve the least-squares problem.
        Options are ``'gelsd'``, ``'gelsy'``, ``'gelss'``. Default
        (``'gelsd'``) is a good choice.  However, ``'gelsy'`` can be slightly
        faster on many problems.  ``'gelss'`` was used historically.  It is
        generally slow but uses less memory.

        .. versionadded:: 0.17.0

    Returns
    -------
    x : (N,) or (N, K) ndarray
        Least-squares solution.
    residues : (K,) ndarray or float
        Square of the 2-norm for each column in ``b - a x``, if ``M > N`` and
        ``ndim(A) == n`` (returns a scalar if ``b`` is 1-D). Otherwise a
        (0,)-shaped array is returned.
    rank : int
        Effective rank of `a`.
    s : (min(M, N),) ndarray or None
        Singular values of `a`. The condition number of ``a`` is
        ``s[0] / s[-1]``.

    Raises
    ------
    LinAlgError
        If computation does not converge.
    ValueError
        When parameters are not compatible.

    See Also
    --------
    scipy.optimize.nnls : linear least squares with non-negativity constraint

    Notes
    -----
    When ``'gelsy'`` is used as a driver, `residues` is set to a (0,)-shaped
    array and `s` is always ``None``.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import lstsq
    >>> import matplotlib.pyplot as plt

    Suppose we have the following data:

    >>> x = np.array([1, 2.5, 3.5, 4, 5, 7, 8.5])
    >>> y = np.array([0.3, 1.1, 1.5, 2.0, 3.2, 6.6, 8.6])

    We want to fit a quadratic polynomial of the form ``y = a + b*x**2``
    to this data.  We first form the "design matrix" M, with a constant
    column of 1s and a column containing ``x**2``:

    >>> M = x[:, np.newaxis]**[0, 2]
    >>> M
    array([[  1.  ,   1.  ],
           [  1.  ,   6.25],
           [  1.  ,  12.25],
           [  1.  ,  16.  ],
           [  1.  ,  25.  ],
           [  1.  ,  49.  ],
           [  1.  ,  72.25]])

    We want to find the least-squares solution to ``M.dot(p) = y``,
    where ``p`` is a vector with length 2 that holds the parameters
    ``a`` and ``b``.

    >>> p, res, rnk, s = lstsq(M, y)
    >>> p
    array([ 0.20925829,  0.12013861])

    Plot the data and the fitted curve.

    >>> plt.plot(x, y, 'o', label='data')
    >>> xx = np.linspace(0, 9, 101)
    >>> yy = p[0] + p[1]*xx**2
    >>> plt.plot(xx, yy, label='least squares fit, $y = a + bx^2$')
    >>> plt.xlabel('x')
    >>> plt.ylabel('y')
    >>> plt.legend(framealpha=1, shadow=True)
    >>> plt.grid(alpha=0.25)
    >>> plt.show()
    """
    # Validate inputs; rejects non-finite entries unless check_finite=False.
    a1 = _asarray_validated(a, check_finite=check_finite)
    b1 = _asarray_validated(b, check_finite=check_finite)
    if len(a1.shape) != 2:
        raise ValueError('Input array a should be 2D')
    m, n = a1.shape
    # Number of right-hand-side columns (1 for a 1-D b).
    if len(b1.shape) == 2:
        nrhs = b1.shape[1]
    else:
        nrhs = 1
    if m != b1.shape[0]:
        raise ValueError('Shape mismatch: a and b should have the same number'
                         ' of rows ({} != {}).'.format(m, b1.shape[0]))
    if m == 0 or n == 0:  # Zero-sized problem, confuses LAPACK
        x = np.zeros((n,) + b1.shape[1:], dtype=np.common_type(a1, b1))
        if n == 0:
            # With no unknowns the residual is just ||b|| per column.
            residues = np.linalg.norm(b1, axis=0)**2
        else:
            residues = np.empty((0,))
        return x, residues, 0, np.empty((0,))
    driver = lapack_driver
    if driver is None:
        # Default lives in a function attribute so callers can patch it.
        driver = lstsq.default_lapack_driver
    if driver not in ('gelsd', 'gelsy', 'gelss'):
        raise ValueError('LAPACK driver "%s" is not found' % driver)

    # Resolve the typed LAPACK routine and its workspace-query companion.
    lapack_func, lapack_lwork = get_lapack_funcs((driver,
                                                  '%s_lwork' % driver),
                                                 (a1, b1))
    # 'f' kind => real (s/d) routines; otherwise complex (c/z).
    real_data = True if (lapack_func.dtype.kind == 'f') else False

    if m < n:
        # need to extend b matrix as it will be filled with
        # a larger solution matrix
        if len(b1.shape) == 2:
            b2 = np.zeros((n, nrhs), dtype=lapack_func.dtype)
            b2[:m, :] = b1
        else:
            b2 = np.zeros(n, dtype=lapack_func.dtype)
            b2[:m] = b1
        b1 = b2

    # If validation already copied the data, overwriting it is harmless.
    overwrite_a = overwrite_a or _datacopied(a1, a)
    overwrite_b = overwrite_b or _datacopied(b1, b)

    if cond is None:
        # Default rank cutoff: machine precision of the LAPACK dtype.
        cond = np.finfo(lapack_func.dtype).eps

    if driver in ('gelss', 'gelsd'):
        if driver == 'gelss':
            lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
            # gelss also returns the overwritten inputs (v, work); only
            # x, s, rank and info are used below.
            v, x, s, rank, work, info = lapack_func(a1, b1, cond, lwork,
                                                    overwrite_a=overwrite_a,
                                                    overwrite_b=overwrite_b)

        elif driver == 'gelsd':
            if real_data:
                # Real gelsd needs an integer workspace as well.
                lwork, iwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
                x, s, rank, info = lapack_func(a1, b1, lwork,
                                               iwork, cond, False, False)
            else:  # complex data
                # Complex gelsd additionally needs a real workspace.
                lwork, rwork, iwork = _compute_lwork(lapack_lwork, m, n,
                                                     nrhs, cond)
                x, s, rank, info = lapack_func(a1, b1, lwork, rwork, iwork,
                                               cond, False, False)
        if info > 0:
            raise LinAlgError("SVD did not converge in Linear Least Squares")
        if info < 0:
            raise ValueError('illegal value in %d-th argument of internal %s'
                             % (-info, lapack_driver))
        resids = np.asarray([], dtype=x.dtype)
        if m > n:
            x1 = x[:n]
            # Residuals are only well-defined for overdetermined,
            # full-rank systems; LAPACK stores them in rows n..m of x.
            if rank == n:
                resids = np.sum(np.abs(x[n:])**2, axis=0)
            x = x1
        return x, resids, rank, s

    elif driver == 'gelsy':
        lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
        # Column-pivoting array for xGELSY; zero marks a free column.
        jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
        v, x, j, rank, info = lapack_func(a1, b1, jptv, cond,
                                          lwork, False, False)
        if info < 0:
            raise ValueError("illegal value in %d-th argument of internal "
                             "gelsy" % -info)
        if m > n:
            x1 = x[:n]
            x = x1
        # gelsy never computes singular values => s is None by contract.
        return x, np.array([], x.dtype), rank, None


# Default driver used when lapack_driver is None (see docstring).
lstsq.default_lapack_driver = 'gelsd'
def pinv(a, atol=None, rtol=None, return_rank=False, check_finite=True,
         cond=_NoValue, rcond=_NoValue):
    """
    Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    A generalized inverse is computed from the economy-size singular value
    decomposition ``U @ S @ V`` of `a`, keeping only the singular vectors
    associated with significant singular values.  With ``s`` the largest
    singular value, anything below ``atol + rtol * s`` is treated as zero.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to be pseudo-inverted.
    atol : float, optional
        Absolute threshold term, default value is 0.

        .. versionadded:: 1.7.0

    rtol : float, optional
        Relative threshold term, default value is ``max(M, N) * eps`` where
        ``eps`` is the machine precision value of the datatype of `a`.

        .. versionadded:: 1.7.0

    return_rank : bool, optional
        If True, return the effective rank of the matrix.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or
        NaNs.
    cond, rcond : float, optional
        Deprecated aliases for an absolute tolerance (used as ``atol`` with
        ``rtol=0``).  If given, ``rcond`` takes precedence over ``cond``, and
        both are ignored whenever `atol` or `rtol` is supplied.

        .. deprecated:: 1.7.0
            Deprecated in favor of ``rtol`` and ``atol`` parameters above and
            will be removed in SciPy 1.13.0.

    Returns
    -------
    B : (N, M) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of the matrix. Returned if `return_rank` is True.

    Raises
    ------
    LinAlgError
        If SVD computation does not converge.

    See Also
    --------
    pinvh : Moore-Penrose pseudoinverse of a hermititan matrix.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import linalg
    >>> rng = np.random.default_rng()
    >>> A = rng.standard_normal((9, 6))
    >>> B = linalg.pinv(A)
    >>> np.allclose(A @ B @ A, A)
    True
    >>> np.allclose(B @ A @ B, B)
    True
    """
    a = _asarray_validated(a, check_finite=check_finite)
    left_vecs, sing_vals, right_vecs = _decomp_svd.svd(
        a, full_matrices=False, check_finite=False)
    # Lower-case dtype char selects the real precision ('f' or 'd').
    prec_char = left_vecs.dtype.char.lower()
    largest_sv = np.max(sing_vals)

    if rcond is not _NoValue or cond is not _NoValue:
        warn('Use of the "cond" and "rcond" keywords are deprecated and '
             'will be removed in SciPy 1.13.0. Use "atol" and '
             '"rtol" keywords instead', DeprecationWarning, stacklevel=2)

        # Honor the legacy keywords only when neither atol nor rtol is given;
        # rcond wins over cond for backwards compatibility.
        legacy_tol = rcond if rcond not in (_NoValue, None) else cond
        if (legacy_tol not in (_NoValue, None)
                and atol is None and rtol is None):
            atol, rtol = legacy_tol, 0.

    if atol is None:
        atol = 0.
    if rtol is None:
        rtol = max(a.shape) * np.finfo(prec_char).eps

    if atol < 0. or rtol < 0.:
        raise ValueError("atol and rtol values must be positive.")

    cutoff = atol + largest_sv * rtol
    rank = np.sum(sing_vals > cutoff)

    # Invert only the significant part of the spectrum:
    # B = V^H @ diag(1/s) @ U^H, assembled as ((U / s) @ V)^H.
    scaled = left_vecs[:, :rank] / sing_vals[:rank]
    B = (scaled @ right_vecs[:rank]).conj().T

    return (B, rank) if return_rank else B
def pinvh(a, atol=None, rtol=None, lower=True, return_rank=False,
          check_finite=True):
    """
    Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.

    A generalized inverse of a complex Hermitian/real symmetric matrix is
    built from its eigenvalue decomposition, keeping all eigenvalues with
    'large' absolute value (above ``atol + rtol * max(|eigenvalues|)``).

    Parameters
    ----------
    a : (N, N) array_like
        Real symmetric or complex hermetian matrix to be pseudo-inverted
    atol : float, optional
        Absolute threshold term, default value is 0.

        .. versionadded:: 1.7.0

    rtol : float, optional
        Relative threshold term, default value is ``N * eps`` where
        ``eps`` is the machine precision value of the datatype of `a`.

        .. versionadded:: 1.7.0

    lower : bool, optional
        Whether the pertinent array data is taken from the lower or upper
        triangle of `a`. (Default: lower)
    return_rank : bool, optional
        If True, return the effective rank of the matrix.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or
        NaNs.

    Returns
    -------
    B : (N, N) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of the matrix. Returned if `return_rank` is True.

    Raises
    ------
    LinAlgError
        If eigenvalue algorithm does not converge.

    See Also
    --------
    pinv : Moore-Penrose pseudoinverse of a matrix.

    Examples
    --------
    For a more detailed example see `pinv`.

    >>> import numpy as np
    >>> from scipy.linalg import pinvh
    >>> rng = np.random.default_rng()
    >>> a = rng.standard_normal((9, 6))
    >>> a = np.dot(a, a.T)
    >>> B = pinvh(a)
    >>> np.allclose(a, a @ B @ a)
    True
    >>> np.allclose(B, B @ a @ B)
    True
    """
    a = _asarray_validated(a, check_finite=check_finite)
    eigvals, eigvecs = _decomp.eigh(a, lower=lower, check_finite=False)
    # Lower-case dtype char selects the real precision ('f' or 'd').
    prec_char = eigvecs.dtype.char.lower()
    largest = np.max(np.abs(eigvals))

    if atol is None:
        atol = 0.
    if rtol is None:
        rtol = max(a.shape) * np.finfo(prec_char).eps

    if atol < 0. or rtol < 0.:
        raise ValueError("atol and rtol values must be positive.")

    # Keep only eigenpairs whose eigenvalue magnitude clears the cutoff.
    cutoff = atol + largest * rtol
    keep = abs(eigvals) > cutoff
    inv_vals = 1.0 / eigvals[keep]
    kept_vecs = eigvecs[:, keep]

    # B = V @ diag(1/lambda) @ V^H restricted to the kept eigenpairs.
    B = (kept_vecs * inv_vals) @ kept_vecs.conj().T

    return (B, len(inv_vals)) if return_rank else B
def matrix_balance(A, permute=True, scale=True, separate=False,
                   overwrite_a=False):
    """
    Compute a diagonal similarity transformation for row/column balancing.

    The balancing tries to equalize the row and column 1-norms by applying
    a similarity transformation such that the magnitude variation of the
    matrix entries is reflected to the scaling matrices.

    Moreover, if enabled, the matrix is first permuted to isolate the upper
    triangular parts of the matrix and, again if scaling is also enabled,
    only the remaining subblocks are subjected to scaling.

    The balanced matrix satisfies the following equality

    .. math::

        B = T^{-1} A T

    The scaling coefficients are approximated to the nearest power of 2
    to avoid round-off errors.

    Parameters
    ----------
    A : (n, n) array_like
        Square data matrix for the balancing.
    permute : bool, optional
        The selector to define whether permutation of A is also performed
        prior to scaling.
    scale : bool, optional
        The selector to turn on and off the scaling. If False, the matrix
        will not be scaled.
    separate : bool, optional
        This switches from returning a full matrix of the transformation
        to a tuple of two separate 1-D permutation and scaling arrays.
    overwrite_a : bool, optional
        This is passed to xGEBAL directly. Essentially, overwrites the result
        to the data. It might increase the space efficiency. See LAPACK manual
        for details. This is False by default.

    Returns
    -------
    B : (n, n) ndarray
        Balanced matrix
    T : (n, n) ndarray
        A possibly permuted diagonal matrix whose nonzero entries are
        integer powers of 2 to avoid numerical truncation errors.
    scale, perm : (n,) ndarray
        If ``separate`` keyword is set to True then instead of the array
        ``T`` above, the scaling and the permutation vectors are given
        separately as a tuple without allocating the full array ``T``.

    Notes
    -----
    This algorithm is particularly useful for eigenvalue and matrix
    decompositions and in many cases it is already called by various
    LAPACK routines.

    The algorithm is based on the well-known technique of [1]_ and has
    been modified to account for special cases. See [2]_ for details
    which have been implemented since LAPACK v3.5.0. Before this version
    there are corner cases where balancing can actually worsen the
    conditioning. See [3]_ for such examples.

    The code is a wrapper around LAPACK's xGEBAL routine family for matrix
    balancing.

    .. versionadded:: 0.19.0

    References
    ----------
    .. [1] B.N. Parlett and C. Reinsch, "Balancing a Matrix for
       Calculation of Eigenvalues and Eigenvectors", Numerische Mathematik,
       Vol.13(4), 1969, :doi:`10.1007/BF02165404`
    .. [2] R. James, J. Langou, B.R. Lowery, "On matrix balancing and
       eigenvector computation", 2014, :arxiv:`1401.5766`
    .. [3] D.S. Watkins. A case where balancing is harmful.
       Electron. Trans. Numer. Anal, Vol.23, 2006.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import linalg
    >>> x = np.array([[1,2,0], [9,1,0.01], [1,2,10*np.pi]])

    >>> y, permscale = linalg.matrix_balance(x)
    >>> np.abs(x).sum(axis=0) / np.abs(x).sum(axis=1)
    array([ 3.66666667,  0.4995005 ,  0.91312162])

    >>> np.abs(y).sum(axis=0) / np.abs(y).sum(axis=1)
    array([ 1.2       ,  1.27041742,  0.92658316])  # may vary

    >>> permscale  # only powers of 2 (0.5 == 2^(-1))
    array([[  0.5,   0. ,   0. ],  # may vary
           [  0. ,   1. ,   0. ],
           [  0. ,   0. ,   1. ]])
    """
    A = np.atleast_2d(_asarray_validated(A, check_finite=True))

    if not np.equal(*A.shape):
        raise ValueError('The data matrix for balancing should be square.')

    # xGEBAL returns the balanced matrix B, the index range [lo, hi] of the
    # scaled sub-block, and ps which packs permutation/scaling information.
    gebal = get_lapack_funcs(('gebal'), (A,))
    B, lo, hi, ps, info = gebal(A, scale=scale, permute=permute,
                                overwrite_a=overwrite_a)

    if info < 0:
        raise ValueError('xGEBAL exited with the internal error '
                         '"illegal value in argument number {}.". See '
                         'LAPACK documentation for the xGEBAL error codes.'
                         ''.format(-info))

    # Separate the permutations from the scalings and then convert to int
    # Outside [lo, hi], ps holds permutation targets, not scale factors,
    # so the scaling there is identity (1.0).
    scaling = np.ones_like(ps, dtype=float)
    scaling[lo:hi+1] = ps[lo:hi+1]

    # gebal uses 1-indexing
    ps = ps.astype(int, copy=False) - 1
    n = A.shape[0]
    perm = np.arange(n)

    # LAPACK permutes with the ordering n --> hi, then 0--> lo
    # Replay the recorded swaps in that same order to recover the full
    # permutation vector; entries already in place are skipped.
    if hi < n:
        for ind, x in enumerate(ps[hi+1:][::-1], 1):
            if n-ind == x:
                continue
            perm[[x, n-ind]] = perm[[n-ind, x]]

    if lo > 0:
        for ind, x in enumerate(ps[:lo]):
            if ind == x:
                continue
            perm[[x, ind]] = perm[[ind, x]]

    if separate:
        return B, (scaling, perm)

    # get the inverse permutation
    iperm = np.empty_like(perm)
    iperm[perm] = np.arange(n)

    # Assemble the full transformation matrix T = P @ diag(scaling).
    return B, np.diag(scaling)[iperm, :]
def _validate_args_for_toeplitz_ops(c_or_cr, b, check_finite, keep_b_shape,
                                    enforce_square=True):
    """Validate arguments and format inputs for toeplitz functions

    Parameters
    ----------
    c_or_cr : array_like or tuple of (array_like, array_like)
        The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
        actual shape of ``c``, it will be converted to a 1-D array. If not
        supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
        real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
        of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
        of ``r``, it will be converted to a 1-D array.
    b : (M,) or (M, K) array_like
        Right-hand side in ``T x = b``.
    check_finite : bool
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (result entirely NaNs) if the inputs do contain infinities or NaNs.
    keep_b_shape : bool
        Whether to convert a (M,) dimensional b into a (M, 1) dimensional
        matrix.
    enforce_square : bool, optional
        If True (default), this verifies that the Toeplitz matrix is square.

    Returns
    -------
    r : array
        1d array corresponding to the first row of the Toeplitz matrix.
    c: array
        1d array corresponding to the first column of the Toeplitz matrix.
    b: array
        (M,), (M, 1) or (M, K) dimensional array, post validation,
        corresponding to ``b``.
    dtype: numpy datatype
        ``dtype`` stores the datatype of ``r``, ``c`` and ``b``. If any of
        ``r``, ``c`` or ``b`` are complex, ``dtype`` is ``np.complex128``,
        otherwise, it is ``np.float``.
    b_shape: tuple
        Shape of ``b`` after passing it through ``_asarray_validated``.

    """
    # Accept either a (c, r) pair or a lone first column.
    if isinstance(c_or_cr, tuple):
        c, r = c_or_cr
        c = _asarray_validated(c, check_finite=check_finite).ravel()
        r = _asarray_validated(r, check_finite=check_finite).ravel()
    else:
        c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel()
        r = c.conjugate()

    if b is None:
        raise ValueError('`b` must be an array, not None.')

    b = _asarray_validated(b, check_finite=check_finite)
    b_shape = b.shape

    # The number of rows of T is len(c) and of columns is len(r);
    # b must match the column count of T.
    not_square = r.shape[0] != c.shape[0]
    if (enforce_square and not_square) or b.shape[0] != r.shape[0]:
        raise ValueError('Incompatible dimensions.')

    # Promote everything to a single common dtype.
    has_complex = (np.iscomplexobj(r) or np.iscomplexobj(c)
                   or np.iscomplexobj(b))
    dtype = np.complex128 if has_complex else np.double
    r = np.asarray(r, dtype=dtype)
    c = np.asarray(c, dtype=dtype)
    b = np.asarray(b, dtype=dtype)

    # Normalize b to 2-D unless the caller asked to keep a 1-D shape.
    if b.ndim == 1 and not keep_b_shape:
        b = b.reshape(-1, 1)
    elif b.ndim != 1:
        b = b.reshape(b.shape[0], -1)

    return r, c, b, dtype, b_shape
def matmul_toeplitz(c_or_cr, x, check_finite=False, workers=None):
    """Efficient Toeplitz Matrix-Matrix Multiplication using FFT

    This function returns the matrix multiplication between a Toeplitz
    matrix and a dense matrix.

    The Toeplitz matrix has constant diagonals, with c as its first column
    and r as its first row. If r is not given, ``r == conjugate(c)`` is
    assumed.

    Parameters
    ----------
    c_or_cr : array_like or tuple of (array_like, array_like)
        The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
        actual shape of ``c``, it will be converted to a 1-D array. If not
        supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
        real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
        of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
        of ``r``, it will be converted to a 1-D array.
    x : (M,) or (M, K) array_like
        Matrix with which to multiply.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (result entirely NaNs) if the inputs do contain infinities or NaNs.
    workers : int, optional
        To pass to scipy.fft.fft and ifft. Maximum number of workers to use
        for parallel computation. If negative, the value wraps around from
        ``os.cpu_count()``. See scipy.fft.fft for more details.

    Returns
    -------
    T @ x : (M,) or (M, K) ndarray
        The result of the matrix multiplication ``T @ x``. Shape of return
        matches shape of `x`.

    See Also
    --------
    toeplitz : Toeplitz matrix
    solve_toeplitz : Solve a Toeplitz system using Levinson Recursion

    Notes
    -----
    The Toeplitz matrix is embedded in a circulant matrix and the FFT is used
    to efficiently calculate the matrix-matrix product.

    Because the computation is based on the FFT, integer inputs will
    result in floating point outputs.  This is unlike NumPy's `matmul`,
    which preserves the data type of the input.

    .. versionadded:: 1.6.0

    Examples
    --------
    Multiply the Toeplitz matrix T with matrix x::

            [ 1 -1 -2 -3]       [1 10]
        T = [ 3  1 -1 -2]   x = [2 11]
            [ 6  3  1 -1]       [2 11]
            [10  6  3  1]       [5 19]

    To specify the Toeplitz matrix, only the first column and the first
    row are needed.

    >>> import numpy as np
    >>> c = np.array([1, 3, 6, 10])    # First column of T
    >>> r = np.array([1, -1, -2, -3])  # First row of T
    >>> x = np.array([[1, 10], [2, 11], [2, 11], [5, 19]])

    >>> from scipy.linalg import toeplitz, matmul_toeplitz
    >>> matmul_toeplitz((c, r), x)
    array([[-20., -80.],
           [ -7.,  -8.],
           [  9.,  85.],
           [ 33., 218.]])

    Check the result by creating the full Toeplitz matrix and
    multiplying it by ``x``.

    >>> toeplitz(c, r) @ x
    array([[-20, -80],
           [ -7,  -8],
           [  9,  85],
           [ 33, 218]])

    The full matrix is never formed explicitly, so this routine
    is suitable for very large Toeplitz matrices.

    >>> n = 1000000
    >>> matmul_toeplitz([1] + [0]*(n-1), np.ones(n))
    array([1., 1., 1., ..., 1., 1., 1.])

    """
    from ..fft import fft, ifft, rfft, irfft

    r, c, x, dtype, x_shape = _validate_args_for_toeplitz_ops(
        c_or_cr, x, check_finite, keep_b_shape=False, enforce_square=False)
    n, m = x.shape

    out_rows = len(c)
    out_cols = len(r)
    # Length of the first column of the circulant embedding of T.
    fft_len = out_rows + out_cols - 1
    circ_col = np.concatenate((c, r[-1:0:-1]))

    if np.iscomplexobj(circ_col) or np.iscomplexobj(x):
        # Complex path: plain FFT/IFFT.
        freq_mat = fft(circ_col, axis=0, workers=workers).reshape(-1, 1)
        freq_x = fft(x, n=fft_len, axis=0, workers=workers)
        product = ifft(freq_mat * freq_x, axis=0,
                       workers=workers)[:out_rows, :]
    else:
        # Real inputs; using rfft is faster
        freq_mat = rfft(circ_col, axis=0, workers=workers).reshape(-1, 1)
        freq_x = rfft(x, n=fft_len, axis=0, workers=workers)
        product = irfft(freq_mat * freq_x, axis=0,
                        workers=workers, n=fft_len)[:out_rows, :]

    # Match the dimensionality of the original x.
    if len(x_shape) == 1:
        return product.reshape(out_rows)
    return product.reshape(out_rows, m)
| 69,470
| 34.939472
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/decomp_qr.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.linalg` namespace for importing the functions
# included below.
import warnings
from . import _decomp_qr
__all__ = [ # noqa: F822
'qr', 'qr_multiply', 'rq', 'get_lapack_funcs', 'safecall'
]
def __dir__():
    # Advertise only the names re-exported for backwards compatibility.
    return __all__
def __getattr__(name):
    # Module-level attribute hook (PEP 562): forwards legacy
    # ``scipy.linalg.decomp_qr`` accesses to the private implementation
    # module, emitting a DeprecationWarning for known names and raising
    # AttributeError for everything else.
    if name not in __all__:
        raise AttributeError(
            "scipy.linalg.decomp_qr is deprecated and has no attribute "
            f"{name}. Try looking in scipy.linalg instead.")

    warnings.warn(f"Please use `{name}` from the `scipy.linalg` namespace, "
                  "the `scipy.linalg.decomp_qr` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)

    # Delegate to the real implementation in the private module.
    return getattr(_decomp_qr, name)
| 796
| 27.464286
| 76
|
py
|
scipy
|
scipy-main/scipy/linalg/_special_matrices.py
|
import math
import warnings
import numpy as np
from numpy.lib.stride_tricks import as_strided
__all__ = ['tri', 'tril', 'triu', 'toeplitz', 'circulant', 'hankel',
'hadamard', 'leslie', 'kron', 'block_diag', 'companion',
'helmert', 'hilbert', 'invhilbert', 'pascal', 'invpascal', 'dft',
'fiedler', 'fiedler_companion', 'convolution_matrix']
# -----------------------------------------------------------------------------
# matrix construction functions
# -----------------------------------------------------------------------------
#
# *Note*: tri{,u,l} is implemented in NumPy, but an important bug was fixed in
# 2.0.0.dev-1af2f3, the following tri{,u,l} definitions are here for backwards
# compatibility.
def tri(N, M=None, k=0, dtype=None):
    """
    .. deprecated:: 1.11.0
        `tri` is deprecated in favour of `numpy.tri` and will be removed in
        SciPy 1.13.0.

    Construct (N, M) matrix filled with ones at and below the kth diagonal.

    The matrix has A[i,j] == 1 for j <= i + k

    Parameters
    ----------
    N : int
        The size of the first dimension of the matrix.
    M : int or None, optional
        The size of the second dimension of the matrix. If `M` is None,
        `M = N` is assumed.
    k : int, optional
        Number of subdiagonal below which matrix is filled with ones.
        `k` = 0 is the main diagonal, `k` < 0 subdiagonal and `k` > 0
        superdiagonal.
    dtype : dtype, optional
        Data type of the matrix.

    Returns
    -------
    tri : (N, M) ndarray
        Tri matrix.

    Examples
    --------
    >>> from scipy.linalg import tri
    >>> tri(3, 5, 2, dtype=int)
    array([[1, 1, 1, 0, 0],
           [1, 1, 1, 1, 0],
           [1, 1, 1, 1, 1]])
    >>> tri(3, 5, -1, dtype=int)
    array([[0, 0, 0, 0, 0],
           [1, 0, 0, 0, 0],
           [1, 1, 0, 0, 0]])

    """
    warnings.warn("'tri'/'tril/'triu' are deprecated as of SciPy 1.11.0 and "
                  "will be removed in v1.13.0. Please use "
                  "numpy.(tri/tril/triu) instead.",
                  DeprecationWarning, stacklevel=2)
    if M is None:
        M = N
    if isinstance(M, str):
        # Legacy spelling: tri(N, 'd') meant tri(N, dtype='d').
        dtype, M = M, N
    # Boolean mask: row index (shifted by k) >= column index.
    mask = np.greater_equal.outer(np.arange(k, N + k), np.arange(M))
    return mask if dtype is None else mask.astype(dtype)
def tril(m, k=0):
    """
    .. deprecated:: 1.11.0
        `tril` is deprecated in favour of `numpy.tril` and will be removed in
        SciPy 1.13.0.

    Make a copy of a matrix with elements above the kth diagonal zeroed.

    Parameters
    ----------
    m : array_like
        Matrix whose elements to return
    k : int, optional
        Diagonal above which to zero elements.
        `k` == 0 is the main diagonal, `k` < 0 subdiagonal and
        `k` > 0 superdiagonal.

    Returns
    -------
    tril : ndarray
        Return is the same shape and type as `m`.

    Examples
    --------
    >>> from scipy.linalg import tril
    >>> tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
    array([[ 0,  0,  0],
           [ 4,  0,  0],
           [ 7,  8,  0],
           [10, 11, 12]])

    """
    m = np.asarray(m)
    # Multiply by a 0/1 mask that keeps entries on and below diagonal k.
    mask = tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype.char)
    return mask * m
def triu(m, k=0):
    """
    .. deprecated:: 1.11.0
        `triu` is deprecated in favour of `numpy.triu` and will be removed in
        SciPy 1.13.0.

    Make a copy of a matrix with elements below the kth diagonal zeroed.

    Parameters
    ----------
    m : array_like
        Matrix whose elements to return
    k : int, optional
        Diagonal below which to zero elements.
        `k` == 0 is the main diagonal, `k` < 0 subdiagonal and
        `k` > 0 superdiagonal.

    Returns
    -------
    triu : ndarray
        Return matrix with zeroed elements below the kth diagonal and has
        same shape and type as `m`.

    Examples
    --------
    >>> from scipy.linalg import triu
    >>> triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
    array([[ 1,  2,  3],
           [ 4,  5,  6],
           [ 0,  8,  9],
           [ 0,  0, 12]])

    """
    m = np.asarray(m)
    # Complement of the strictly-below-k lower mask keeps diagonal k and up.
    mask = 1 - tri(m.shape[0], m.shape[1], k - 1, m.dtype.char)
    return mask * m
def toeplitz(c, r=None):
    """
    Construct a Toeplitz matrix.

    The Toeplitz matrix has constant diagonals, with c as its first column
    and r as its first row. If r is not given, ``r == conjugate(c)`` is
    assumed.

    Parameters
    ----------
    c : array_like
        First column of the matrix.  Whatever the actual shape of `c`, it
        will be converted to a 1-D array.
    r : array_like, optional
        First row of the matrix. If None, ``r = conjugate(c)`` is assumed;
        in this case, if c[0] is real, the result is a Hermitian matrix.
        r[0] is ignored; the first row of the returned matrix is
        ``[c[0], r[1:]]``.  Whatever the actual shape of `r`, it will be
        converted to a 1-D array.

    Returns
    -------
    A : (len(c), len(r)) ndarray
        The Toeplitz matrix. Dtype is the same as ``(c[0] + r[0]).dtype``.

    See Also
    --------
    circulant : circulant matrix
    hankel : Hankel matrix
    solve_toeplitz : Solve a Toeplitz system.

    Notes
    -----
    The behavior when `c` or `r` is a scalar, or when `c` is complex and
    `r` is None, was changed in version 0.8.0. The behavior in previous
    versions was undocumented and is no longer supported.

    Examples
    --------
    >>> from scipy.linalg import toeplitz
    >>> toeplitz([1,2,3], [1,4,5,6])
    array([[1, 4, 5, 6],
           [2, 1, 4, 5],
           [3, 2, 1, 4]])
    >>> toeplitz([1.0, 2+3j, 4-1j])
    array([[ 1.+0.j,  2.-3.j,  4.+1.j],
           [ 2.+3.j,  1.+0.j,  2.-3.j],
           [ 4.-1.j,  2.+3.j,  1.+0.j]])

    """
    c = np.asarray(c).ravel()
    r = c.conjugate() if r is None else np.asarray(r).ravel()

    # One flat buffer: reversed first column followed by the rest of the
    # first row.  A negative row stride over this buffer walks each row of
    # the Toeplitz matrix without materializing it; copy to detach.
    flat = np.concatenate((c[::-1], r[1:]))
    nrows, ncols = len(c), len(r)
    step = flat.strides[0]
    view = as_strided(flat[nrows - 1:], shape=(nrows, ncols),
                      strides=(-step, step))
    return view.copy()
def circulant(c):
    """
    Construct a circulant matrix.

    Parameters
    ----------
    c : (N,) array_like
        1-D array, the first column of the matrix.

    Returns
    -------
    A : (N, N) ndarray
        A circulant matrix whose first column is `c`.

    See Also
    --------
    toeplitz : Toeplitz matrix
    hankel : Hankel matrix
    solve_circulant : Solve a circulant system.

    Notes
    -----
    .. versionadded:: 0.8.0

    Examples
    --------
    >>> from scipy.linalg import circulant
    >>> circulant([1, 2, 3])
    array([[1, 3, 2],
           [2, 1, 3],
           [3, 2, 1]])

    """
    c = np.asarray(c).ravel()
    size = len(c)
    # Extended buffer holding reversed c followed by reversed c[1:];
    # striding backwards row-by-row produces the cyclic shifts.
    extended = np.concatenate((c[::-1], c[:0:-1]))
    step = extended.strides[0]
    view = as_strided(extended[size - 1:], shape=(size, size),
                      strides=(-step, step))
    return view.copy()
def hankel(c, r=None):
    """
    Construct a Hankel matrix.

    The Hankel matrix has constant anti-diagonals, with `c` as its
    first column and `r` as its last row. If `r` is not given, then
    ``r = zeros_like(c)`` is assumed.

    Parameters
    ----------
    c : array_like
        First column of the matrix. Flattened to 1-D before use.
    r : array_like, optional
        Last row of the matrix. If None, ``r = zeros_like(c)`` is assumed.
        ``r[0]`` is ignored; the last row of the returned matrix is
        ``[c[-1], r[1:]]``. Flattened to 1-D before use.

    Returns
    -------
    A : (len(c), len(r)) ndarray
        The Hankel matrix. Dtype is the same as ``(c[0] + r[0]).dtype``.

    See Also
    --------
    toeplitz : Toeplitz matrix
    circulant : circulant matrix

    Examples
    --------
    >>> from scipy.linalg import hankel
    >>> hankel([1, 17, 99])
    array([[ 1, 17, 99],
           [17, 99,  0],
           [99,  0,  0]])
    """
    first_col = np.asarray(c).ravel()
    if r is None:
        last_row = np.zeros_like(first_col)
    else:
        last_row = np.asarray(r).ravel()
    # The distinct anti-diagonal values, in order: c followed by r[1:].
    vals = np.concatenate((first_col, last_row[1:]))
    # Entry (i, j) of a Hankel matrix is vals[i + j]; gather via a
    # broadcast index table.
    rows = np.arange(len(first_col)).reshape(-1, 1)
    cols = np.arange(len(last_row))
    return vals[rows + cols]
def hadamard(n, dtype=int):
    """
    Construct an Hadamard matrix.

    Constructs an n-by-n Hadamard matrix, using Sylvester's
    construction. `n` must be a power of 2.

    Parameters
    ----------
    n : int
        The order of the matrix. `n` must be a power of 2.
    dtype : dtype, optional
        The data type of the array to be constructed.

    Returns
    -------
    H : (n, n) ndarray
        The Hadamard matrix.

    Raises
    ------
    ValueError
        If `n` is not a positive power of 2.

    Notes
    -----
    .. versionadded:: 0.8.0

    Examples
    --------
    >>> from scipy.linalg import hadamard
    >>> hadamard(4)
    array([[ 1,  1,  1,  1],
           [ 1, -1,  1, -1],
           [ 1,  1, -1, -1],
           [ 1, -1, -1,  1]])
    """
    # Exact power-of-two test: avoids the float rounding that
    # int(math.log(n, 2)) can suffer for large n, and also fixes the
    # grammar of the error message.
    if n < 1 or (n & (n - 1)) != 0:
        raise ValueError("n must be a positive integer, and n must be "
                         "a power of 2")
    H = np.array([[1]], dtype=dtype)
    # Sylvester's construction: double the order log2(n) times.
    for _ in range(int(n).bit_length() - 1):
        H = np.vstack((np.hstack((H, H)), np.hstack((H, -H))))
    return H
def leslie(f, s):
    """
    Create a Leslie matrix.

    Given the length-n array of fecundity coefficients `f` and the
    length n-1 array of survival coefficients `s`, return the associated
    Leslie matrix used in discrete-time age-structured population models.

    Parameters
    ----------
    f : (N,) array_like
        The "fecundity" coefficients.
    s : (N-1,) array_like
        The "survival" coefficients, must be 1-D with length one less
        than `f` and at least 1.

    Returns
    -------
    L : (N, N) ndarray
        Zero everywhere except the first row, which is `f`, and the
        first sub-diagonal, which is `s`. Dtype is that of ``f[0]+s[0]``.

    Notes
    -----
    .. versionadded:: 0.8.0

    Examples
    --------
    >>> from scipy.linalg import leslie
    >>> leslie([0.1, 2.0, 1.0, 0.1], [0.2, 0.8, 0.7])
    array([[ 0.1,  2. ,  1. ,  0.1],
           [ 0.2,  0. ,  0. ,  0. ],
           [ 0. ,  0.8,  0. ,  0. ],
           [ 0. ,  0. ,  0.7,  0. ]])
    """
    fec = np.atleast_1d(f)
    sur = np.atleast_1d(s)
    if fec.ndim != 1:
        raise ValueError("Incorrect shape for f. f must be 1D")
    if sur.ndim != 1:
        raise ValueError("Incorrect shape for s. s must be 1D")
    if fec.size != sur.size + 1:
        raise ValueError("Incorrect lengths for f and s. The length"
                         " of s must be one less than the length of f.")
    if sur.size == 0:
        raise ValueError("The length of s must be at least 1.")
    # Use the promoted dtype of the two coefficient arrays.
    out_dtype = (fec[0] + sur[0]).dtype
    n = fec.size
    mat = np.zeros((n, n), dtype=out_dtype)
    mat[0] = fec
    # Place the survival rates on the first sub-diagonal.
    mat[np.arange(1, n), np.arange(n - 1)] = sur
    return mat
def kron(a, b):
    """
    Kronecker product.

    The result is the block matrix whose (i, j) block is ``a[i, j] * b``.

    Parameters
    ----------
    a : (M, N) ndarray
        Input array.
    b : (P, Q) ndarray
        Input array.

    Returns
    -------
    A : (M*P, N*Q) ndarray
        Kronecker product of `a` and `b`.

    Examples
    --------
    >>> from numpy import array
    >>> from scipy.linalg import kron
    >>> kron(array([[1,2],[3,4]]), array([[1,1,1]]))
    array([[1, 1, 1, 2, 2, 2],
           [3, 3, 3, 4, 4, 4]])
    """
    # np.outer flattens its arguments, so make sure both operands are
    # contiguous first; the flattened order then matches element order.
    if not a.flags['CONTIGUOUS']:
        a = np.reshape(a, a.shape)
    if not b.flags['CONTIGUOUS']:
        b = np.reshape(b, b.shape)
    # 4-D array whose [i, j] entry is the block a[i, j] * b.
    blocks = np.outer(a, b).reshape(a.shape + b.shape)
    # Collapse the block structure into the flat (M*P, N*Q) layout.
    stacked = np.concatenate(blocks, axis=1)
    return np.concatenate(stacked, axis=1)
def block_diag(*arrs):
    """
    Create a block diagonal matrix from provided arrays.

    Given inputs `A`, `B` and `C`, the output has those arrays arranged
    on the diagonal and zeros elsewhere::

        [[A, 0, 0],
         [0, B, 0],
         [0, 0, C]]

    Parameters
    ----------
    A, B, C, ... : array_like, up to 2-D
        Input arrays. A 1-D array or array_like sequence of length `n`
        is treated as a 2-D array with shape ``(1, n)``.

    Returns
    -------
    D : ndarray
        Array with `A`, `B`, `C`, ... on the diagonal, with the
        promoted dtype of the inputs.

    Notes
    -----
    Empty sequences (i.e., array-likes of zero size) are not ignored;
    both ``[]`` and ``[[]]`` are treated as matrices with shape ``(1, 0)``.

    Examples
    --------
    >>> from scipy.linalg import block_diag
    >>> block_diag([[1, 0], [0, 1]], [[7]])
    array([[1, 0, 0],
           [0, 1, 0],
           [0, 0, 7]])
    """
    # No arguments: behave as if a single empty (1, 0) block was given.
    if arrs == ():
        arrs = ([],)
    blocks = [np.atleast_2d(blk) for blk in arrs]
    too_deep = [pos for pos, blk in enumerate(blocks) if blk.ndim > 2]
    if too_deep:
        raise ValueError("arguments in the following positions have dimension "
                         "greater than 2: %s" % too_deep)
    shapes = np.array([blk.shape for blk in blocks])
    out = np.zeros(np.sum(shapes, axis=0),
                   dtype=np.result_type(*[blk.dtype for blk in blocks]))
    # Place each block along the diagonal, advancing both offsets.
    row = col = 0
    for blk, (nrows, ncols) in zip(blocks, shapes):
        out[row:row + nrows, col:col + ncols] = blk
        row += nrows
        col += ncols
    return out
def companion(a):
    """
    Create a companion matrix.

    Create the companion matrix associated with the polynomial whose
    coefficients are given in `a` (descending order).

    Parameters
    ----------
    a : (N,) array_like
        1-D array of polynomial coefficients. The length of `a` must be
        at least two, and ``a[0]`` must not be zero.

    Returns
    -------
    c : (N-1, N-1) ndarray
        The first row of `c` is ``-a[1:]/a[0]``, and the first
        sub-diagonal is all ones. Dtype is that of ``1.0*a[0]``.

    Raises
    ------
    ValueError
        If ``a.ndim != 1``, ``a.size < 2``, or ``a[0] == 0``.

    Notes
    -----
    .. versionadded:: 0.8.0

    Examples
    --------
    >>> from scipy.linalg import companion
    >>> companion([1, -10, 31, -30])
    array([[ 10., -31.,  30.],
           [  1.,   0.,   0.],
           [  0.,   1.,   0.]])
    """
    coeffs = np.atleast_1d(a)
    if coeffs.ndim != 1:
        raise ValueError("Incorrect shape for `a`. `a` must be "
                         "one-dimensional.")
    if coeffs.size < 2:
        raise ValueError("The length of `a` must be at least 2.")
    if coeffs[0] == 0:
        raise ValueError("The first coefficient in `a` must not be zero.")
    # Normalize by the leading coefficient; 1.0* promotes to float.
    top_row = -coeffs[1:] / (1.0 * coeffs[0])
    m = coeffs.size - 1
    mat = np.zeros((m, m), dtype=top_row.dtype)
    mat[0] = top_row
    # Ones on the first sub-diagonal.
    mat[np.arange(1, m), np.arange(m - 1)] = 1
    return mat
def helmert(n, full=False):
    """
    Create an Helmert matrix of order `n`.

    This has applications in statistics, compositional or simplicial
    analysis, and in Aitchison geometry.

    Parameters
    ----------
    n : int
        The size of the array to create.
    full : bool, optional
        If True the (n, n) ndarray will be returned. Otherwise, the
        submatrix that does not include the first row is returned.
        Default: False.

    Returns
    -------
    M : ndarray
        The Helmert matrix. Shape is (n, n) or (n-1, n) depending on
        `full`.

    Examples
    --------
    >>> from scipy.linalg import helmert
    >>> helmert(5, full=True).shape
    (5, 5)
    """
    # Row i (i >= 1) has i ones, then -i, then zeros; row 0 is all ones.
    mat = np.tril(np.ones((n, n)), -1) - np.diag(np.arange(n))
    mat[0] = 1
    # Squared norms of the unnormalized rows: i*(i+1), except n for row 0.
    weights = np.arange(n) * np.arange(1, n + 1)
    weights[0] = n
    normalized = mat / np.sqrt(weights)[:, np.newaxis]
    return normalized if full else normalized[1:]
def hilbert(n):
    """
    Create a Hilbert matrix of order `n`.

    Returns the `n` by `n` array with entries ``h[i,j] = 1 / (i + j + 1)``.

    Parameters
    ----------
    n : int
        The size of the array to create.

    Returns
    -------
    h : (n, n) ndarray
        The Hilbert matrix.

    See Also
    --------
    invhilbert : Compute the inverse of a Hilbert matrix.

    Notes
    -----
    .. versionadded:: 0.10.0

    Examples
    --------
    >>> from scipy.linalg import hilbert
    >>> hilbert(3)
    array([[ 1.        ,  0.5       ,  0.33333333],
           [ 0.5       ,  0.33333333,  0.25      ],
           [ 0.33333333,  0.25      ,  0.2       ]])
    """
    # Compute 1 / (i + j + 1) directly via broadcasting; each entry is
    # one float division, identical to the Hankel-based construction.
    idx = np.arange(n)
    return 1.0 / (idx[:, np.newaxis] + idx + 1.0)
def invhilbert(n, exact=False):
    """
    Compute the inverse of the Hilbert matrix of order `n`.

    The entries in the inverse of a Hilbert matrix are integers. When
    `n` is greater than 14, some entries exceed the upper limit of
    64-bit integers; the `exact` argument controls how they are stored.

    Parameters
    ----------
    n : int
        The order of the Hilbert matrix.
    exact : bool, optional
        If False (default), return a float64 approximation of the
        inverse. If True, return the exact integer inverse: an int64
        array for ``n <= 14``, or an object array of Python integers
        for larger `n`.

    Returns
    -------
    invh : (n, n) ndarray
        The inverse of the Hilbert matrix of order `n`.

    See Also
    --------
    hilbert : Create a Hilbert matrix.

    Notes
    -----
    .. versionadded:: 0.10.0

    Examples
    --------
    >>> from scipy.linalg import invhilbert
    >>> invhilbert(4, exact=True)[0].tolist()
    [16, -120, 240, -140]
    """
    from scipy.special import comb
    if exact:
        # Entries overflow int64 once n exceeds 14.
        dtype = object if n > 14 else np.int64
    else:
        dtype = np.float64
    invh = np.empty((n, n), dtype=dtype)
    # Closed-form binomial expression for each entry; the matrix is
    # symmetric, so fill the lower triangle and mirror it.
    for i in range(n):
        for j in range(i + 1):
            s = i + j
            entry = ((-1) ** s * (s + 1) *
                     comb(n + i, n - j - 1, exact) *
                     comb(n + j, n - i - 1, exact) *
                     comb(s, i, exact) ** 2)
            invh[i, j] = entry
            if i != j:
                invh[j, i] = entry
    return invh
def pascal(n, kind='symmetric', exact=True):
    """
    Returns the n x n Pascal matrix.

    The Pascal matrix is a matrix containing the binomial coefficients
    as its elements.

    Parameters
    ----------
    n : int
        The size of the matrix to create.
    kind : str, optional
        Must be one of 'symmetric', 'lower', or 'upper'.
        Default is 'symmetric'.
    exact : bool, optional
        If True, the result is an array of type numpy.uint64 (if
        ``n < 35``) or an object array of Python integers. If False,
        the coefficients are computed with ``scipy.special.comb`` in
        floating point, which is faster but inexact for large `n`.

    Returns
    -------
    p : (n, n) ndarray
        The Pascal matrix.

    See Also
    --------
    invpascal

    Notes
    -----
    See https://en.wikipedia.org/wiki/Pascal_matrix for more information.

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> from scipy.linalg import pascal
    >>> pascal(4, kind='lower')
    array([[1, 0, 0, 0],
           [1, 1, 0, 0],
           [1, 2, 1, 0],
           [1, 3, 3, 1]], dtype=uint64)
    """
    from scipy.special import comb
    if kind not in ['symmetric', 'lower', 'upper']:
        raise ValueError("kind must be 'symmetric', 'lower', or 'upper'")
    if exact:
        # uint64 overflows once n reaches 35; fall back to Python ints.
        dt = object if n >= 35 else np.uint64
        lower = np.zeros((n, n), dtype=dt)
        for row in range(n):
            for col in range(row + 1):
                lower[row, col] = comb(row, col, exact=True)
    else:
        # Vectorized float computation of the lower-triangular factor.
        lower = comb(*np.ogrid[:n, :n])
    if kind == 'lower':
        return lower
    if kind == 'upper':
        return lower.T
    # Symmetric Pascal matrix is the Cholesky-like product L @ L.T.
    return np.dot(lower, lower.T)
def invpascal(n, kind='symmetric', exact=True):
    """
    Returns the inverse of the n x n Pascal matrix.

    The Pascal matrix is a matrix containing the binomial coefficients
    as its elements.

    Parameters
    ----------
    n : int
        The size of the matrix to create.
    kind : str, optional
        Must be one of 'symmetric', 'lower', or 'upper'.
        Default is 'symmetric'.
    exact : bool, optional
        If True, the result is an array of type ``numpy.int64`` (if
        ``n <= 35``) or an object array of Python integers. If False,
        the coefficients are computed in floating point, which for
        large `n` is not exact.

    Returns
    -------
    invp : (n, n) ndarray
        The inverse of the Pascal matrix.

    See Also
    --------
    pascal

    Notes
    -----
    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] "Pascal matrix", https://en.wikipedia.org/wiki/Pascal_matrix
    .. [2] Cohen, A. M., "The inverse of a Pascal matrix", Mathematical
           Gazette, 59(408), pp. 111-112, 1975.
    """
    from scipy.special import comb
    if kind not in ['symmetric', 'lower', 'upper']:
        raise ValueError("'kind' must be 'symmetric', 'lower' or 'upper'.")
    if kind == 'symmetric':
        if not exact:
            dt = np.float64
        elif n > 34:
            # Exact entries overflow int64 beyond this order.
            dt = object
        else:
            dt = np.int64
        invp = np.empty((n, n), dtype=dt)
        # Cohen's closed form [2]; the inverse is symmetric, so compute
        # the lower triangle and mirror it.
        for i in range(n):
            for j in range(i + 1):
                total = 0
                for k in range(n - i):
                    total += comb(i + k, k, exact=exact) * comb(i + k, i + k - j,
                                                                exact=exact)
                invp[i, j] = (-1)**(i - j) * total
                if i != j:
                    invp[j, i] = invp[i, j]
    else:
        # For the 'lower' and 'upper' cases, we compute the inverse by
        # changing the sign of every other diagonal of the Pascal matrix.
        invp = pascal(n, kind=kind, exact=exact)
        if invp.dtype == np.uint64:
            # This cast from np.uint64 to int64 is OK, because if `kind`
            # is not "symmetric", the values in invp are all much less
            # than 2**63.
            invp = invp.view(np.int64)
        # The Toeplitz matrix has alternating bands of 1 and -1.
        invp *= toeplitz((-1)**np.arange(n)).astype(invp.dtype)
    return invp
def dft(n, scale=None):
    """
    Discrete Fourier transform matrix.

    Create the matrix that computes the discrete Fourier transform of a
    sequence. The nth primitive root of unity used to generate the
    matrix is ``exp(-2*pi*i/n)``, where ``i = sqrt(-1)``.

    Parameters
    ----------
    n : int
        Size of the matrix to create.
    scale : str, optional
        Must be None, 'sqrtn', or 'n'. If 'sqrtn', the matrix is divided
        by ``sqrt(n)``; if 'n', by ``n``; if None (the default), the
        matrix is the unnormalized Vandermonde matrix of the roots of
        unity.

    Returns
    -------
    m : (n, n) ndarray
        The DFT matrix.

    Notes
    -----
    With ``scale=None``, multiplying a vector by this matrix is
    mathematically equivalent to (but much less efficient than)
    ``scipy.fft.fft``.

    .. versionadded:: 0.14.0

    References
    ----------
    .. [1] "DFT matrix", https://en.wikipedia.org/wiki/DFT_matrix
    """
    if scale not in [None, 'sqrtn', 'n']:
        raise ValueError("scale must be None, 'sqrtn', or 'n'; "
                         "{!r} is not valid.".format(scale))
    # Column of the n-th roots of unity; raising it element-wise to the
    # powers 0..n-1 builds the Vandermonde DFT matrix.
    roots = np.exp(-2j * np.pi * np.arange(n) / n).reshape(-1, 1)
    mat = roots ** np.arange(n)
    if scale == 'sqrtn':
        mat /= math.sqrt(n)
    elif scale == 'n':
        mat /= n
    return mat
def fiedler(a):
    """Returns a symmetric Fiedler matrix

    Given a sequence of numbers `a`, a Fiedler matrix has the structure
    ``F[i, j] = np.abs(a[i] - a[j])``: zero diagonal and nonnegative
    entries. It has one dominant positive eigenvalue; the others are
    negative.

    Parameters
    ----------
    a : (n,) array_like
        coefficient array

    Returns
    -------
    F : (n, n) ndarray
        The Fiedler matrix.

    See Also
    --------
    circulant, toeplitz

    Notes
    -----
    .. versionadded:: 1.3.0

    References
    ----------
    .. [1] J. Todd, "Basic Numerical Mathematics: Vol.2 : Numerical
           Algebra", 1977, Birkhauser, :doi:`10.1007/978-3-0348-7286-7`
    """
    vec = np.atleast_1d(a)
    if vec.ndim != 1:
        raise ValueError("Input 'a' must be a 1D array.")
    # Degenerate sizes get explicit results.
    if vec.size == 0:
        return np.array([], dtype=float)
    if vec.size == 1:
        return np.array([[0.]])
    # |a_i - a_j| for all pairs via an outer difference.
    return np.abs(np.subtract.outer(vec, vec))
def fiedler_companion(a):
    """ Returns a Fiedler companion matrix

    Given a polynomial coefficient array ``a``, form the pentadiagonal
    Fiedler matrix whose eigenvalues coincide with the roots of ``a``.

    Parameters
    ----------
    a : (N,) array_like
        1-D array of polynomial coefficients in descending order with a
        nonzero leading coefficient. For ``N < 2``, an empty array is
        returned.

    Returns
    -------
    c : (N-1, N-1) ndarray
        Resulting companion matrix

    See Also
    --------
    companion

    Notes
    -----
    If the leading coefficient is not 1, the other coefficients are
    rescaled by it first; supplying a monic polynomial avoids the extra
    numerical error.

    .. versionadded:: 1.3.0

    References
    ----------
    .. [1] M. Fiedler, " A note on companion matrices", Linear Algebra
           and its Applications, 2003, :doi:`10.1016/S0024-3795(03)00548-2`
    """
    coeffs = np.atleast_1d(a)
    if coeffs.ndim != 1:
        raise ValueError("Input 'a' must be a 1-D array.")
    if coeffs.size <= 2:
        # Degenerate degrees: a 1x1 matrix for a linear polynomial,
        # otherwise an empty array.
        if coeffs.size == 2:
            return np.array([[-(coeffs/coeffs[0])[-1]]])
        return np.array([], dtype=coeffs.dtype)
    if coeffs[0] == 0.:
        raise ValueError('Leading coefficient is zero.')
    coeffs = coeffs / coeffs[0]
    m = coeffs.size - 1
    mat = np.zeros((m, m), dtype=coeffs.dtype)
    # Subdiagonal structure: unit entries interleaved with the negated
    # odd-indexed coefficients.
    mat[range(3, m, 2), range(1, m - 2, 2)] = 1.
    mat[range(2, m, 2), range(1, m - 1, 2)] = -coeffs[3::2]
    # Superdiagonal structure: unit entries interleaved with the negated
    # even-indexed coefficients.
    mat[range(0, m - 2, 2), range(2, m, 2)] = 1.
    mat[range(0, m - 1, 2), range(1, m, 2)] = -coeffs[2::2]
    # First column carries -a[1] and the unit entry below it.
    mat[[0, 1], 0] = [-coeffs[1], 1]
    return mat
def convolution_matrix(a, n, mode='full'):
    """
    Construct a convolution matrix.

    The returned Toeplitz matrix ``A`` satisfies
    ``A @ v == np.convolve(a, v, mode)`` for any length-`n` vector ``v``.

    Parameters
    ----------
    a : (m,) array_like
        The 1-D array to convolve.
    n : int
        The number of columns in the resulting matrix; the length of the
        input to be convolved with `a`.
    mode : str
        One of 'full', 'valid' or 'same', with the same meaning as the
        `mode` argument of ``numpy.convolve``. It determines the row
        count ``k`` of the result: ``m + n - 1`` for 'full',
        ``max(m, n)`` for 'same', and ``max(m, n) - min(m, n) + 1`` for
        'valid'.

    Returns
    -------
    A : (k, n) ndarray
        The convolution matrix.

    See Also
    --------
    toeplitz : Toeplitz matrix

    Notes
    -----
    .. versionadded:: 1.5.0

    References
    ----------
    .. [1] "Convolution", https://en.wikipedia.org/wiki/Convolution

    Examples
    --------
    >>> from scipy.linalg import convolution_matrix
    >>> convolution_matrix([-1, 4, -2], 5, mode='same')
    array([[ 4, -1,  0,  0,  0],
           [-2,  4, -1,  0,  0],
           [ 0, -2,  4, -1,  0],
           [ 0,  0, -2,  4, -1],
           [ 0,  0,  0, -2,  4]])
    """
    if n <= 0:
        raise ValueError('n must be a positive integer.')
    a = np.asarray(a)
    if a.ndim != 1:
        raise ValueError('convolution_matrix expects a one-dimensional '
                         'array as input')
    if a.size == 0:
        raise ValueError('len(a) must be at least 1.')
    if mode not in ('full', 'valid', 'same'):
        raise ValueError(
            "'mode' argument must be one of ('full', 'valid', 'same')")
    # Zero-pad `a` and its reversal so the first column and first row of
    # the Toeplitz matrix can be read off as contiguous slices.
    padded = np.pad(a, (0, n - 1), 'constant')
    padded_rev = np.pad(a[::-1], (0, n - 1), 'constant')
    if mode == 'full':
        first_col = padded
        first_row = padded_rev[-n:]
    elif mode == 'valid':
        # Trim the rows that do not use every coefficient of `a`.
        trim_top = min(n, len(a)) - 1
        trim_bot = trim_top
        first_col = padded[trim_top:len(padded) - trim_bot]
        first_row = padded_rev[-n - trim_top:len(padded_rev) - trim_top]
    else:  # 'same'
        # Keep the centered max(m, n) rows of the 'full' matrix.
        trim = min(n, len(a)) - 1
        trim_top = trim // 2
        trim_bot = trim - trim_top
        first_col = padded[trim_top:len(padded) - trim_bot]
        first_row = padded_rev[-n - trim_top:len(padded_rev) - trim_top]
    return toeplitz(first_col, first_row)
| 40,711
| 28.100786
| 81
|
py
|
scipy
|
scipy-main/scipy/linalg/_procrustes.py
|
"""
Solve the orthogonal Procrustes problem.
"""
import numpy as np
from ._decomp_svd import svd
__all__ = ['orthogonal_procrustes']
def orthogonal_procrustes(A, B, check_finite=True):
    """
    Compute the matrix solution of the orthogonal Procrustes problem.

    Given matrices A and B of equal shape, find an orthogonal matrix R
    that most closely maps A to B using the algorithm given in [1]_.

    Parameters
    ----------
    A : (M, N) array_like
        Matrix to be mapped.
    B : (M, N) array_like
        Target matrix.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers. Disabling may give a performance gain, but may result
        in problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.

    Returns
    -------
    R : (N, N) ndarray
        The matrix minimizing the Frobenius norm of ``(A @ R) - B``,
        subject to ``R.T @ R = I``.
    scale : float
        Sum of the singular values of ``A.T @ B``.

    Raises
    ------
    ValueError
        If the input array shapes don't match, or if `check_finite` is
        True and the arrays contain Inf or NaN.

    Notes
    -----
    Unlike higher-level Procrustes analyses of spatial data, this
    function only uses orthogonal transformations (rotations and
    reflections); it does not use scaling or translation.

    .. versionadded:: 0.15.0

    References
    ----------
    .. [1] Peter H. Schonemann, "A generalized solution of the orthogonal
           Procrustes problem", Psychometrica -- Vol. 31, No. 1, March,
           1966. :doi:`10.1007/BF02289451`
    """
    if check_finite:
        A = np.asarray_chkfinite(A)
        B = np.asarray_chkfinite(B)
    else:
        A = np.asanyarray(A)
        B = np.asanyarray(B)
    if A.ndim != 2:
        raise ValueError('expected ndim to be 2, but observed %s' % A.ndim)
    if A.shape != B.shape:
        raise ValueError('the shapes of A and B differ ({} vs {})'.format(
            A.shape, B.shape))
    # The optimal R is U @ Vt from the SVD of A.T @ B; the transposes are
    # arranged to keep the intermediate product small in memory.
    left, sigma, right = svd(B.T.dot(A).T)
    return left.dot(right), sigma.sum()
| 2,786
| 29.293478
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/matfuncs.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.linalg` namespace for importing the functions
# included below.
import warnings
from . import _matfuncs
# Names re-exported for backward compatibility with the deprecated
# `scipy.linalg.matfuncs` namespace; attribute access is mediated by the
# module-level __getattr__ defined below.
__all__ = [ # noqa: F822
    'expm', 'cosm', 'sinm', 'tanm', 'coshm', 'sinhm',
    'tanhm', 'logm', 'funm', 'signm', 'sqrtm',
    'expm_frechet', 'expm_cond', 'fractional_matrix_power',
    'khatri_rao', 'prod', 'logical_not', 'ravel', 'transpose',
    'conjugate', 'absolute', 'amax', 'sign', 'isfinite', 'single',
    'norm', 'solve', 'inv', 'triu', 'svd', 'schur', 'rsf2csf', 'eps', 'feps'
]
def __dir__():
    # Advertise only the deprecated re-exports to dir() / tab completion.
    return __all__
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): intercepts attribute access so
    # legacy `scipy.linalg.matfuncs.<name>` lookups keep working, while
    # warning that the namespace is deprecated and forwarding to the
    # private `_matfuncs` module.
    if name not in __all__:
        raise AttributeError(
            "scipy.linalg.matfuncs is deprecated and has no attribute "
            f"{name}. Try looking in scipy.linalg instead.")
    warnings.warn(f"Please use `{name}` from the `scipy.linalg` namespace, "
                  "the `scipy.linalg.matfuncs` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_matfuncs, name)
| 1,098
| 32.30303
| 76
|
py
|
scipy
|
scipy-main/scipy/linalg/decomp_svd.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.linalg` namespace for importing the functions
# included below.
import warnings
from . import _decomp_svd
# Names re-exported for backward compatibility with the deprecated
# `scipy.linalg.decomp_svd` namespace; attribute access is mediated by the
# module-level __getattr__ defined below.
__all__ = [ # noqa: F822
    'svd', 'svdvals', 'diagsvd', 'orth', 'subspace_angles', 'null_space',
    'LinAlgError', 'get_lapack_funcs'
]
def __dir__():
    # Advertise only the deprecated re-exports to dir() / tab completion.
    return __all__
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): intercepts attribute access so
    # legacy `scipy.linalg.decomp_svd.<name>` lookups keep working, while
    # warning that the namespace is deprecated and forwarding to the
    # private `_decomp_svd` module.
    if name not in __all__:
        raise AttributeError(
            "scipy.linalg.decomp_svd is deprecated and has no attribute "
            f"{name}. Try looking in scipy.linalg instead.")
    warnings.warn(f"Please use `{name}` from the `scipy.linalg` namespace, "
                  "the `scipy.linalg.decomp_svd` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_decomp_svd, name)
| 850
| 28.344828
| 76
|
py
|
scipy
|
scipy-main/scipy/linalg/_matfuncs_sqrtm.py
|
"""
Matrix square root for general matrices and for upper triangular matrices.
This module exists to avoid cyclic imports.
"""
__all__ = ['sqrtm']
import numpy as np
from scipy._lib._util import _asarray_validated
# Local imports
from ._misc import norm
from .lapack import ztrsyl, dtrsyl
from ._decomp_schur import schur, rsf2csf
class SqrtmError(np.linalg.LinAlgError):
    """Raised when a matrix square root cannot be computed.

    The triangular solver in this module re-raises lower-level
    ``RuntimeError``s from ``within_block_loop`` as this exception type.
    """
    pass
from ._matfuncs_sqrtm_triu import within_block_loop # noqa: E402
def _sqrtm_triu(T, blocksize=64):
    """
    Matrix square root of an upper triangular matrix.

    This is a helper function for `sqrtm` and `logm`.

    Parameters
    ----------
    T : (N, N) array_like upper triangular
        Matrix whose square root to evaluate
    blocksize : int, optional
        If the blocksize is not degenerate with respect to the
        size of the input array, then use a blocked algorithm. (Default: 64)

    Returns
    -------
    sqrtm : (N, N) ndarray
        Value of the sqrt function at `T`

    Raises
    ------
    SqrtmError
        If the triangular recurrence hits a zero denominator with a nonzero
        numerator, i.e. no square root can be found.

    References
    ----------
    .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013)
           "Blocked Schur Algorithms for Computing the Matrix Square Root,
           Lecture Notes in Computer Science, 7782. pp. 171-182.
    """
    T_diag = np.diag(T)
    # A real result is only possible when T is real with a nonnegative
    # diagonal; otherwise switch to complex arithmetic.
    keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0
    # Cast to complex as necessary + ensure double precision
    if not keep_it_real:
        T = np.asarray(T, dtype=np.complex128, order="C")
        T_diag = np.asarray(T_diag, dtype=np.complex128)
    else:
        T = np.asarray(T, dtype=np.float64, order="C")
        T_diag = np.asarray(T_diag, dtype=np.float64)
    # The diagonal of the root is the elementwise sqrt of T's diagonal.
    R = np.diag(np.sqrt(T_diag))
    # Compute the number of blocks to use; use at least one block.
    n, n = T.shape
    nblocks = max(n // blocksize, 1)
    # Compute the smaller of the two sizes of blocks that
    # we will actually use, and compute the number of large blocks.
    bsmall, nlarge = divmod(n, nblocks)
    blarge = bsmall + 1
    nsmall = nblocks - nlarge
    # Sanity check: the block sizes must tile the dimension exactly.
    if nsmall * bsmall + nlarge * blarge != n:
        raise Exception('internal inconsistency')
    # Define the index range covered by each block.
    start_stop_pairs = []
    start = 0
    for count, size in ((nsmall, bsmall), (nlarge, blarge)):
        for i in range(count):
            start_stop_pairs.append((start, start + size))
            start += size
    # Within-block interactions (Cythonized)
    try:
        within_block_loop(R, T, start_stop_pairs, nblocks)
    except RuntimeError as e:
        # Re-raise under a dedicated type so sqrtm() can detect the failure.
        raise SqrtmError(*e.args) from e
    # Between-block interactions (Cython would give no significant speedup)
    for j in range(nblocks):
        jstart, jstop = start_stop_pairs[j]
        for i in range(j-1, -1, -1):
            istart, istop = start_stop_pairs[i]
            S = T[istart:istop, jstart:jstop]
            if j - i > 1:
                # Subtract contributions of blocks strictly between rows
                # i and j before solving for block (i, j).
                S = S - R[istart:istop, istop:jstart].dot(R[istop:jstart,
                                                            jstart:jstop])
            # Invoke LAPACK.
            # For more details, see the solve_sylvester implementation
            # and the fortran dtrsyl and ztrsyl docs.
            Rii = R[istart:istop, istart:istop]
            Rjj = R[jstart:jstop, jstart:jstop]
            if keep_it_real:
                x, scale, info = dtrsyl(Rii, Rjj, S)
            else:
                x, scale, info = ztrsyl(Rii, Rjj, S)
            R[istart:istop, jstart:jstop] = x * scale
    # Return the matrix square root.
    return R
def sqrtm(A, disp=True, blocksize=64):
    """
    Matrix square root.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose square root to evaluate
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)
    blocksize : integer, optional
        If the blocksize is not degenerate with respect to the
        size of the input array, then use a blocked algorithm. (Default: 64)

    Returns
    -------
    sqrtm : (N, N) ndarray
        Value of the sqrt function at `A`. The dtype is float or complex.
        The precision (data size) is determined based on the precision of
        input `A`. When the dtype is float, the precision is the same as `A`.
        When the dtype is complex, the precision is double that of `A`. The
        precision might be clipped by each dtype precision range.
    errest : float
        (if disp == False)
        Frobenius norm of the estimated error, ||err||_F / ||A||_F

    References
    ----------
    .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013)
           "Blocked Schur Algorithms for Computing the Matrix Square Root,
           Lecture Notes in Computer Science, 7782. pp. 171-182.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import sqrtm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> r = sqrtm(a)
    >>> r
    array([[ 0.75592895, 1.13389342],
           [ 0.37796447, 1.88982237]])
    >>> r.dot(r)
    array([[ 1., 3.],
           [ 1., 4.]])
    """
    # Remember the input itemsize so the result can be cast back to a
    # matching precision at the end.
    byte_size = np.asarray(A).dtype.itemsize
    A = _asarray_validated(A, check_finite=True, as_inexact=True)
    if len(A.shape) != 2:
        raise ValueError("Non-matrix input to matrix function.")
    if blocksize < 1:
        raise ValueError("The blocksize should be at least 1.")
    keep_it_real = np.isrealobj(A)
    if keep_it_real:
        T, Z = schur(A)
        # A real Schur form with 2x2 bumps (complex eigenvalue pairs) must
        # be converted to the complex upper triangular form first.
        if not np.array_equal(T, np.triu(T)):
            T, Z = rsf2csf(T, Z)
    else:
        T, Z = schur(A, output='complex')
    failflag = False
    try:
        R = _sqrtm_triu(T, blocksize=blocksize)
        ZH = np.conjugate(Z).T
        # Undo the Schur similarity transform: sqrt(A) = Z sqrt(T) Z^H.
        X = Z.dot(R).dot(ZH)
        if not np.iscomplexobj(X):
            # float byte size range: f2 ~ f16
            X = X.astype(f"f{np.clip(byte_size, 2, 16)}", copy=False)
        else:
            # complex byte size range: c8 ~ c32.
            # c32(complex256) might not be supported in some environments.
            if hasattr(np, 'complex256'):
                X = X.astype(f"c{np.clip(byte_size*2, 8, 32)}", copy=False)
            else:
                X = X.astype(f"c{np.clip(byte_size*2, 8, 16)}", copy=False)
    except SqrtmError:
        # The triangular solver could not produce a root; report NaNs.
        failflag = True
        X = np.empty_like(A)
        X.fill(np.nan)
    if disp:
        if failflag:
            print("Failed to find a square root.")
        return X
    else:
        try:
            # NOTE(review): this squares the residual norm while the
            # docstring describes ||X@X - A||_F / ||A||_F -- confirm which
            # formula is intended before relying on errest's scale.
            arg2 = norm(X.dot(X) - A, 'fro')**2 / norm(A, 'fro')
        except ValueError:
            # NaNs in matrix
            arg2 = np.inf
        return X, arg2
| 6,661
| 30.57346
| 77
|
py
|
scipy
|
scipy-main/scipy/linalg/_decomp_ldl.py
|
from warnings import warn
import numpy as np
from numpy import (atleast_2d, ComplexWarning, arange, zeros_like, imag, diag,
iscomplexobj, tril, triu, argsort, empty_like)
from ._decomp import _asarray_validated
from .lapack import get_lapack_funcs, _compute_lwork
__all__ = ['ldl']
def ldl(A, lower=True, hermitian=True, overwrite_a=False, check_finite=True):
    """ Computes the LDLt or Bunch-Kaufman factorization of a symmetric/
    hermitian matrix.

    This function returns a block diagonal matrix D consisting blocks of size
    at most 2x2 and also a possibly permuted unit lower triangular matrix
    ``L`` such that the factorization ``A = L D L^H`` or ``A = L D L^T``
    holds. If `lower` is False then (again possibly permuted) upper
    triangular matrices are returned as outer factors.

    The permutation array can be used to triangularize the outer factors
    simply by a row shuffle, i.e., ``lu[perm, :]`` is an upper/lower
    triangular matrix. This is also equivalent to multiplication with a
    permutation matrix ``P.dot(lu)``, where ``P`` is a column-permuted
    identity matrix ``I[:, perm]``.

    Depending on the value of the boolean `lower`, only upper or lower
    triangular part of the input array is referenced. Hence, a triangular
    matrix on entry would give the same result as if the full matrix is
    supplied.

    Parameters
    ----------
    A : array_like
        Square input array
    lower : bool, optional
        This switches between the lower and upper triangular outer factors of
        the factorization. Lower triangular (``lower=True``) is the default.
    hermitian : bool, optional
        For complex-valued arrays, this defines whether ``A = A.conj().T`` or
        ``A = A.T`` is assumed. For real-valued arrays, this switch has no
        effect.
    overwrite_a : bool, optional
        Allow overwriting data in `A` (may enhance performance). The default
        is False.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    lu : ndarray
        The (possibly) permuted upper/lower triangular outer factor of the
        factorization.
    d : ndarray
        The block diagonal multiplier of the factorization.
    perm : ndarray
        The row-permutation index array that brings lu into triangular form.

    Raises
    ------
    ValueError
        If input array is not square.
    ComplexWarning
        If a complex-valued array with nonzero imaginary parts on the
        diagonal is given and hermitian is set to True.

    See Also
    --------
    cholesky, lu

    Notes
    -----
    This function uses ``?SYTRF`` routines for symmetric matrices and
    ``?HETRF`` routines for Hermitian matrices from LAPACK. See [1]_ for
    the algorithm details.

    Depending on the `lower` keyword value, only lower or upper triangular
    part of the input array is referenced. Moreover, this keyword also defines
    the structure of the outer factors of the factorization.

    .. versionadded:: 1.1.0

    References
    ----------
    .. [1] J.R. Bunch, L. Kaufman, Some stable methods for calculating
       inertia and solving symmetric linear systems, Math. Comput. Vol.31,
       1977. :doi:`10.2307/2005787`

    Examples
    --------
    Given an upper triangular array ``a`` that represents the full symmetric
    array with its entries, obtain ``l``, 'd' and the permutation vector `perm`:

    >>> import numpy as np
    >>> from scipy.linalg import ldl
    >>> a = np.array([[2, -1, 3], [0, 2, 0], [0, 0, 1]])
    >>> lu, d, perm = ldl(a, lower=0) # Use the upper part
    >>> lu
    array([[ 0. ,  0. ,  1. ],
           [ 0. ,  1. , -0.5],
           [ 1. ,  1. ,  1.5]])
    >>> d
    array([[-5. ,  0. ,  0. ],
           [ 0. ,  1.5,  0. ],
           [ 0. ,  0. ,  2. ]])
    >>> perm
    array([2, 1, 0])
    >>> lu[perm, :]
    array([[ 1. ,  1. ,  1.5],
           [ 0. ,  1. , -0.5],
           [ 0. ,  0. ,  1. ]])
    >>> lu.dot(d).dot(lu.T)
    array([[ 2., -1.,  3.],
           [-1.,  2.,  0.],
           [ 3.,  0.,  1.]])
    """
    a = atleast_2d(_asarray_validated(A, check_finite=check_finite))
    if a.shape[0] != a.shape[1]:
        raise ValueError('The input array "a" should be square.')
    # Return empty arrays for empty square input
    if a.size == 0:
        return empty_like(a), empty_like(a), np.array([], dtype=int)

    n = a.shape[0]
    r_or_c = complex if iscomplexobj(a) else float

    # Get the LAPACK routine: ?HETRF for Hermitian complex input, ?SYTRF for
    # real or complex-symmetric input.
    if r_or_c is complex and hermitian:
        s, sl = 'hetrf', 'hetrf_lwork'
        if np.any(imag(diag(a))):
            # BUGFIX: the adjacent string literals previously lacked
            # separating spaces, producing "diagonalare" / "ofcomplex".
            warn('scipy.linalg.ldl():\nThe imaginary parts of the diagonal '
                 'are ignored. Use "hermitian=False" for factorization of '
                 'complex symmetric arrays.', ComplexWarning, stacklevel=2)
    else:
        s, sl = 'sytrf', 'sytrf_lwork'

    solver, solver_lwork = get_lapack_funcs((s, sl), (a,))
    # Query the optimal workspace size, then factorize.
    lwork = _compute_lwork(solver_lwork, n, lower=lower)
    ldu, piv, info = solver(a, lwork=lwork, lower=lower,
                            overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('{} exited with the internal error "illegal value '
                         'in argument number {}". See LAPACK documentation '
                         'for the error codes.'.format(s.upper(), -info))

    # Decode LAPACK's ipiv encoding, then split the compact output into the
    # block-diagonal D and the (permuted) triangular factor.
    swap_arr, pivot_arr = _ldl_sanitize_ipiv(piv, lower=lower)
    d, lu = _ldl_get_d_and_l(ldu, pivot_arr, lower=lower, hermitian=hermitian)
    lu, perm = _ldl_construct_tri_factor(lu, swap_arr, pivot_arr, lower=lower)

    return lu, d, perm
def _ldl_sanitize_ipiv(a, lower=True):
"""
This helper function takes the rather strangely encoded permutation array
returned by the LAPACK routines ?(HE/SY)TRF and converts it into
regularized permutation and diagonal pivot size format.
Since FORTRAN uses 1-indexing and LAPACK uses different start points for
upper and lower formats there are certain offsets in the indices used
below.
Let's assume a result where the matrix is 6x6 and there are two 2x2
and two 1x1 blocks reported by the routine. To ease the coding efforts,
we still populate a 6-sized array and fill zeros as the following ::
pivots = [2, 0, 2, 0, 1, 1]
This denotes a diagonal matrix of the form ::
[x x ]
[x x ]
[ x x ]
[ x x ]
[ x ]
[ x]
In other words, we write 2 when the 2x2 block is first encountered and
automatically write 0 to the next entry and skip the next spin of the
loop. Thus, a separate counter or array appends to keep track of block
sizes are avoided. If needed, zeros can be filtered out later without
losing the block structure.
Parameters
----------
a : ndarray
The permutation array ipiv returned by LAPACK
lower : bool, optional
The switch to select whether upper or lower triangle is chosen in
the LAPACK call.
Returns
-------
swap_ : ndarray
The array that defines the row/column swap operations. For example,
if row two is swapped with row four, the result is [0, 3, 2, 3].
pivots : ndarray
The array that defines the block diagonal structure as given above.
"""
n = a.size
swap_ = arange(n)
pivots = zeros_like(swap_, dtype=int)
skip_2x2 = False
# Some upper/lower dependent offset values
# range (s)tart, r(e)nd, r(i)ncrement
x, y, rs, re, ri = (1, 0, 0, n, 1) if lower else (-1, -1, n-1, -1, -1)
for ind in range(rs, re, ri):
# If previous spin belonged already to a 2x2 block
if skip_2x2:
skip_2x2 = False
continue
cur_val = a[ind]
# do we have a 1x1 block or not?
if cur_val > 0:
if cur_val != ind+1:
# Index value != array value --> permutation required
swap_[ind] = swap_[cur_val-1]
pivots[ind] = 1
# Not.
elif cur_val < 0 and cur_val == a[ind+x]:
# first neg entry of 2x2 block identifier
if -cur_val != ind+2:
# Index value != array value --> permutation required
swap_[ind+x] = swap_[-cur_val-1]
pivots[ind+y] = 2
skip_2x2 = True
else: # Doesn't make sense, give up
raise ValueError('While parsing the permutation array '
'in "scipy.linalg.ldl", invalid entries '
'found. The array syntax is invalid.')
return swap_, pivots
def _ldl_get_d_and_l(ldu, pivs, lower=True, hermitian=True):
"""
Helper function to extract the diagonal and triangular matrices for
LDL.T factorization.
Parameters
----------
ldu : ndarray
The compact output returned by the LAPACK routing
pivs : ndarray
The sanitized array of {0, 1, 2} denoting the sizes of the pivots. For
every 2 there is a succeeding 0.
lower : bool, optional
If set to False, upper triangular part is considered.
hermitian : bool, optional
If set to False a symmetric complex array is assumed.
Returns
-------
d : ndarray
The block diagonal matrix.
lu : ndarray
The upper/lower triangular matrix
"""
is_c = iscomplexobj(ldu)
d = diag(diag(ldu))
n = d.shape[0]
blk_i = 0 # block index
# row/column offsets for selecting sub-, super-diagonal
x, y = (1, 0) if lower else (0, 1)
lu = tril(ldu, -1) if lower else triu(ldu, 1)
diag_inds = arange(n)
lu[diag_inds, diag_inds] = 1
for blk in pivs[pivs != 0]:
# increment the block index and check for 2s
# if 2 then copy the off diagonals depending on uplo
inc = blk_i + blk
if blk == 2:
d[blk_i+x, blk_i+y] = ldu[blk_i+x, blk_i+y]
# If Hermitian matrix is factorized, the cross-offdiagonal element
# should be conjugated.
if is_c and hermitian:
d[blk_i+y, blk_i+x] = ldu[blk_i+x, blk_i+y].conj()
else:
d[blk_i+y, blk_i+x] = ldu[blk_i+x, blk_i+y]
lu[blk_i+x, blk_i+y] = 0.
blk_i = inc
return d, lu
def _ldl_construct_tri_factor(lu, swap_vec, pivs, lower=True):
"""
Helper function to construct explicit outer factors of LDL factorization.
If lower is True the permuted factors are multiplied as L(1)*L(2)*...*L(k).
Otherwise, the permuted factors are multiplied as L(k)*...*L(2)*L(1). See
LAPACK documentation for more details.
Parameters
----------
lu : ndarray
The triangular array that is extracted from LAPACK routine call with
ones on the diagonals.
swap_vec : ndarray
The array that defines the row swapping indices. If the kth entry is m
then rows k,m are swapped. Notice that the mth entry is not necessarily
k to avoid undoing the swapping.
pivs : ndarray
The array that defines the block diagonal structure returned by
_ldl_sanitize_ipiv().
lower : bool, optional
The boolean to switch between lower and upper triangular structure.
Returns
-------
lu : ndarray
The square outer factor which satisfies the L * D * L.T = A
perm : ndarray
The permutation vector that brings the lu to the triangular form
Notes
-----
Note that the original argument "lu" is overwritten.
"""
n = lu.shape[0]
perm = arange(n)
# Setup the reading order of the permutation matrix for upper/lower
rs, re, ri = (n-1, -1, -1) if lower else (0, n, 1)
for ind in range(rs, re, ri):
s_ind = swap_vec[ind]
if s_ind != ind:
# Column start and end positions
col_s = ind if lower else 0
col_e = n if lower else ind+1
# If we stumble upon a 2x2 block include both cols in the perm.
if pivs[ind] == (0 if lower else 2):
col_s += -1 if lower else 0
col_e += 0 if lower else 1
lu[[s_ind, ind], col_s:col_e] = lu[[ind, s_ind], col_s:col_e]
perm[[s_ind, ind]] = perm[[ind, s_ind]]
return lu, argsort(perm)
| 12,516
| 34.458924
| 80
|
py
|
scipy
|
scipy-main/scipy/linalg/_sketches.py
|
""" Sketching-based Matrix Computations """
# Author: Jordi Montes <jomsdev@gmail.com>
# August 28, 2017
import numpy as np
from scipy._lib._util import check_random_state, rng_integers
from scipy.sparse import csc_matrix
__all__ = ['clarkson_woodruff_transform']
def cwt_matrix(n_rows, n_columns, seed=None):
    r"""
    Generate a matrix S which represents a Clarkson-Woodruff transform.

    The returned matrix has shape ``(n_rows, n_columns)``; every column is
    all zeros except for a single entry, placed at a uniformly random row
    and set to +1 or -1 with equal probability.

    Parameters
    ----------
    n_rows : int
        Number of rows of S
    n_columns : int
        Number of columns of S
    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.

    Returns
    -------
    S : (n_rows, n_columns) csc_matrix
        The returned matrix has ``n_columns`` nonzero entries.

    Notes
    -----
    Given a matrix A, with probability at least 9/10,
    .. math:: \|SA\| = (1 \pm \epsilon)\|A\|
    Where the error epsilon is related to the size of S.
    """
    rng = check_random_state(seed)
    # One nonzero per column: pick its row, then its sign. (The two RNG
    # draws happen in this order so streams stay reproducible.)
    nonzero_rows = rng_integers(rng, 0, n_rows, n_columns)
    indptr = np.arange(n_columns + 1)
    data = rng.choice([1, -1], n_columns)
    return csc_matrix((data, nonzero_rows, indptr), shape=(n_rows, n_columns))
def clarkson_woodruff_transform(input_matrix, sketch_size, seed=None):
    r"""
    Applies a Clarkson-Woodruff Transform/sketch to the input matrix.

    Given an ``input_matrix`` ``A`` of shape ``(n, d)``, draw a random
    CountSketch operator ``S`` of shape ``(sketch_size, n)`` (see
    `cwt_matrix`) and return ``A' = S A`` of shape ``(sketch_size, d)``,
    so that with high probability

    .. math:: \|Ax\| \approx \|A'x\|

    for any fixed vector ``x``. A sketch size of at least
    :math:`2 / (\epsilon^2 \delta)` guarantees
    :math:`\|Ax\| = (1 \pm \epsilon)\|A'x\|` with probability at least
    :math:`1 - \delta` (adapted from Theorem 14 of [2]_ via Markov's
    inequality).

    Parameters
    ----------
    input_matrix : array_like
        Input matrix, of shape ``(n, d)``. Sparse input is supported;
        ``scipy.sparse.csc_matrix`` gives the quickest computation, with a
        cost proportional to ``A.nnz``. Dense input works as well, only
        relatively slower.
    sketch_size : int
        Number of rows for the sketch.
    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.

    Returns
    -------
    A' : array_like
        Sketch of the input matrix ``A``, of size ``(sketch_size, d)``.

    Notes
    -----
    Sketching preserves matrix norms and least-squares solutions
    approximately, e.g. ``linalg.norm(A)`` is close to
    ``linalg.norm(clarkson_woodruff_transform(A, k))`` for a suitable
    sketch size ``k``, and the solution of :math:`\min \|Ax - b\|` computed
    on the sketched system is close to the true one with high probability.

    References
    ----------
    .. [1] Kenneth L. Clarkson and David P. Woodruff. Low rank approximation
           and regression in input sparsity time. In STOC, 2013.
    .. [2] David P. Woodruff. Sketching as a tool for numerical linear algebra.
           In Foundations and Trends in Theoretical Computer Science, 2014.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import linalg
    >>> n_rows, n_columns = 15000, 100
    >>> rng = np.random.default_rng()
    >>> A = rng.standard_normal((n_rows, n_columns))
    >>> sketch = linalg.clarkson_woodruff_transform(A, 200, seed=rng)
    >>> sketch.shape
    (200, 100)
    """
    # Draw the random sketching operator, then apply it. The sparse dot
    # keeps the cost proportional to the input's nnz for sparse inputs.
    sketch_op = cwt_matrix(sketch_size, input_matrix.shape[0], seed)
    return sketch_op.dot(input_matrix)
| 6,145
| 33.144444
| 86
|
py
|
scipy
|
scipy-main/scipy/linalg/_matfuncs_sqrtm_triu.py
|
# pythran export within_block_loop(float64[:,:], float64[:,:],
# (int, int) list, intp)
# pythran export within_block_loop(complex128[:,:], complex128[:,:],
# (int, int) list, intp)
def within_block_loop(R, T, start_stop_pairs, nblocks):
    # Solve for the off-diagonal entries of the square root inside each
    # diagonal block via the standard triangular recurrence:
    #   R[i, j] = (T[i, j] - sum_k R[i, k] R[k, j]) / (R[i, i] + R[j, j])
    for start, stop in start_stop_pairs:
        for j in range(start, stop):
            for i in range(j - 1, start - 1, -1):
                # Accumulate the already-computed inner products between
                # column j and row i (empty when j == i + 1).
                acc = 0
                for k in range(i + 1, j):
                    acc += R[i, k] * R[k, j]

                denom = R[i, i] + R[j, j]
                num = T[i, j] - acc
                if denom != 0:
                    R[i, j] = num / denom
                elif num == 0:
                    # 0/0: the entry is free; zero is a valid choice.
                    R[i, j] = 0
                else:
                    # Nonzero numerator over zero denominator: no root.
                    raise RuntimeError('failed to find the matrix square root')
| 966
| 39.291667
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/_decomp_schur.py
|
"""Schur decomposition functions."""
import numpy
from numpy import asarray_chkfinite, single, asarray, array
from numpy.linalg import norm
# Local imports.
from ._misc import LinAlgError, _datacopied
from .lapack import get_lapack_funcs
from ._decomp import eigvals
__all__ = ['schur', 'rsf2csf']
_double_precision = ['i', 'l', 'd']
def schur(a, output='real', lwork=None, overwrite_a=False, sort=None,
          check_finite=True):
    """
    Compute Schur decomposition of a matrix.

    The Schur decomposition is::

        A = Z T Z^H

    where Z is unitary and T is either upper-triangular, or for real
    Schur decomposition (output='real'), quasi-upper triangular. In
    the quasi-triangular form, 2x2 blocks describing complex-valued
    eigenvalue pairs may extrude from the diagonal.

    Parameters
    ----------
    a : (M, M) array_like
        Matrix to decompose
    output : {'real', 'complex'}, optional
        Construct the real or complex Schur decomposition (for real matrices).
    lwork : int, optional
        Work array size. If None or -1, it is automatically computed.
    overwrite_a : bool, optional
        Whether to overwrite data in a (may improve performance).
    sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
        Specifies whether the upper eigenvalues should be sorted. A callable
        may be passed that, given a eigenvalue, returns a boolean denoting
        whether the eigenvalue should be sorted to the top-left (True).
        Alternatively, string parameters may be used::

            'lhp'   Left-hand plane (x.real < 0.0)
            'rhp'   Right-hand plane (x.real >= 0.0)
            'iuc'   Inside the unit circle (abs(x) <= 1.0)
            'ouc'   Outside the unit circle (abs(x) > 1.0)

        Defaults to None (no sorting).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    T : (M, M) ndarray
        Schur form of A. It is real-valued for the real Schur decomposition.
    Z : (M, M) ndarray
        An unitary Schur transformation matrix for A.
        It is real-valued for the real Schur decomposition.
    sdim : int
        If and only if sorting was requested, a third return value will
        contain the number of eigenvalues satisfying the sort condition.

    Raises
    ------
    LinAlgError
        Error raised under three conditions:

        1. The algorithm failed due to a failure of the QR algorithm to
           compute all eigenvalues.
        2. If eigenvalue sorting was requested, the eigenvalues could not be
           reordered due to a failure to separate eigenvalues, usually because
           of poor conditioning.
        3. If eigenvalue sorting was requested, roundoff errors caused the
           leading eigenvalues to no longer satisfy the sorting condition.

    See Also
    --------
    rsf2csf : Convert real Schur form to complex Schur form

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import schur, eigvals
    >>> A = np.array([[0, 2, 2], [0, 1, 2], [1, 0, 1]])
    >>> T, Z = schur(A)
    >>> T
    array([[ 2.65896708,  1.42440458, -1.92933439],
           [ 0.        , -0.32948354, -0.49063704],
           [ 0.        ,  1.31178921, -0.32948354]])
    >>> Z
    array([[0.72711591, -0.60156188, 0.33079564],
           [0.52839428, 0.79801892, 0.28976765],
           [0.43829436, 0.03590414, -0.89811411]])

    >>> T2, Z2 = schur(A, output='complex')
    >>> T2
    array([[ 2.65896708, -1.22839825+1.32378589j,  0.42590089+1.51937378j],
           [ 0.        , -0.32948354+0.80225456j, -0.59877807+0.56192146j],
           [ 0.        ,  0.        , -0.32948354-0.80225456j]])
    >>> eigvals(T2)
    array([2.65896708, -0.32948354+0.80225456j, -0.32948354-0.80225456j])

    An arbitrary custom eig-sorting condition, having positive imaginary part,
    which is satisfied by only one eigenvalue

    >>> T3, Z3, sdim = schur(A, output='complex', sort=lambda x: x.imag > 0)
    >>> sdim
    1
    """
    if output not in ['real', 'complex', 'r', 'c']:
        raise ValueError("argument must be 'real', or 'complex'")
    if check_finite:
        a1 = asarray_chkfinite(a)
    else:
        a1 = asarray(a)
    if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]):
        raise ValueError('expected square matrix')
    typ = a1.dtype.char
    # A complex output from real input requires upcasting to a complex dtype
    # of matching precision before calling LAPACK.
    if output in ['complex', 'c'] and typ not in ['F', 'D']:
        if typ in _double_precision:
            a1 = a1.astype('D')
            typ = 'D'
        else:
            a1 = a1.astype('F')
            typ = 'F'
    # If the cast above (or asarray) already made a copy, overwriting is safe.
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    gees, = get_lapack_funcs(('gees',), (a1,))
    if lwork is None or lwork == -1:
        # get optimal work array
        result = gees(lambda x: None, a1, lwork=-1)
        # The optimal lwork is reported in the workspace output of the query.
        lwork = result[-2][0].real.astype(numpy.int_)
    # Build the eigenvalue-selection callback passed to ?GEES.
    if sort is None:
        sort_t = 0
        def sfunction(x):
            return None
    else:
        sort_t = 1
        if callable(sort):
            sfunction = sort
        elif sort == 'lhp':
            def sfunction(x):
                return x.real < 0.0
        elif sort == 'rhp':
            def sfunction(x):
                return x.real >= 0.0
        elif sort == 'iuc':
            def sfunction(x):
                return abs(x) <= 1.0
        elif sort == 'ouc':
            def sfunction(x):
                return abs(x) > 1.0
        else:
            raise ValueError("'sort' parameter must either be 'None', or a "
                             "callable, or one of ('lhp','rhp','iuc','ouc')")
    result = gees(sfunction, a1, lwork=lwork, overwrite_a=overwrite_a,
                  sort_t=sort_t)
    # Map LAPACK's info codes to the errors documented above.
    info = result[-1]
    if info < 0:
        raise ValueError('illegal value in {}-th argument of internal gees'
                         ''.format(-info))
    elif info == a1.shape[0] + 1:
        raise LinAlgError('Eigenvalues could not be separated for reordering.')
    elif info == a1.shape[0] + 2:
        raise LinAlgError('Leading eigenvalues do not satisfy sort condition.')
    elif info > 0:
        raise LinAlgError("Schur form not found. Possibly ill-conditioned.")
    if sort_t == 0:
        return result[0], result[-3]
    else:
        return result[0], result[-3], result[1]
eps = numpy.finfo(float).eps
feps = numpy.finfo(single).eps
_array_kind = {'b': 0, 'h': 0, 'B': 0, 'i': 0, 'l': 0,
'f': 0, 'd': 0, 'F': 1, 'D': 1}
_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1}
_array_type = [['f', 'd'], ['F', 'D']]
def _commonType(*arrays):
kind = 0
precision = 0
for a in arrays:
t = a.dtype.char
kind = max(kind, _array_kind[t])
precision = max(precision, _array_precision[t])
return _array_type[kind][precision]
def _castCopy(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.char == type:
cast_arrays = cast_arrays + (a.copy(),)
else:
cast_arrays = cast_arrays + (a.astype(type),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def rsf2csf(T, Z, check_finite=True):
    """
    Convert real Schur form to complex Schur form.

    Convert a quasi-diagonal real-valued Schur form to the upper-triangular
    complex-valued Schur form.

    Parameters
    ----------
    T : (M, M) array_like
        Real Schur form of the original array
    Z : (M, M) array_like
        Schur transformation matrix
    check_finite : bool, optional
        Whether to check that the input arrays contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    T : (M, M) ndarray
        Complex Schur form of the original array
    Z : (M, M) ndarray
        Schur transformation matrix corresponding to the complex form

    See Also
    --------
    schur : Schur decomposition of an array

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import schur, rsf2csf
    >>> A = np.array([[0, 2, 2], [0, 1, 2], [1, 0, 1]])
    >>> T, Z = schur(A)
    >>> T
    array([[ 2.65896708,  1.42440458, -1.92933439],
           [ 0.        , -0.32948354, -0.49063704],
           [ 0.        ,  1.31178921, -0.32948354]])
    >>> Z
    array([[0.72711591, -0.60156188, 0.33079564],
           [0.52839428, 0.79801892, 0.28976765],
           [0.43829436, 0.03590414, -0.89811411]])
    >>> T2 , Z2 = rsf2csf(T, Z)
    >>> T2
    array([[2.65896708+0.j, -1.64592781+0.743164187j, -1.21516887+1.00660462j],
           [0.+0.j , -0.32948354+8.02254558e-01j, -0.82115218-2.77555756e-17j],
           [0.+0.j , 0.+0.j, -0.32948354-0.802254558j]])
    >>> Z2
    array([[0.72711591+0.j, 0.28220393-0.31385693j, 0.51319638-0.17258824j],
           [0.52839428+0.j, 0.24720268+0.41635578j, -0.68079517-0.15118243j],
           [0.43829436+0.j, -0.76618703+0.01873251j, -0.03063006+0.46857912j]])
    """
    if check_finite:
        Z, T = map(asarray_chkfinite, (Z, T))
    else:
        Z, T = map(asarray, (Z, T))
    # Both inputs must be square and of matching size.
    for ind, X in enumerate([Z, T]):
        if X.ndim != 2 or X.shape[0] != X.shape[1]:
            raise ValueError("Input '{}' must be square.".format('ZT'[ind]))
    if T.shape[0] != Z.shape[0]:
        raise ValueError("Input array shapes must match: Z: {} vs. T: {}"
                         "".format(Z.shape, T.shape))
    N = T.shape[0]
    # Work in a common complex dtype; the dummy 'F' array forces the result
    # type to be complex even when both inputs are real.
    t = _commonType(Z, T, array([3.0], 'F'))
    Z, T = _castCopy(t, Z, T)
    # Sweep the subdiagonal from the bottom up, annihilating each 2x2 bump
    # with a complex Givens rotation built from the block's eigenvalues.
    for m in range(N-1, 0, -1):
        if abs(T[m, m-1]) > eps*(abs(T[m-1, m-1]) + abs(T[m, m])):
            mu = eigvals(T[m-1:m+1, m-1:m+1]) - T[m, m]
            r = norm([mu[0], T[m, m-1]])
            c = mu[0] / r
            s = T[m, m-1] / r
            G = array([[c.conj(), s], [-s, c]], dtype=t)
            # Apply the similarity transform G . T . G^H restricted to the
            # affected rows/columns, and accumulate the rotation into Z.
            T[m-1:m+1, m-1:] = G.dot(T[m-1:m+1, m-1:])
            T[:m+1, m-1:m+1] = T[:m+1, m-1:m+1].dot(G.conj().T)
            Z[:, m-1:m+1] = Z[:, m-1:m+1].dot(G.conj().T)
        # Zero the subdiagonal entry unconditionally (roundoff cleanup).
        T[m, m-1] = 0.0
    return T, Z
| 10,355
| 33.52
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/_expm_frechet.py
|
"""Frechet derivative of the matrix exponential."""
import numpy as np
import scipy.linalg
__all__ = ['expm_frechet', 'expm_cond']
def expm_frechet(A, E, method=None, compute_expm=True, check_finite=True):
    """
    Frechet derivative of the matrix exponential of A in the direction E.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix of which to take the matrix exponential.
    E : (N, N) array_like
        Matrix direction in which to take the Frechet derivative.
    method : str, optional
        Choice of algorithm. Should be one of

        - `SPS` (default)
        - `blockEnlarge`

    compute_expm : bool, optional
        Whether to compute also `expm_A` in addition to `expm_frechet_AE`.
        Default is True.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    expm_A : ndarray
        Matrix exponential of A.
    expm_frechet_AE : ndarray
        Frechet derivative of the matrix exponential of A in the direction E.
        For ``compute_expm = False``, only `expm_frechet_AE` is returned.

    See Also
    --------
    expm : Compute the exponential of a matrix.

    Notes
    -----
    This section describes the available implementations that can be selected
    by the `method` parameter. The default method is *SPS*.

    Method *blockEnlarge* is a naive algorithm.

    Method *SPS* is Scaling-Pade-Squaring [1]_.
    It is a sophisticated implementation which should take
    only about 3/8 as much time as the naive implementation.
    The asymptotics are the same.

    .. versionadded:: 0.13.0

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
           Computing the Frechet Derivative of the Matrix Exponential,
           with an application to Condition Number Estimation.
           SIAM Journal On Matrix Analysis and Applications.,
           30 (4). pp. 1639-1657. ISSN 1095-7162

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import linalg
    >>> rng = np.random.default_rng()
    >>> A = rng.standard_normal((3, 3))
    >>> E = rng.standard_normal((3, 3))
    >>> expm_A, expm_frechet_AE = linalg.expm_frechet(A, E)
    >>> expm_A.shape, expm_frechet_AE.shape
    ((3, 3), (3, 3))

    Create a 6x6 matrix containg [[A, E], [0, A]]:

    >>> M = np.zeros((6, 6))
    >>> M[:3, :3] = A
    >>> M[:3, 3:] = E
    >>> M[3:, 3:] = A
    >>> expm_M = linalg.expm(M)
    >>> np.allclose(expm_A, expm_M[:3, :3])
    True
    >>> np.allclose(expm_frechet_AE, expm_M[:3, 3:])
    True
    """
    # Input validation: both matrices must be square and of identical shape;
    # asarray_chkfinite additionally raises on inf/nan when requested.
    if check_finite:
        A = np.asarray_chkfinite(A)
        E = np.asarray_chkfinite(E)
    else:
        A = np.asarray(A)
        E = np.asarray(E)
    if A.ndim != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be a square matrix')
    if E.ndim != 2 or E.shape[0] != E.shape[1]:
        raise ValueError('expected E to be a square matrix')
    if A.shape != E.shape:
        raise ValueError('expected A and E to be the same shape')
    if method is None:
        method = 'SPS'
    if method == 'SPS':
        # Scaling-Pade-Squaring ([1]_): expm and its derivative together.
        expm_A, expm_frechet_AE = expm_frechet_algo_64(A, E)
    elif method == 'blockEnlarge':
        # Naive method via the exponential of the 2n x 2n block matrix
        # [[A, E], [0, A]].
        expm_A, expm_frechet_AE = expm_frechet_block_enlarge(A, E)
    else:
        raise ValueError('Unknown implementation %s' % method)
    if compute_expm:
        return expm_A, expm_frechet_AE
    else:
        return expm_frechet_AE
def expm_frechet_block_enlarge(A, E):
    """
    Helper function, mostly for testing and profiling.

    Builds the 2n-by-2n block matrix ``[[A, E], [0, A]]``; the top-left
    block of its exponential is ``expm(A)`` and the top-right block is the
    Frechet derivative of expm at ``A`` in the direction ``E``.

    Return expm(A), frechet(A, E)
    """
    dim = A.shape[0]
    top_row = np.hstack([A, E])
    bottom_row = np.hstack([np.zeros_like(A), A])
    exp_big = scipy.linalg.expm(np.vstack([top_row, bottom_row]))
    return exp_big[:dim, :dim], exp_big[:dim, dim:]
"""
Maximal values ell_m of ||2**-s A|| such that the backward error bound
does not exceed 2**-53.
"""
ell_table_61 = (
None,
# 1
2.11e-8,
3.56e-4,
1.08e-2,
6.49e-2,
2.00e-1,
4.37e-1,
7.83e-1,
1.23e0,
1.78e0,
2.42e0,
# 11
3.13e0,
3.90e0,
4.74e0,
5.63e0,
6.56e0,
7.52e0,
8.53e0,
9.56e0,
1.06e1,
1.17e1,
)
# The b vectors and U and V are copypasted
# from scipy.sparse.linalg.matfuncs.py.
# M, Lu, Lv follow (6.11), (6.12), (6.13), (3.3)
def _diff_pade3(A, E, ident):
b = (120., 60., 12., 1.)
A2 = A.dot(A)
M2 = np.dot(A, E) + np.dot(E, A)
U = A.dot(b[3]*A2 + b[1]*ident)
V = b[2]*A2 + b[0]*ident
Lu = A.dot(b[3]*M2) + E.dot(b[3]*A2 + b[1]*ident)
Lv = b[2]*M2
return U, V, Lu, Lv
def _diff_pade5(A, E, ident):
b = (30240., 15120., 3360., 420., 30., 1.)
A2 = A.dot(A)
M2 = np.dot(A, E) + np.dot(E, A)
A4 = np.dot(A2, A2)
M4 = np.dot(A2, M2) + np.dot(M2, A2)
U = A.dot(b[5]*A4 + b[3]*A2 + b[1]*ident)
V = b[4]*A4 + b[2]*A2 + b[0]*ident
Lu = (A.dot(b[5]*M4 + b[3]*M2) +
E.dot(b[5]*A4 + b[3]*A2 + b[1]*ident))
Lv = b[4]*M4 + b[2]*M2
return U, V, Lu, Lv
def _diff_pade7(A, E, ident):
    # Degree-7 Pade pieces: U (odd part) and V (even part) of the expm
    # approximant; Lu, Lv are the corresponding pieces of its Frechet
    # derivative.  M2k = D[A**(2k)](E) via the product rule.
    b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
    A2 = A.dot(A)
    M2 = np.dot(A, E) + np.dot(E, A)
    A4 = np.dot(A2, A2)
    M4 = np.dot(A2, M2) + np.dot(M2, A2)
    A6 = np.dot(A2, A4)
    M6 = np.dot(A4, M2) + np.dot(M4, A2)
    U = A.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)
    V = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
    Lu = (A.dot(b[7]*M6 + b[5]*M4 + b[3]*M2) +
          E.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident))
    Lv = b[6]*M6 + b[4]*M4 + b[2]*M2
    return U, V, Lu, Lv
def _diff_pade9(A, E, ident):
    # Degree-9 Pade pieces: U (odd part) and V (even part) of the expm
    # approximant; Lu, Lv are the corresponding pieces of its Frechet
    # derivative.  M2k = D[A**(2k)](E) via the product rule.
    b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
         2162160., 110880., 3960., 90., 1.)
    A2 = A.dot(A)
    M2 = np.dot(A, E) + np.dot(E, A)
    A4 = np.dot(A2, A2)
    M4 = np.dot(A2, M2) + np.dot(M2, A2)
    A6 = np.dot(A2, A4)
    M6 = np.dot(A4, M2) + np.dot(M4, A2)
    A8 = np.dot(A4, A4)
    M8 = np.dot(A4, M4) + np.dot(M4, A4)
    U = A.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)
    V = b[8]*A8 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
    Lu = (A.dot(b[9]*M8 + b[7]*M6 + b[5]*M4 + b[3]*M2) +
          E.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident))
    Lv = b[8]*M8 + b[6]*M6 + b[4]*M4 + b[2]*M2
    return U, V, Lu, Lv
def expm_frechet_algo_64(A, E):
    """
    Scaling-Pade-Squaring computation of expm(A) and its Frechet
    derivative in the direction E (Al-Mohy & Higham 2009).

    Returns
    -------
    R : ndarray
        expm(A).
    L : ndarray
        Frechet derivative of expm at A in the direction E.
    """
    n = A.shape[0]
    s = None
    ident = np.identity(n)
    A_norm_1 = scipy.linalg.norm(A, 1)
    m_pade_pairs = (
        (3, _diff_pade3),
        (5, _diff_pade5),
        (7, _diff_pade7),
        (9, _diff_pade9))
    # Pick the cheapest Pade order whose backward-error bound (ell_table_61)
    # already holds without scaling A.
    for m, pade in m_pade_pairs:
        if A_norm_1 <= ell_table_61[m]:
            U, V, Lu, Lv = pade(A, E, ident)
            s = 0
            break
    if s is None:
        # scaling
        s = max(0, int(np.ceil(np.log2(A_norm_1 / ell_table_61[13]))))
        A = A * 2.0**-s
        E = E * 2.0**-s
        # pade order 13
        A2 = np.dot(A, A)
        M2 = np.dot(A, E) + np.dot(E, A)
        A4 = np.dot(A2, A2)
        M4 = np.dot(A2, M2) + np.dot(M2, A2)
        A6 = np.dot(A2, A4)
        M6 = np.dot(A4, M2) + np.dot(M4, A2)
        b = (64764752532480000., 32382376266240000., 7771770303897600.,
             1187353796428800., 129060195264000., 10559470521600.,
             670442572800., 33522128640., 1323241920., 40840800., 960960.,
             16380., 182., 1.)
        W1 = b[13]*A6 + b[11]*A4 + b[9]*A2
        W2 = b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident
        Z1 = b[12]*A6 + b[10]*A4 + b[8]*A2
        Z2 = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
        W = np.dot(A6, W1) + W2
        U = np.dot(A, W)
        V = np.dot(A6, Z1) + Z2
        # Lw*, Lz* mirror W*, Z* with each even power replaced by its
        # directional derivative M2k.
        Lw1 = b[13]*M6 + b[11]*M4 + b[9]*M2
        Lw2 = b[7]*M6 + b[5]*M4 + b[3]*M2
        Lz1 = b[12]*M6 + b[10]*M4 + b[8]*M2
        Lz2 = b[6]*M6 + b[4]*M4 + b[2]*M2
        Lw = np.dot(A6, Lw1) + np.dot(M6, W1) + Lw2
        Lu = np.dot(A, Lw) + np.dot(E, W)
        Lv = np.dot(A6, Lz1) + np.dot(M6, Z1) + Lz2
    # factor once and solve twice
    # R = (V - U)^{-1} (V + U) is the Pade approximant of expm(2**-s A).
    lu_piv = scipy.linalg.lu_factor(-U + V)
    R = scipy.linalg.lu_solve(lu_piv, U + V)
    L = scipy.linalg.lu_solve(lu_piv, Lu + Lv + np.dot((Lu - Lv), R))
    # squaring
    # Undo the scaling: R <- R**2 repeatedly; the derivative follows the
    # product rule L <- R L + L R at each squaring step.
    for k in range(s):
        L = np.dot(R, L) + np.dot(L, R)
        R = np.dot(R, R)
    return R, L
def vec(M):
    """
    Stack columns of M to construct a single vector.

    This is somewhat standard notation in linear algebra.

    Parameters
    ----------
    M : 2-D array_like
        Input matrix

    Returns
    -------
    v : 1-D ndarray
        Output vector
    """
    # Coerce to ndarray so array_like inputs (e.g. nested lists) work as
    # documented; the original `M.T.ravel()` required an ndarray.  Reading
    # the transpose row-major equals reading M column-major (vec).
    return np.asarray(M).T.ravel()
def expm_frechet_kronform(A, method=None, check_finite=True):
    """
    Construct the Kronecker form of the Frechet derivative of expm.

    Parameters
    ----------
    A : array_like with shape (N, N)
        Matrix to be expm'd.
    method : str, optional
        Extra keyword to be passed to expm_frechet.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    K : 2-D ndarray with shape (N*N, N*N)
        Kronecker form of the Frechet derivative of the matrix exponential.

    Notes
    -----
    This function is used to help compute the condition number
    of the matrix exponential.

    See Also
    --------
    expm : Compute a matrix exponential.
    expm_frechet : Compute the Frechet derivative of the matrix exponential.
    expm_cond : Compute the relative condition number of the matrix exponential
        in the Frobenius norm.
    """
    if check_finite:
        A = np.asarray_chkfinite(A)
    else:
        A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')
    n = A.shape[0]
    ident = np.identity(n)
    cols = []
    for i in range(n):
        for j in range(n):
            # Differentiate along each standard basis matrix E_ij; the vec
            # of the resulting derivative is one column of the n^2 x n^2
            # Kronecker form.
            E = np.outer(ident[i], ident[j])
            F = expm_frechet(A, E,
                    method=method, compute_expm=False, check_finite=False)
            cols.append(vec(F))
    return np.vstack(cols).T
def expm_cond(A, check_finite=True):
    """
    Relative condition number of the matrix exponential in the Frobenius norm.

    Parameters
    ----------
    A : 2-D array_like
        Square input matrix with shape (N, N).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    kappa : float
        The relative condition number of the matrix exponential
        in the Frobenius norm

    See Also
    --------
    expm : Compute the exponential of a matrix.
    expm_frechet : Compute the Frechet derivative of the matrix exponential.

    Notes
    -----
    A faster estimate for the condition number in the 1-norm
    has been published but is not yet implemented in SciPy.

    .. versionadded:: 0.14.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import expm_cond
    >>> A = np.array([[-0.3, 0.2, 0.6], [0.6, 0.3, -0.1], [-0.7, 1.2, 0.9]])
    >>> k = expm_cond(A)
    >>> k
    1.7787805864469866
    """
    if check_finite:
        A = np.asarray_chkfinite(A)
    else:
        A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')
    X = scipy.linalg.expm(A)
    # Kronecker form of the Frechet derivative represents the linear map
    # E -> D[expm](A)(E) as an n^2 x n^2 matrix.
    K = expm_frechet_kronform(A, check_finite=False)
    # The following norm choices are deliberate.
    # The norms of A and X are Frobenius norms,
    # and the norm of K is the induced 2-norm.
    A_norm = scipy.linalg.norm(A, 'fro')
    X_norm = scipy.linalg.norm(X, 'fro')
    K_norm = scipy.linalg.norm(K, 2)
    # kappa = ||L|| * ||A|| / ||expm(A)||, the relative condition number.
    kappa = (K_norm * A_norm) / X_norm
    return kappa
| 12,326
| 28.775362
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/decomp_schur.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.linalg` namespace for importing the functions
# included below.
import warnings
from . import _decomp_schur
__all__ = [ # noqa: F822
'schur', 'rsf2csf', 'asarray_chkfinite', 'single', 'array', 'norm',
'LinAlgError', 'get_lapack_funcs', 'eigvals', 'eps', 'feps'
]
def __dir__():
    # Limit dir()/tab-completion on this deprecated shim module to the
    # public names it still forwards.
    return __all__
def __getattr__(name):
    # PEP 562 module-level __getattr__: forward public names to the private
    # implementation module, emitting a DeprecationWarning on each access.
    if name not in __all__:
        raise AttributeError(
            "scipy.linalg.decomp_schur is deprecated and has no attribute "
            f"{name}. Try looking in scipy.linalg instead.")
    warnings.warn(f"Please use `{name}` from the `scipy.linalg` namespace, "
                  "the `scipy.linalg.decomp_schur` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_decomp_schur, name)
| 882
| 29.448276
| 77
|
py
|
scipy
|
scipy-main/scipy/linalg/_cython_signature_generator.py
|
"""
A script that uses f2py to generate the signature files used to make
the Cython BLAS and LAPACK wrappers from the fortran source code for
LAPACK and the reference BLAS.
To generate the BLAS wrapper signatures call:
python _cython_signature_generator.py blas <blas_directory> <out_file>
To generate the LAPACK wrapper signatures call:
python _cython_signature_generator.py lapack <lapack_src_directory> <out_file>
This script expects to be run on the source directory for
the oldest supported version of LAPACK (currently 3.4.0).
"""
import glob
import os
from numpy.f2py import crackfortran
# Map Fortran type specifiers (as reported by f2py's crackfortran) to the
# short type codes used in the generated Cython signature files.
sig_types = {'integer': 'int',
             'complex': 'c',
             'double precision': 'd',
             'real': 's',
             'complex*16': 'z',
             'double complex': 'z',
             'character': 'char',
             'logical': 'bint'}
def get_type(info, arg):
    """Return the signature type code for *arg* in a crackfortran block."""
    var_info = info['vars'][arg]
    code = sig_types[var_info['typespec']]
    # A 'complex' declaration carrying an explicit kind selector is
    # treated as double complex.
    if code == 'c' and var_info.get('kindselector') is not None:
        return 'z'
    return code
def make_signature(filename):
    # Build a single Cython signature line ("<ret> <name>(<type> *<arg>, ...)")
    # from the first program unit of a Fortran source file.
    info = crackfortran.crackfortran(filename)[0]
    name = info['name']
    if info['block'] == 'subroutine':
        return_type = 'void'
    else:
        # For functions, the result variable shares the routine's name.
        return_type = get_type(info, name)
    # Every argument is passed by pointer, hence the ' *' separator.
    arglist = [' *'.join([get_type(info, arg), arg]) for arg in info['args']]
    args = ', '.join(arglist)
    # Eliminate strange variable naming that replaces rank with rank_bn.
    args = args.replace('rank_bn', 'rank')
    return f'{return_type} {name}({args})\n'
def get_sig_name(line):
    """Extract the routine name from a signature line like 'void name(args)'."""
    before_paren = line.partition('(')[0]
    return before_paren.split(' ')[-1]
def sigs_from_dir(directory, outfile, manual_wrappers=None, exclusions=None):
    """Write a sorted Cython signature file for the Fortran sources in a directory.

    Parameters
    ----------
    directory : str
        Directory containing the ``*.f*`` Fortran source files.
    outfile : str
        Path of the signature file to write.
    manual_wrappers : str, optional
        Newline-separated hand-written signature lines appended verbatim;
        routines named there are excluded from automatic generation.
    exclusions : list of str, optional
        Routine names to skip.  The caller's list is left unmodified.
    """
    # Trim a trailing path separator so the glob pattern stays well-formed.
    if directory[-1] in ['/', '\\']:
        directory = directory[:-1]
    files = sorted(glob.glob(directory + '/*.f*'))
    manual_lines = [] if manual_wrappers is None else manual_wrappers.split('\n')
    # Build a fresh exclusion list: the previous implementation extended the
    # caller's list in place (``exclusions += ...``), a surprising side effect.
    excluded = list(exclusions) if exclusions is not None else []
    excluded += [get_sig_name(line) for line in manual_lines]
    signatures = []
    for filename in files:
        name = os.path.splitext(os.path.basename(filename))[0]
        if name in excluded:
            continue
        signatures.append(make_signature(filename))
    signatures += [line + '\n' for line in manual_lines]
    signatures.sort(key=get_sig_name)
    comment = ["# This file was generated by _cython_signature_generator.py.\n",
               "# Do not edit this file directly.\n\n"]
    with open(outfile, 'w') as f:
        f.writelines(comment)
        f.writelines(signatures)
# slamch and dlamch are not in the lapack src directory, but,since they
# already have Python wrappers, we'll wrap them as well.
# The other manual signatures are used because the signature generating
# functions don't work when function pointer arguments are used.
lapack_manual_wrappers = '''void cgees(char *jobvs, char *sort, cselect1 *select, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cgeesx(char *jobvs, char *sort, cselect1 *select, char *sense, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cgges(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cggesx(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, char *sense, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, int *liwork, bint *bwork, int *info)
void dgees(char *jobvs, char *sort, dselect2 *select, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *work, int *lwork, bint *bwork, int *info)
void dgeesx(char *jobvs, char *sort, dselect2 *select, char *sense, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
void dgges(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *work, int *lwork, bint *bwork, int *info)
void dggesx(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, char *sense, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
d dlamch(char *cmach)
void ilaver(int *vers_major, int *vers_minor, int *vers_patch)
void sgees(char *jobvs, char *sort, sselect2 *select, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *work, int *lwork, bint *bwork, int *info)
void sgeesx(char *jobvs, char *sort, sselect2 *select, char *sense, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
void sgges(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *work, int *lwork, bint *bwork, int *info)
void sggesx(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, char *sense, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
s slamch(char *cmach)
void zgees(char *jobvs, char *sort, zselect1 *select, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zgeesx(char *jobvs, char *sort, zselect1 *select, char *sense, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zgges(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zggesx(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, char *sense, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, int *liwork, bint *bwork, int *info)'''
# Exclude scabs1 and xerbla since they aren't currently included
# in the scipy-specific ABI wrappers.
blas_exclusions = ['scabs1', 'xerbla']
# Exclude routines with string arguments to avoid
# compatibility woes with different standards for string arguments.
lapack_exclusions = [
# Not included because people should be using the
# C standard library function instead.
# sisnan is also not currently included in the
# ABI wrappers.
'sisnan', 'dlaisnan', 'slaisnan',
# Exclude slaneg because it isn't currently included
# in the ABI wrappers
'slaneg',
# Excluded because they require Fortran string arguments.
'ilaenv', 'iparmq', 'lsamen', 'xerbla',
# Exclude XBLAS routines since they aren't included
# by default.
'cgesvxx', 'dgesvxx', 'sgesvxx', 'zgesvxx',
'cgerfsx', 'dgerfsx', 'sgerfsx', 'zgerfsx',
'cla_gerfsx_extended', 'dla_gerfsx_extended',
'sla_gerfsx_extended', 'zla_gerfsx_extended',
'cla_geamv', 'dla_geamv', 'sla_geamv', 'zla_geamv',
'dla_gercond', 'sla_gercond',
'cla_gercond_c', 'zla_gercond_c',
'cla_gercond_x', 'zla_gercond_x',
'cla_gerpvgrw', 'dla_gerpvgrw',
'sla_gerpvgrw', 'zla_gerpvgrw',
'csysvxx', 'dsysvxx', 'ssysvxx', 'zsysvxx',
'csyrfsx', 'dsyrfsx', 'ssyrfsx', 'zsyrfsx',
'cla_syrfsx_extended', 'dla_syrfsx_extended',
'sla_syrfsx_extended', 'zla_syrfsx_extended',
'cla_syamv', 'dla_syamv', 'sla_syamv', 'zla_syamv',
'dla_syrcond', 'sla_syrcond',
'cla_syrcond_c', 'zla_syrcond_c',
'cla_syrcond_x', 'zla_syrcond_x',
'cla_syrpvgrw', 'dla_syrpvgrw',
'sla_syrpvgrw', 'zla_syrpvgrw',
'cposvxx', 'dposvxx', 'sposvxx', 'zposvxx',
'cporfsx', 'dporfsx', 'sporfsx', 'zporfsx',
'cla_porfsx_extended', 'dla_porfsx_extended',
'sla_porfsx_extended', 'zla_porfsx_extended',
'dla_porcond', 'sla_porcond',
'cla_porcond_c', 'zla_porcond_c',
'cla_porcond_x', 'zla_porcond_x',
'cla_porpvgrw', 'dla_porpvgrw',
'sla_porpvgrw', 'zla_porpvgrw',
'cgbsvxx', 'dgbsvxx', 'sgbsvxx', 'zgbsvxx',
'cgbrfsx', 'dgbrfsx', 'sgbrfsx', 'zgbrfsx',
'cla_gbrfsx_extended', 'dla_gbrfsx_extended',
'sla_gbrfsx_extended', 'zla_gbrfsx_extended',
'cla_gbamv', 'dla_gbamv', 'sla_gbamv', 'zla_gbamv',
'dla_gbrcond', 'sla_gbrcond',
'cla_gbrcond_c', 'zla_gbrcond_c',
'cla_gbrcond_x', 'zla_gbrcond_x',
'cla_gbrpvgrw', 'dla_gbrpvgrw',
'sla_gbrpvgrw', 'zla_gbrpvgrw',
'chesvxx', 'zhesvxx',
'cherfsx', 'zherfsx',
'cla_herfsx_extended', 'zla_herfsx_extended',
'cla_heamv', 'zla_heamv',
'cla_hercond_c', 'zla_hercond_c',
'cla_hercond_x', 'zla_hercond_x',
'cla_herpvgrw', 'zla_herpvgrw',
'sla_lin_berr', 'cla_lin_berr',
'dla_lin_berr', 'zla_lin_berr',
'clarscl2', 'dlarscl2', 'slarscl2', 'zlarscl2',
'clascl2', 'dlascl2', 'slascl2', 'zlascl2',
'cla_wwaddw', 'dla_wwaddw', 'sla_wwaddw', 'zla_wwaddw',
]
if __name__ == '__main__':
    # Command line: python _cython_signature_generator.py {blas|lapack}
    # <source_directory> <output_file>
    from sys import argv
    libname, src_dir, outfile = argv[1:]
    if libname.lower() == 'blas':
        sigs_from_dir(src_dir, outfile, exclusions=blas_exclusions)
    elif libname.lower() == 'lapack':
        sigs_from_dir(src_dir, outfile, manual_wrappers=lapack_manual_wrappers,
                      exclusions=lapack_exclusions)
| 10,580
| 55.887097
| 297
|
py
|
scipy
|
scipy-main/scipy/linalg/lapack.py
|
"""
Low-level LAPACK functions (:mod:`scipy.linalg.lapack`)
=======================================================
This module contains low-level functions from the LAPACK library.
.. versionadded:: 0.12.0
.. note::
The common ``overwrite_<>`` option in many routines, allows the
input arrays to be overwritten to avoid extra memory allocation.
However this requires the array to satisfy two conditions
which are memory order and the data type to match exactly the
order and the type expected by the routine.
As an example, if you pass a double precision float array to any
``S....`` routine which expects single precision arguments, f2py
will create an intermediate array to match the argument types and
overwriting will be performed on that intermediate array.
Similarly, if a C-contiguous array is passed, f2py will pass a
FORTRAN-contiguous array internally. Please make sure that these
details are satisfied. More information can be found in the f2py
documentation.
.. warning::
These functions do little to no error checking.
It is possible to cause crashes by mis-using them,
so prefer using the higher-level routines in `scipy.linalg`.
Finding functions
-----------------
.. autosummary::
:toctree: generated/
get_lapack_funcs
All functions
-------------
.. autosummary::
:toctree: generated/
sgbsv
dgbsv
cgbsv
zgbsv
sgbtrf
dgbtrf
cgbtrf
zgbtrf
sgbtrs
dgbtrs
cgbtrs
zgbtrs
sgebal
dgebal
cgebal
zgebal
sgecon
dgecon
cgecon
zgecon
sgeequ
dgeequ
cgeequ
zgeequ
sgeequb
dgeequb
cgeequb
zgeequb
sgees
dgees
cgees
zgees
sgeev
dgeev
cgeev
zgeev
sgeev_lwork
dgeev_lwork
cgeev_lwork
zgeev_lwork
sgehrd
dgehrd
cgehrd
zgehrd
sgehrd_lwork
dgehrd_lwork
cgehrd_lwork
zgehrd_lwork
sgejsv
dgejsv
sgels
dgels
cgels
zgels
sgels_lwork
dgels_lwork
cgels_lwork
zgels_lwork
sgelsd
dgelsd
cgelsd
zgelsd
sgelsd_lwork
dgelsd_lwork
cgelsd_lwork
zgelsd_lwork
sgelss
dgelss
cgelss
zgelss
sgelss_lwork
dgelss_lwork
cgelss_lwork
zgelss_lwork
sgelsy
dgelsy
cgelsy
zgelsy
sgelsy_lwork
dgelsy_lwork
cgelsy_lwork
zgelsy_lwork
sgeqp3
dgeqp3
cgeqp3
zgeqp3
sgeqrf
dgeqrf
cgeqrf
zgeqrf
sgeqrf_lwork
dgeqrf_lwork
cgeqrf_lwork
zgeqrf_lwork
sgeqrfp
dgeqrfp
cgeqrfp
zgeqrfp
sgeqrfp_lwork
dgeqrfp_lwork
cgeqrfp_lwork
zgeqrfp_lwork
sgerqf
dgerqf
cgerqf
zgerqf
sgesdd
dgesdd
cgesdd
zgesdd
sgesdd_lwork
dgesdd_lwork
cgesdd_lwork
zgesdd_lwork
sgesv
dgesv
cgesv
zgesv
sgesvd
dgesvd
cgesvd
zgesvd
sgesvd_lwork
dgesvd_lwork
cgesvd_lwork
zgesvd_lwork
sgesvx
dgesvx
cgesvx
zgesvx
sgetrf
dgetrf
cgetrf
zgetrf
sgetc2
dgetc2
cgetc2
zgetc2
sgetri
dgetri
cgetri
zgetri
sgetri_lwork
dgetri_lwork
cgetri_lwork
zgetri_lwork
sgetrs
dgetrs
cgetrs
zgetrs
sgesc2
dgesc2
cgesc2
zgesc2
sgges
dgges
cgges
zgges
sggev
dggev
cggev
zggev
sgglse
dgglse
cgglse
zgglse
sgglse_lwork
dgglse_lwork
cgglse_lwork
zgglse_lwork
sgtsv
dgtsv
cgtsv
zgtsv
sgtsvx
dgtsvx
cgtsvx
zgtsvx
chbevd
zhbevd
chbevx
zhbevx
checon
zhecon
cheequb
zheequb
cheev
zheev
cheev_lwork
zheev_lwork
cheevd
zheevd
cheevd_lwork
zheevd_lwork
cheevr
zheevr
cheevr_lwork
zheevr_lwork
cheevx
zheevx
cheevx_lwork
zheevx_lwork
chegst
zhegst
chegv
zhegv
chegv_lwork
zhegv_lwork
chegvd
zhegvd
chegvx
zhegvx
chegvx_lwork
zhegvx_lwork
chesv
zhesv
chesv_lwork
zhesv_lwork
chesvx
zhesvx
chesvx_lwork
zhesvx_lwork
chetrd
zhetrd
chetrd_lwork
zhetrd_lwork
chetrf
zhetrf
chetrf_lwork
zhetrf_lwork
chfrk
zhfrk
slamch
dlamch
slange
dlange
clange
zlange
slarf
dlarf
clarf
zlarf
slarfg
dlarfg
clarfg
zlarfg
slartg
dlartg
clartg
zlartg
slasd4
dlasd4
slaswp
dlaswp
claswp
zlaswp
slauum
dlauum
clauum
zlauum
sorcsd
dorcsd
sorcsd_lwork
dorcsd_lwork
sorghr
dorghr
sorghr_lwork
dorghr_lwork
sorgqr
dorgqr
sorgrq
dorgrq
sormqr
dormqr
sormrz
dormrz
sormrz_lwork
dormrz_lwork
spbsv
dpbsv
cpbsv
zpbsv
spbtrf
dpbtrf
cpbtrf
zpbtrf
spbtrs
dpbtrs
cpbtrs
zpbtrs
spftrf
dpftrf
cpftrf
zpftrf
spftri
dpftri
cpftri
zpftri
spftrs
dpftrs
cpftrs
zpftrs
spocon
dpocon
cpocon
zpocon
spstrf
dpstrf
cpstrf
zpstrf
spstf2
dpstf2
cpstf2
zpstf2
sposv
dposv
cposv
zposv
sposvx
dposvx
cposvx
zposvx
spotrf
dpotrf
cpotrf
zpotrf
spotri
dpotri
cpotri
zpotri
spotrs
dpotrs
cpotrs
zpotrs
sppcon
dppcon
cppcon
zppcon
sppsv
dppsv
cppsv
zppsv
spptrf
dpptrf
cpptrf
zpptrf
spptri
dpptri
cpptri
zpptri
spptrs
dpptrs
cpptrs
zpptrs
sptsv
dptsv
cptsv
zptsv
sptsvx
dptsvx
cptsvx
zptsvx
spttrf
dpttrf
cpttrf
zpttrf
spttrs
dpttrs
cpttrs
zpttrs
spteqr
dpteqr
cpteqr
zpteqr
crot
zrot
ssbev
dsbev
ssbevd
dsbevd
ssbevx
dsbevx
ssfrk
dsfrk
sstebz
dstebz
sstein
dstein
sstemr
dstemr
sstemr_lwork
dstemr_lwork
ssterf
dsterf
sstev
dstev
ssycon
dsycon
csycon
zsycon
ssyconv
dsyconv
csyconv
zsyconv
ssyequb
dsyequb
csyequb
zsyequb
ssyev
dsyev
ssyev_lwork
dsyev_lwork
ssyevd
dsyevd
ssyevd_lwork
dsyevd_lwork
ssyevr
dsyevr
ssyevr_lwork
dsyevr_lwork
ssyevx
dsyevx
ssyevx_lwork
dsyevx_lwork
ssygst
dsygst
ssygv
dsygv
ssygv_lwork
dsygv_lwork
ssygvd
dsygvd
ssygvx
dsygvx
ssygvx_lwork
dsygvx_lwork
ssysv
dsysv
csysv
zsysv
ssysv_lwork
dsysv_lwork
csysv_lwork
zsysv_lwork
ssysvx
dsysvx
csysvx
zsysvx
ssysvx_lwork
dsysvx_lwork
csysvx_lwork
zsysvx_lwork
ssytf2
dsytf2
csytf2
zsytf2
ssytrd
dsytrd
ssytrd_lwork
dsytrd_lwork
ssytrf
dsytrf
csytrf
zsytrf
ssytrf_lwork
dsytrf_lwork
csytrf_lwork
zsytrf_lwork
stbtrs
dtbtrs
ctbtrs
ztbtrs
stfsm
dtfsm
ctfsm
ztfsm
stfttp
dtfttp
ctfttp
ztfttp
stfttr
dtfttr
ctfttr
ztfttr
stgexc
dtgexc
ctgexc
ztgexc
stgsen
dtgsen
ctgsen
ztgsen
stgsen_lwork
dtgsen_lwork
ctgsen_lwork
ztgsen_lwork
stgsyl
dtgsyl
stpttf
dtpttf
ctpttf
ztpttf
stpttr
dtpttr
ctpttr
ztpttr
strexc
dtrexc
ctrexc
ztrexc
strsen
dtrsen
ctrsen
ztrsen
strsen_lwork
dtrsen_lwork
ctrsen_lwork
ztrsen_lwork
strsyl
dtrsyl
ctrsyl
ztrsyl
strtri
dtrtri
ctrtri
ztrtri
strtrs
dtrtrs
ctrtrs
ztrtrs
strttf
dtrttf
ctrttf
ztrttf
strttp
dtrttp
ctrttp
ztrttp
stzrzf
dtzrzf
ctzrzf
ztzrzf
stzrzf_lwork
dtzrzf_lwork
ctzrzf_lwork
ztzrzf_lwork
cunghr
zunghr
cunghr_lwork
zunghr_lwork
cungqr
zungqr
cungrq
zungrq
cunmqr
zunmqr
sgeqrt
dgeqrt
cgeqrt
zgeqrt
sgemqrt
dgemqrt
cgemqrt
zgemqrt
sgttrf
dgttrf
cgttrf
zgttrf
sgttrs
dgttrs
cgttrs
zgttrs
stpqrt
dtpqrt
ctpqrt
ztpqrt
stpmqrt
dtpmqrt
ctpmqrt
ztpmqrt
cuncsd
zuncsd
cuncsd_lwork
zuncsd_lwork
cunmrz
zunmrz
cunmrz_lwork
zunmrz_lwork
ilaver
"""
#
# Author: Pearu Peterson, March 2002
#
import numpy as _np
from .blas import _get_funcs, _memoize_get_funcs
from scipy.linalg import _flapack
from re import compile as regex_compile
try:
from scipy.linalg import _clapack
except ImportError:
_clapack = None
# Probe for the ILP64 (64-bit integer) LAPACK bindings; the module only
# exists when SciPy was built against an ILP64 BLAS/LAPACK.
try:
    from scipy.linalg import _flapack_64
    HAS_ILP64 = True
except ImportError:
    HAS_ILP64 = False
    _flapack_64 = None
# Expose all functions (only flapack --- clapack is an implementation detail)
empty_module = None
from scipy.linalg._flapack import *
del empty_module
__all__ = ['get_lapack_funcs']
# some convenience alias for complex functions
# (maps real-style 'or...' prefixed names to the complex 'un...' LAPACK
# names; presumably applied inside _get_funcs -- verify there.)
_lapack_alias = {
    'corghr': 'cunghr', 'zorghr': 'zunghr',
    'corghr_lwork': 'cunghr_lwork', 'zorghr_lwork': 'zunghr_lwork',
    'corgqr': 'cungqr', 'zorgqr': 'zungqr',
    'cormqr': 'cunmqr', 'zormqr': 'zunmqr',
    'corgrq': 'cungrq', 'zorgrq': 'zungrq',
}
# Place guards against docstring rendering issues with special characters
# p1 matches f2py-generated "with bounds ... [and ... storage]" phrases,
# p2 matches "Default: ..." phrases; both get wrapped in double backticks
# below so Sphinx renders them as literals.
p1 = regex_compile(r'with bounds (?P<b>.*?)( and (?P<s>.*?) storage){0,1}\n')
p2 = regex_compile(r'Default: (?P<d>.*?)\n')
def backtickrepl(m):
    """re.sub callback: wrap the matched bounds/storage text in backticks."""
    bounds = m.group('b')
    storage = m.group('s')
    if storage:
        return f'with bounds ``{bounds}`` with ``{storage}`` storage\n'
    return f'with bounds ``{bounds}``\n'
# Rewrite the f2py-generated docstrings of the (generalized) eigensolver
# routines so the bounds/default annotations matched by p1/p2 render as
# inline literals in the docs.
for routine in [ssyevr, dsyevr, cheevr, zheevr,
                ssyevx, dsyevx, cheevx, zheevx,
                ssygvd, dsygvd, chegvd, zhegvd]:
    # __doc__ can be None (e.g. under python -OO); skip those.  The dead
    # `else: continue` branch of the original was a no-op and is removed.
    if routine.__doc__:
        routine.__doc__ = p1.sub(backtickrepl, routine.__doc__)
        routine.__doc__ = p2.sub('Default ``\\1``\n', routine.__doc__)
del regex_compile, p1, p2, backtickrepl
@_memoize_get_funcs
def get_lapack_funcs(names, arrays=(), dtype=None, ilp64=False):
    """Return available LAPACK function objects from names.

    Arrays are used to determine the optimal prefix of LAPACK routines.

    Parameters
    ----------
    names : str or sequence of str
        Name(s) of LAPACK functions without type prefix.
    arrays : sequence of ndarrays, optional
        Arrays can be given to determine optimal prefix of LAPACK
        routines. If not given, double-precision routines will be
        used, otherwise the most generic type in arrays will be used.
    dtype : str or dtype, optional
        Data-type specifier. Not used if `arrays` is non-empty.
    ilp64 : {True, False, 'preferred'}, optional
        Whether to return ILP64 routine variant.
        Choosing 'preferred' returns ILP64 routine if available, and
        otherwise the 32-bit routine. Default: False

    Returns
    -------
    funcs : list
        List containing the found function(s).

    Notes
    -----
    This routine automatically chooses between Fortran/C
    interfaces. Fortran code is used whenever possible for arrays with
    column major order. In all other cases, C code is preferred.

    In LAPACK, the naming convention is that all functions start with a
    type prefix, which depends on the type of the principal
    matrix. These can be one of {'s', 'd', 'c', 'z'} for the NumPy
    types {float32, float64, complex64, complex128} respectively, and
    are stored in attribute ``typecode`` of the returned functions.

    Examples
    --------
    Suppose we would like to use '?lange' routine which computes the selected
    norm of an array. We pass our array in order to get the correct 'lange'
    flavor.

    >>> import numpy as np
    >>> import scipy.linalg as LA
    >>> rng = np.random.default_rng()
    >>> a = rng.random((3,2))
    >>> x_lange = LA.get_lapack_funcs('lange', (a,))
    >>> x_lange.typecode
    'd'
    >>> x_lange = LA.get_lapack_funcs('lange',(a*1j,))
    >>> x_lange.typecode
    'z'

    Several LAPACK routines work best when its internal WORK array has
    the optimal size (big enough for fast computation and small enough to
    avoid waste of memory). This size is determined also by a dedicated query
    to the function which is often wrapped as a standalone function and
    commonly denoted as ``###_lwork``. Below is an example for ``?sysv``

    >>> a = rng.random((1000, 1000))
    >>> b = rng.random((1000, 1)) * 1j
    >>> # We pick up zsysv and zsysv_lwork due to b array
    ... xsysv, xlwork = LA.get_lapack_funcs(('sysv', 'sysv_lwork'), (a, b))
    >>> opt_lwork, _ = xlwork(a.shape[0]) # returns a complex for 'z' prefix
    >>> udut, ipiv, x, info = xsysv(a, b, lwork=int(opt_lwork.real))
    """
    if isinstance(ilp64, str):
        # Only the string 'preferred' is accepted: it degrades gracefully to
        # the 32-bit routines when no ILP64 build is available.
        if ilp64 == 'preferred':
            ilp64 = HAS_ILP64
        else:
            raise ValueError("Invalid value for 'ilp64'")
    if not ilp64:
        return _get_funcs(names, arrays, dtype,
                          "LAPACK", _flapack, _clapack,
                          "flapack", "clapack", _lapack_alias,
                          ilp64=False)
    else:
        if not HAS_ILP64:
            raise RuntimeError("LAPACK ILP64 routine requested, but Scipy "
                               "compiled only with 32-bit BLAS")
        return _get_funcs(names, arrays, dtype,
                          "LAPACK", _flapack_64, None,
                          "flapack_64", None, _lapack_alias,
                          ilp64=True)
# Largest work-array sizes representable with 32/64-bit LAPACK integers;
# used by _check_work_float below.
_int32_max = _np.iinfo(_np.int32).max
_int64_max = _np.iinfo(_np.int64).max
def _compute_lwork(routine, *args, **kwargs):
    """
    Round floating-point lwork returned by lapack to integer.

    Several LAPACK routines compute optimal values for LWORK, which
    they return in a floating-point variable. However, for large
    values of LWORK, single-precision floating point is not sufficient
    to hold the exact value --- some LAPACK versions (<= 3.5.0 at
    least) truncate the returned integer to single precision and in
    some cases this can be smaller than the required value.

    Examples
    --------
    >>> from scipy.linalg import lapack
    >>> n = 5000
    >>> s_r, s_lw = lapack.get_lapack_funcs(('sysvx', 'sysvx_lwork'))
    >>> lwork = lapack._compute_lwork(s_lw, n)
    >>> lwork
    32000
    """
    dtype = getattr(routine, 'dtype', None)
    int_dtype = getattr(routine, 'int_dtype', None)
    ret = routine(*args, **kwargs)
    # The last element of a *_lwork query result is the LAPACK info flag.
    if ret[-1] != 0:
        raise ValueError("Internal work array size computation failed: "
                         "%d" % (ret[-1],))
    if len(ret) == 2:
        return _check_work_float(ret[0].real, dtype, int_dtype)
    else:
        # Multiple work-array sizes were returned; convert each one.
        return tuple(_check_work_float(x.real, dtype, int_dtype)
                     for x in ret[:-1])
def _check_work_float(value, dtype, int_dtype):
    """
    Convert LAPACK-returned work array size float to integer,
    carefully for single-precision types.

    ``value`` is bumped to the next representable float32 for
    single-precision routines (to compensate possible truncation inside
    LAPACK), then converted to ``int`` and checked against the maximum
    integer representable by the routine's integer width.
    """
    if dtype in (_np.float32, _np.complex64):
        # Single-precision routine -- step up one float32 ulp to undo a
        # possible downward truncation inside the LAPACK code.
        value = _np.nextafter(value, _np.inf, dtype=_np.float32)
    lwork = int(value)
    if int_dtype.itemsize == 4:
        if not (0 <= lwork <= _int32_max):
            raise ValueError("Too large work array required -- computation "
                             "cannot be performed with standard 32-bit"
                             " LAPACK.")
    elif int_dtype.itemsize == 8:
        if not (0 <= lwork <= _int64_max):
            raise ValueError("Too large work array required -- computation"
                             " cannot be performed with standard 64-bit"
                             " LAPACK.")
    return lwork
| 15,647
| 14.046154
| 77
|
py
|
scipy
|
scipy-main/scipy/linalg/_solvers.py
|
"""Matrix equation solver routines"""
# Author: Jeffrey Armstrong <jeff@approximatrix.com>
# February 24, 2012
# Modified: Chad Fulton <ChadFulton@gmail.com>
# June 19, 2014
# Modified: Ilhan Polat <ilhanpolat@gmail.com>
# September 13, 2016
import warnings
import numpy as np
from numpy.linalg import inv, LinAlgError, norm, cond, svd
from ._basic import solve, solve_triangular, matrix_balance
from .lapack import get_lapack_funcs
from ._decomp_schur import schur
from ._decomp_lu import lu
from ._decomp_qr import qr
from ._decomp_qz import ordqz
from ._decomp import _asarray_validated
from ._special_matrices import kron, block_diag
# Names re-exported by ``from scipy.linalg._solvers import *``.
__all__ = ['solve_sylvester',
           'solve_continuous_lyapunov', 'solve_discrete_lyapunov',
           'solve_lyapunov',
           'solve_continuous_are', 'solve_discrete_are']
def solve_sylvester(a, b, q):
    """
    Computes a solution (X) to the Sylvester equation :math:`AX + XB = Q`.

    Parameters
    ----------
    a : (M, M) array_like
        Leading matrix of the Sylvester equation
    b : (N, N) array_like
        Trailing matrix of the Sylvester equation
    q : (M, N) array_like
        Right-hand side

    Returns
    -------
    x : (M, N) ndarray
        The solution to the Sylvester equation.

    Raises
    ------
    LinAlgError
        If solution was not found

    Notes
    -----
    Implements the Bartels-Stewart approach: both coefficient matrices
    are reduced to (quasi-)triangular Schur form, the transformed
    equation ``RY + YS^T = F`` is handed directly to LAPACK's ``*TRSYL``,
    and the result is rotated back to the original bases.

    .. versionadded:: 0.11.0

    Examples
    --------
    Given `a`, `b`, and `q` solve for `x`:

    >>> import numpy as np
    >>> from scipy import linalg
    >>> a = np.array([[-3, -2, 0], [-1, -1, 3], [3, -5, -1]])
    >>> b = np.array([[1]])
    >>> q = np.array([[1], [2], [3]])
    >>> x = linalg.solve_sylvester(a, b, q)
    >>> x
    array([[ 0.0625],
           [-0.5625],
           [ 0.6875]])
    >>> np.allclose(a.dot(x) + x.dot(b), q)
    True
    """
    # Schur-reduce both coefficient matrices (Bartels-Stewart).
    r, u = schur(a, output='real')
    s, v = schur(b.conj().transpose(), output='real')

    # Rotate the right-hand side into the Schur bases: f = u^H q v.
    uh_q = np.dot(u.conj().transpose(), q)
    f = np.dot(uh_q, v)

    # Hand the quasi-triangular problem to LAPACK's ?TRSYL.
    trsyl, = get_lapack_funcs(('trsyl',), (r, s, f))
    if trsyl is None:
        raise RuntimeError('LAPACK implementation does not contain a proper '
                           'Sylvester equation solver (TRSYL)')
    y, scale, info = trsyl(r, s, f, tranb='C')
    y = scale*y
    if info < 0:
        raise LinAlgError("Illegal value encountered in "
                          "the %d term" % (-info,))

    # Rotate the solution back to the original bases.
    return np.dot(np.dot(u, y), v.conj().transpose())
def solve_continuous_lyapunov(a, q):
    """
    Solves the continuous Lyapunov equation :math:`AX + XA^H = Q`.

    Uses the Bartels-Stewart algorithm to find :math:`X`.

    Parameters
    ----------
    a : array_like
        A square matrix
    q : array_like
        Right-hand side square matrix

    Returns
    -------
    x : ndarray
        Solution to the continuous Lyapunov equation

    See Also
    --------
    solve_discrete_lyapunov : computes the solution to the discrete-time
        Lyapunov equation
    solve_sylvester : computes the solution to the Sylvester equation

    Notes
    -----
    The continuous Lyapunov equation is a special case of the Sylvester
    equation, hence this solver relies on the LAPACK routine ?TRSYL.

    .. versionadded:: 0.11.0

    Examples
    --------
    Given `a` and `q` solve for `x`:

    >>> import numpy as np
    >>> from scipy import linalg
    >>> a = np.array([[-3, -2, 0], [-1, -1, 0], [0, -5, -1]])
    >>> q = np.eye(3)
    >>> x = linalg.solve_continuous_lyapunov(a, q)
    >>> np.allclose(a.dot(x) + x.dot(a.T), q)
    True
    """
    a = np.atleast_2d(_asarray_validated(a, check_finite=True))
    q = np.atleast_2d(_asarray_validated(q, check_finite=True))

    # Switch to complex arithmetic as soon as either input is complex,
    # and reject non-square inputs while scanning them.
    r_or_c = float
    for ind, mat in enumerate((a, q)):
        if np.iscomplexobj(mat):
            r_or_c = complex
        if not np.equal(*mat.shape):
            raise ValueError("Matrix {} should be square.".format("aq"[ind]))

    if a.shape != q.shape:
        raise ValueError("Matrix a and q should have the same shape.")

    # Schur-reduce ``a`` and rotate the right-hand side into that basis.
    r, u = schur(a, output='real')
    uh = u.conj().T
    f = uh.dot(q.dot(u))

    trsyl = get_lapack_funcs('trsyl', (r, f))
    # Transpose flag: plain transpose for real data, conjugate otherwise.
    tranb_flag = 'T' if r_or_c == float else 'C'
    y, scale, info = trsyl(r, r, f, tranb=tranb_flag)
    if info < 0:
        raise ValueError('?TRSYL exited with the internal error '
                         '"illegal value in argument number {}.". See '
                         'LAPACK documentation for the ?TRSYL error codes.'
                         ''.format(-info))
    elif info == 1:
        warnings.warn('Input "a" has an eigenvalue pair whose sum is '
                      'very close to or exactly zero. The solution is '
                      'obtained via perturbing the coefficients.',
                      RuntimeWarning)
    y *= scale

    # Rotate the solution back: x = u y u^H.
    return u.dot(y).dot(u.conj().T)
# For backwards compatibility, keep the old name:
# ``solve_lyapunov`` is simply an alias bound to the same function
# object as ``solve_continuous_lyapunov``.
solve_lyapunov = solve_continuous_lyapunov
def _solve_discrete_lyapunov_direct(a, q):
    """
    Solves the discrete Lyapunov equation directly.

    Vectorizes ``AXA^H - X = -Q`` into the linear system
    ``(I - kron(A, conj(A))) vec(X) = vec(Q)`` and solves it.  Called by
    `solve_discrete_lyapunov` with ``method='direct'``; it is not
    supposed to be called directly.
    """
    kron_aa = kron(a, a.conj())
    system = np.eye(kron_aa.shape[0]) - kron_aa
    vec_x = solve(system, q.flatten())
    return vec_x.reshape(q.shape)
def _solve_discrete_lyapunov_bilinear(a, q):
    """
    Solves the discrete Lyapunov equation using a bilinear transformation.

    Maps the discrete problem onto an equivalent continuous Lyapunov
    equation and delegates to `solve_lyapunov`.  Called by
    `solve_discrete_lyapunov` with ``method='bilinear'``; it is not
    supposed to be called directly.
    """
    identity = np.eye(a.shape[0])
    a_h = a.conj().transpose()
    # Resolvent-style factor shared by both transformed coefficients.
    shift_inv = inv(a_h + identity)
    b_cont = np.dot(a_h - identity, shift_inv)
    c_cont = 2*np.dot(np.dot(inv(a + identity), q), shift_inv)
    return solve_lyapunov(b_cont.conj().transpose(), -c_cont)
def solve_discrete_lyapunov(a, q, method=None):
    """
    Solves the discrete Lyapunov equation :math:`AXA^H - X + Q = 0`.

    Parameters
    ----------
    a, q : (M, M) array_like
        Square matrices corresponding to A and Q in the equation
        above respectively. Must have the same shape.
    method : {'direct', 'bilinear'}, optional
        Type of solver.
        If not given, chosen to be ``direct`` if ``M`` is less than 10 and
        ``bilinear`` otherwise.

    Returns
    -------
    x : ndarray
        Solution to the discrete Lyapunov equation

    See Also
    --------
    solve_continuous_lyapunov : computes the solution to the continuous-time
        Lyapunov equation

    Notes
    -----
    Method *direct* solves the :math:`M^2`-dimensional linear system
    obtained by vectorizing the equation (see e.g. [1]_); exact but its
    cost grows rapidly with ``M``.  Method *bilinear* converts the
    problem to a continuous Lyapunov equation :math:`(BX+XB'=-C)` with
    :math:`B=(A-I)(A+I)^{-1}` and
    :math:`C=2(A' + I)^{-1} Q (A + I)^{-1}` (Popov's transformation as
    described in [2]_), which is solved efficiently as a special case of
    a Sylvester equation.

    .. versionadded:: 0.11.0

    References
    ----------
    .. [1] Hamilton, James D. Time Series Analysis, Princeton: Princeton
       University Press, 1994.  265.  Print.
       http://doc1.lbfl.li/aca/FLMF037168.pdf
    .. [2] Gajic, Z., and M.T.J. Qureshi. 2008.
       Lyapunov Matrix Equation in System Stability and Control.
       Dover Books on Engineering Series. Dover Publications.

    Examples
    --------
    Given `a` and `q` solve for `x`:

    >>> import numpy as np
    >>> from scipy import linalg
    >>> a = np.array([[0.2, 0.5], [0.7, -0.9]])
    >>> q = np.eye(2)
    >>> x = linalg.solve_discrete_lyapunov(a, q)
    >>> np.allclose(a.dot(x).dot(a.T) - x, -q)
    True
    """
    a = np.asarray(a)
    q = np.asarray(q)
    if method is None:
        # Heuristic: the direct solver works on an M^2-sized system, so
        # prefer the bilinear route for anything but small problems.
        method = 'direct' if a.shape[0] < 10 else 'bilinear'
    meth = method.lower()
    if meth == 'direct':
        return _solve_discrete_lyapunov_direct(a, q)
    if meth == 'bilinear':
        return _solve_discrete_lyapunov_bilinear(a, q)
    raise ValueError('Unknown solver %s' % method)
def solve_continuous_are(a, b, q, r, e=None, s=None, balanced=True):
    r"""
    Solves the continuous-time algebraic Riccati equation (CARE).
    The CARE is defined as
    .. math::
          X A + A^H X - X B R^{-1} B^H X + Q = 0
    The limitations for a solution to exist are :
        * All eigenvalues of :math:`A` on the right half plane, should be
          controllable.
        * The associated hamiltonian pencil (See Notes), should have
          eigenvalues sufficiently away from the imaginary axis.
    Moreover, if ``e`` or ``s`` is not precisely ``None``, then the
    generalized version of CARE
    .. math::
          E^HXA + A^HXE - (E^HXB + S) R^{-1} (B^HXE + S^H) + Q = 0
    is solved. When omitted, ``e`` is assumed to be the identity and ``s``
    is assumed to be the zero matrix with sizes compatible with ``a`` and
    ``b``, respectively.
    Parameters
    ----------
    a : (M, M) array_like
        Square matrix
    b : (M, N) array_like
        Input
    q : (M, M) array_like
        Input
    r : (N, N) array_like
        Nonsingular square matrix
    e : (M, M) array_like, optional
        Nonsingular square matrix
    s : (M, N) array_like, optional
        Input
    balanced : bool, optional
        The boolean that indicates whether a balancing step is performed
        on the data. The default is set to True.
    Returns
    -------
    x : (M, M) ndarray
        Solution to the continuous-time algebraic Riccati equation.
    Raises
    ------
    LinAlgError
        For cases where the stable subspace of the pencil could not be
        isolated. See Notes section and the references for details.
    See Also
    --------
    solve_discrete_are : Solves the discrete-time algebraic Riccati equation
    Notes
    -----
    The equation is solved by forming the extended hamiltonian matrix pencil,
    as described in [1]_, :math:`H - \lambda J` given by the block matrices ::
        [ A    0    B ]             [ E   0    0 ]
        [-Q  -A^H  -S ] - \lambda * [ 0  E^H   0 ]
        [ S^H B^H   R ]             [ 0   0    0 ]
    and using a QZ decomposition method.
    In this algorithm, the fail conditions are linked to the symmetry
    of the product :math:`U_2 U_1^{-1}` and condition number of
    :math:`U_1`. Here, :math:`U` is the 2m-by-m matrix that holds the
    eigenvectors spanning the stable subspace with 2-m rows and partitioned
    into two m-row matrices. See [1]_ and [2]_ for more details.
    In order to improve the QZ decomposition accuracy, the pencil goes
    through a balancing step where the sum of absolute values of
    :math:`H` and :math:`J` entries (after removing the diagonal entries of
    the sum) is balanced following the recipe given in [3]_.
    .. versionadded:: 0.11.0
    References
    ----------
    .. [1] P. van Dooren , "A Generalized Eigenvalue Approach For Solving
       Riccati Equations.", SIAM Journal on Scientific and Statistical
       Computing, Vol.2(2), :doi:`10.1137/0902010`
    .. [2] A.J. Laub, "A Schur Method for Solving Algebraic Riccati
       Equations.", Massachusetts Institute of Technology. Laboratory for
       Information and Decision Systems. LIDS-R ; 859. Available online :
       http://hdl.handle.net/1721.1/1301
    .. [3] P. Benner, "Symplectic Balancing of Hamiltonian Matrices", 2001,
       SIAM J. Sci. Comput., 2001, Vol.22(5), :doi:`10.1137/S1064827500367993`
    Examples
    --------
    Given `a`, `b`, `q`, and `r` solve for `x`:
    >>> import numpy as np
    >>> from scipy import linalg
    >>> a = np.array([[4, 3], [-4.5, -3.5]])
    >>> b = np.array([[1], [-1]])
    >>> q = np.array([[9, 6], [6, 4.]])
    >>> r = 1
    >>> x = linalg.solve_continuous_are(a, b, q, r)
    >>> x
    array([[ 21.72792206,  14.48528137],
           [ 14.48528137,   9.65685425]])
    >>> np.allclose(a.T.dot(x) + x.dot(a)-x.dot(b).dot(b.T).dot(x), -q)
    True
    """
    # Validate input arguments
    a, b, q, r, e, s, m, n, r_or_c, gen_are = _are_validate_args(
                                                     a, b, q, r, e, s, 'care')
    # Assemble the (2m+n)-square extended pencil matrix H following the
    # block layout given in the Notes section above.
    H = np.empty((2*m+n, 2*m+n), dtype=r_or_c)
    H[:m, :m] = a
    H[:m, m:2*m] = 0.
    H[:m, 2*m:] = b
    H[m:2*m, :m] = -q
    H[m:2*m, m:2*m] = -a.conj().T
    H[m:2*m, 2*m:] = 0. if s is None else -s
    H[2*m:, :m] = 0. if s is None else s.conj().T
    H[2*m:, m:2*m] = b.conj().T
    H[2*m:, 2*m:] = r
    # J carries E (or the identity in the non-generalized case) in its
    # leading 2m-by-2m block; the trailing n-by-n block is zero.
    if gen_are and e is not None:
        J = block_diag(e, e.conj().T, np.zeros_like(r, dtype=r_or_c))
    else:
        J = block_diag(np.eye(2*m), np.zeros_like(r, dtype=r_or_c))
    if balanced:
        # xGEBAL does not remove the diagonals before scaling. Also
        # to avoid destroying the Symplectic structure, we follow Ref.3
        M = np.abs(H) + np.abs(J)
        M[np.diag_indices_from(M)] = 0.
        _, (sca, _) = matrix_balance(M, separate=1, permute=0)
        # do we need to bother?
        if not np.allclose(sca, np.ones_like(sca)):
            # Now impose diag(D,inv(D)) from Benner where D is
            # square root of s_i/s_(n+i) for i=0,....
            sca = np.log2(sca)
            # NOTE: Py3 uses "Bankers Rounding: round to the nearest even" !!
            s = np.round((sca[m:2*m] - sca[:m])/2)
            sca = 2 ** np.r_[s, -s, sca[2*m:]]
            # Elementwise multiplication via broadcasting.
            elwisescale = sca[:, None] * np.reciprocal(sca)
            H *= elwisescale
            J *= elwisescale
    # Deflate the pencil to 2m x 2m ala Ref.1, eq.(55)
    # NOTE: ``q`` and ``r`` are rebound to the QR factors here; the input
    # matrices of the same names are not needed past this point.
    q, r = qr(H[:, -n:])
    H = q[:, n:].conj().T.dot(H[:, :2*m])
    J = q[:2*m, n:].conj().T.dot(J[:2*m, :2*m])
    # Decide on which output type is needed for QZ
    out_str = 'real' if r_or_c == float else 'complex'
    # Reorder the QZ decomposition so the left-half-plane ('lhp')
    # eigenvalues come first; their eigenvectors span the stable subspace.
    _, _, _, _, _, u = ordqz(H, J, sort='lhp', overwrite_a=True,
                             overwrite_b=True, check_finite=False,
                             output=out_str)
    # Get the relevant parts of the stable subspace basis
    if e is not None:
        u, _ = qr(np.vstack((e.dot(u[:m, :m]), u[m:, :m])))
    u00 = u[:m, :m]
    u10 = u[m:, :m]
    # Solve via back-substituion after checking the condition of u00
    up, ul, uu = lu(u00)
    if 1/cond(uu) < np.spacing(1.):
        raise LinAlgError('Failed to find a finite solution.')
    # Exploit the triangular structure
    x = solve_triangular(ul.conj().T,
                         solve_triangular(uu.conj().T,
                                          u10.conj().T,
                                          lower=True),
                         unit_diagonal=True,
                         ).conj().T.dot(up.conj().T)
    if balanced:
        # Undo the balancing scaling on the solution.
        x *= sca[:m, None] * sca[:m]
    # Check the deviation from symmetry for lack of success
    # See proof of Thm.5 item 3 in [2]
    u_sym = u00.conj().T.dot(u10)
    n_u_sym = norm(u_sym, 1)
    u_sym = u_sym - u_sym.conj().T
    sym_threshold = np.max([np.spacing(1000.), 0.1*n_u_sym])
    if norm(u_sym, 1) > sym_threshold:
        raise LinAlgError('The associated Hamiltonian pencil has eigenvalues '
                          'too close to the imaginary axis')
    # Return the symmetrized result to strip asymmetric numerical noise.
    return (x + x.conj().T)/2
def solve_discrete_are(a, b, q, r, e=None, s=None, balanced=True):
    r"""
    Solves the discrete-time algebraic Riccati equation (DARE).
    The DARE is defined as
    .. math::
          A^HXA - X - (A^HXB) (R + B^HXB)^{-1} (B^HXA) + Q = 0
    The limitations for a solution to exist are :
        * All eigenvalues of :math:`A` outside the unit disc, should be
          controllable.
        * The associated symplectic pencil (See Notes), should have
          eigenvalues sufficiently away from the unit circle.
    Moreover, if ``e`` and ``s`` are not both precisely ``None``, then the
    generalized version of DARE
    .. math::
          A^HXA - E^HXE - (A^HXB+S) (R+B^HXB)^{-1} (B^HXA+S^H) + Q = 0
    is solved. When omitted, ``e`` is assumed to be the identity and ``s``
    is assumed to be the zero matrix.
    Parameters
    ----------
    a : (M, M) array_like
        Square matrix
    b : (M, N) array_like
        Input
    q : (M, M) array_like
        Input
    r : (N, N) array_like
        Square matrix
    e : (M, M) array_like, optional
        Nonsingular square matrix
    s : (M, N) array_like, optional
        Input
    balanced : bool
        The boolean that indicates whether a balancing step is performed
        on the data. The default is set to True.
    Returns
    -------
    x : (M, M) ndarray
        Solution to the discrete algebraic Riccati equation.
    Raises
    ------
    LinAlgError
        For cases where the stable subspace of the pencil could not be
        isolated. See Notes section and the references for details.
    See Also
    --------
    solve_continuous_are : Solves the continuous algebraic Riccati equation
    Notes
    -----
    The equation is solved by forming the extended symplectic matrix pencil,
    as described in [1]_, :math:`H - \lambda J` given by the block matrices ::
           [  A   0   B ]             [ E   0   B ]
           [ -Q  E^H -S ] - \lambda * [ 0  A^H  0 ]
           [ S^H  0   R ]             [ 0 -B^H  0 ]
    and using a QZ decomposition method.
    In this algorithm, the fail conditions are linked to the symmetry
    of the product :math:`U_2 U_1^{-1}` and condition number of
    :math:`U_1`. Here, :math:`U` is the 2m-by-m matrix that holds the
    eigenvectors spanning the stable subspace with 2-m rows and partitioned
    into two m-row matrices. See [1]_ and [2]_ for more details.
    In order to improve the QZ decomposition accuracy, the pencil goes
    through a balancing step where the sum of absolute values of
    :math:`H` and :math:`J` rows/cols (after removing the diagonal entries)
    is balanced following the recipe given in [3]_. If the data has small
    numerical noise, balancing may amplify their effects and some clean up
    is required.
    .. versionadded:: 0.11.0
    References
    ----------
    .. [1] P. van Dooren , "A Generalized Eigenvalue Approach For Solving
       Riccati Equations.", SIAM Journal on Scientific and Statistical
       Computing, Vol.2(2), :doi:`10.1137/0902010`
    .. [2] A.J. Laub, "A Schur Method for Solving Algebraic Riccati
       Equations.", Massachusetts Institute of Technology. Laboratory for
       Information and Decision Systems. LIDS-R ; 859. Available online :
       http://hdl.handle.net/1721.1/1301
    .. [3] P. Benner, "Symplectic Balancing of Hamiltonian Matrices", 2001,
       SIAM J. Sci. Comput., 2001, Vol.22(5), :doi:`10.1137/S1064827500367993`
    Examples
    --------
    Given `a`, `b`, `q`, and `r` solve for `x`:
    >>> import numpy as np
    >>> from scipy import linalg as la
    >>> a = np.array([[0, 1], [0, -1]])
    >>> b = np.array([[1, 0], [2, 1]])
    >>> q = np.array([[-4, -4], [-4, 7]])
    >>> r = np.array([[9, 3], [3, 1]])
    >>> x = la.solve_discrete_are(a, b, q, r)
    >>> x
    array([[-4., -4.],
           [-4.,  7.]])
    >>> R = la.solve(r + b.T.dot(x).dot(b), b.T.dot(x).dot(a))
    >>> np.allclose(a.T.dot(x).dot(a) - x - a.T.dot(x).dot(b).dot(R), -q)
    True
    """
    # Validate input arguments
    a, b, q, r, e, s, m, n, r_or_c, gen_are = _are_validate_args(
                                                     a, b, q, r, e, s, 'dare')
    # Form the matrix pencil
    # H follows the (2m+n)-square block layout in the Notes section;
    # starting from np.zeros covers the blocks that stay zero.
    H = np.zeros((2*m+n, 2*m+n), dtype=r_or_c)
    H[:m, :m] = a
    H[:m, 2*m:] = b
    H[m:2*m, :m] = -q
    H[m:2*m, m:2*m] = np.eye(m) if e is None else e.conj().T
    H[m:2*m, 2*m:] = 0. if s is None else -s
    H[2*m:, :m] = 0. if s is None else s.conj().T
    H[2*m:, 2*m:] = r
    J = np.zeros_like(H, dtype=r_or_c)
    J[:m, :m] = np.eye(m) if e is None else e
    J[m:2*m, m:2*m] = a.conj().T
    J[2*m:, m:2*m] = -b.conj().T
    if balanced:
        # xGEBAL does not remove the diagonals before scaling. Also
        # to avoid destroying the Symplectic structure, we follow Ref.3
        M = np.abs(H) + np.abs(J)
        M[np.diag_indices_from(M)] = 0.
        _, (sca, _) = matrix_balance(M, separate=1, permute=0)
        # do we need to bother?
        if not np.allclose(sca, np.ones_like(sca)):
            # Now impose diag(D,inv(D)) from Benner where D is
            # square root of s_i/s_(n+i) for i=0,....
            sca = np.log2(sca)
            # NOTE: Py3 uses "Bankers Rounding: round to the nearest even" !!
            s = np.round((sca[m:2*m] - sca[:m])/2)
            sca = 2 ** np.r_[s, -s, sca[2*m:]]
            # Elementwise multiplication via broadcasting.
            elwisescale = sca[:, None] * np.reciprocal(sca)
            H *= elwisescale
            J *= elwisescale
    # Deflate the pencil by the R column ala Ref.1
    q_of_qr, _ = qr(H[:, -n:])
    H = q_of_qr[:, n:].conj().T.dot(H[:, :2*m])
    J = q_of_qr[:, n:].conj().T.dot(J[:, :2*m])
    # Decide on which output type is needed for QZ
    out_str = 'real' if r_or_c == float else 'complex'
    # 'iuc' orders the eigenvalues inside the unit circle first, so the
    # leading columns of u span the stable subspace.
    _, _, _, _, _, u = ordqz(H, J, sort='iuc',
                             overwrite_a=True,
                             overwrite_b=True,
                             check_finite=False,
                             output=out_str)
    # Get the relevant parts of the stable subspace basis
    if e is not None:
        u, _ = qr(np.vstack((e.dot(u[:m, :m]), u[m:, :m])))
    u00 = u[:m, :m]
    u10 = u[m:, :m]
    # Solve via back-substituion after checking the condition of u00
    up, ul, uu = lu(u00)
    if 1/cond(uu) < np.spacing(1.):
        raise LinAlgError('Failed to find a finite solution.')
    # Exploit the triangular structure
    x = solve_triangular(ul.conj().T,
                         solve_triangular(uu.conj().T,
                                          u10.conj().T,
                                          lower=True),
                         unit_diagonal=True,
                         ).conj().T.dot(up.conj().T)
    if balanced:
        # Undo the balancing scaling on the solution.
        x *= sca[:m, None] * sca[:m]
    # Check the deviation from symmetry for lack of success
    # See proof of Thm.5 item 3 in [2]
    u_sym = u00.conj().T.dot(u10)
    n_u_sym = norm(u_sym, 1)
    u_sym = u_sym - u_sym.conj().T
    sym_threshold = np.max([np.spacing(1000.), 0.1*n_u_sym])
    if norm(u_sym, 1) > sym_threshold:
        raise LinAlgError('The associated symplectic pencil has eigenvalues '
                          'too close to the unit circle')
    # Return the symmetrized result to strip asymmetric numerical noise.
    return (x + x.conj().T)/2
def _are_validate_args(a, b, q, r, e, s, eq_type='care'):
    """
    A helper function to validate the arguments supplied to the
    Riccati equation solvers. Any discrepancy found in the input
    matrices leads to a ``ValueError`` exception.

    Essentially, it performs:

        - a check whether the input is free of NaN and Infs
        - a pass for the data through ``numpy.atleast_2d()``
        - squareness check of the relevant arrays
        - shape consistency check of the arrays
        - singularity check of the relevant arrays
        - symmetricity check of the relevant matrices
        - a check whether the regular or the generalized version is asked.

    This function is used by ``solve_continuous_are`` and
    ``solve_discrete_are``.

    Parameters
    ----------
    a, b, q, r, e, s : array_like
        Input data
    eq_type : str
        Accepted arguments are 'care' and 'dare' (case-insensitive).

    Returns
    -------
    a, b, q, r, e, s : ndarray
        Regularized input data
    m, n : int
        shape of the problem
    r_or_c : type
        Data type of the problem, returns float or complex
    gen_or_not : bool
        Type of the equation, True for generalized and False for regular ARE.
    """
    # Normalize once so every later comparison sees lowercase. Previously
    # only the validity test below lowercased, so e.g. 'CARE' was accepted
    # but silently skipped the CARE-specific singularity check on ``r``.
    eq_type = eq_type.lower()
    if eq_type not in ("dare", "care"):
        raise ValueError("Equation type unknown. "
                         "Only 'care' and 'dare' is understood")
    a = np.atleast_2d(_asarray_validated(a, check_finite=True))
    b = np.atleast_2d(_asarray_validated(b, check_finite=True))
    q = np.atleast_2d(_asarray_validated(q, check_finite=True))
    r = np.atleast_2d(_asarray_validated(r, check_finite=True))
    # Get the correct data types otherwise NumPy complains
    # about pushing complex numbers into real arrays.
    r_or_c = complex if np.iscomplexobj(b) else float
    for ind, mat in enumerate((a, q, r)):
        if np.iscomplexobj(mat):
            r_or_c = complex
        if not np.equal(*mat.shape):
            raise ValueError("Matrix {} should be square.".format("aqr"[ind]))
    # Shape consistency checks
    m, n = b.shape
    if m != a.shape[0]:
        raise ValueError("Matrix a and b should have the same number of rows.")
    if m != q.shape[0]:
        raise ValueError("Matrix a and q should have the same shape.")
    if n != r.shape[0]:
        raise ValueError("Matrix b and r should have the same number of cols.")
    # Check if the data matrices q, r are (sufficiently) hermitian
    for ind, mat in enumerate((q, r)):
        if norm(mat - mat.conj().T, 1) > np.spacing(norm(mat, 1))*100:
            raise ValueError("Matrix {} should be symmetric/hermitian."
                             "".format("qr"[ind]))
    # Continuous time ARE should have a nonsingular r matrix.
    if eq_type == 'care':
        min_sv = svd(r, compute_uv=False)[-1]
        if min_sv == 0. or min_sv < np.spacing(1.)*norm(r, 1):
            raise ValueError('Matrix r is numerically singular.')
    # Check if the generalized case is required with omitted arguments
    # perform late shape checking etc.
    generalized_case = e is not None or s is not None
    if generalized_case:
        if e is not None:
            e = np.atleast_2d(_asarray_validated(e, check_finite=True))
            if not np.equal(*e.shape):
                raise ValueError("Matrix e should be square.")
            if m != e.shape[0]:
                raise ValueError("Matrix a and e should have the same shape.")
            # numpy.linalg.cond doesn't check for exact zeros and
            # emits a runtime warning. Hence the following manual check.
            min_sv = svd(e, compute_uv=False)[-1]
            if min_sv == 0. or min_sv < np.spacing(1.) * norm(e, 1):
                raise ValueError('Matrix e is numerically singular.')
            if np.iscomplexobj(e):
                r_or_c = complex
        if s is not None:
            s = np.atleast_2d(_asarray_validated(s, check_finite=True))
            if s.shape != b.shape:
                raise ValueError("Matrix b and s should have the same shape.")
            if np.iscomplexobj(s):
                r_or_c = complex
    return a, b, q, r, e, s, m, n, r_or_c, generalized_case
| 28,380
| 32.46816
| 80
|
py
|
scipy
|
scipy-main/scipy/linalg/__init__.py
|
"""
====================================
Linear algebra (:mod:`scipy.linalg`)
====================================
.. currentmodule:: scipy.linalg
.. toctree::
:hidden:
linalg.blas
linalg.cython_blas
linalg.cython_lapack
linalg.interpolative
linalg.lapack
Linear algebra functions.
.. eventually, we should replace the numpy.linalg HTML link with just `numpy.linalg`
.. seealso::
`numpy.linalg <https://www.numpy.org/devdocs/reference/routines.linalg.html>`__
for more linear algebra functions. Note that
although `scipy.linalg` imports most of them, identically named
functions from `scipy.linalg` may offer more or slightly differing
functionality.
Basics
======
.. autosummary::
:toctree: generated/
inv - Find the inverse of a square matrix
solve - Solve a linear system of equations
solve_banded - Solve a banded linear system
solveh_banded - Solve a Hermitian or symmetric banded system
solve_circulant - Solve a circulant system
solve_triangular - Solve a triangular matrix
solve_toeplitz - Solve a toeplitz matrix
matmul_toeplitz - Multiply a Toeplitz matrix with an array.
det - Find the determinant of a square matrix
norm - Matrix and vector norm
lstsq - Solve a linear least-squares problem
pinv - Pseudo-inverse (Moore-Penrose) using lstsq
pinvh - Pseudo-inverse of hermitian matrix
kron - Kronecker product of two arrays
khatri_rao - Khatri-Rao product of two arrays
tril - Construct a lower-triangular matrix from a given matrix
triu - Construct an upper-triangular matrix from a given matrix
orthogonal_procrustes - Solve an orthogonal Procrustes problem
matrix_balance - Balance matrix entries with a similarity transformation
subspace_angles - Compute the subspace angles between two matrices
bandwidth - Return the lower and upper bandwidth of an array
issymmetric - Check if a square 2D array is symmetric
ishermitian - Check if a square 2D array is Hermitian
LinAlgError
LinAlgWarning
Eigenvalue Problems
===================
.. autosummary::
:toctree: generated/
eig - Find the eigenvalues and eigenvectors of a square matrix
eigvals - Find just the eigenvalues of a square matrix
eigh - Find the e-vals and e-vectors of a Hermitian or symmetric matrix
eigvalsh - Find just the eigenvalues of a Hermitian or symmetric matrix
eig_banded - Find the eigenvalues and eigenvectors of a banded matrix
eigvals_banded - Find just the eigenvalues of a banded matrix
eigh_tridiagonal - Find the eigenvalues and eigenvectors of a tridiagonal matrix
eigvalsh_tridiagonal - Find just the eigenvalues of a tridiagonal matrix
Decompositions
==============
.. autosummary::
:toctree: generated/
lu - LU decomposition of a matrix
lu_factor - LU decomposition returning unordered matrix and pivots
lu_solve - Solve Ax=b using back substitution with output of lu_factor
svd - Singular value decomposition of a matrix
svdvals - Singular values of a matrix
diagsvd - Construct matrix of singular values from output of svd
orth - Construct orthonormal basis for the range of A using svd
null_space - Construct orthonormal basis for the null space of A using svd
ldl - LDL.T decomposition of a Hermitian or a symmetric matrix.
cholesky - Cholesky decomposition of a matrix
cholesky_banded - Cholesky decomp. of a sym. or Hermitian banded matrix
cho_factor - Cholesky decomposition for use in solving a linear system
cho_solve - Solve previously factored linear system
cho_solve_banded - Solve previously factored banded linear system
polar - Compute the polar decomposition.
qr - QR decomposition of a matrix
qr_multiply - QR decomposition and multiplication by Q
qr_update - Rank k QR update
qr_delete - QR downdate on row or column deletion
qr_insert - QR update on row or column insertion
rq - RQ decomposition of a matrix
qz - QZ decomposition of a pair of matrices
ordqz - QZ decomposition of a pair of matrices with reordering
schur - Schur decomposition of a matrix
rsf2csf - Real to complex Schur form
hessenberg - Hessenberg form of a matrix
cdf2rdf - Complex diagonal form to real diagonal block form
cossin - Cosine sine decomposition of a unitary or orthogonal matrix
.. seealso::
`scipy.linalg.interpolative` -- Interpolative matrix decompositions
Matrix Functions
================
.. autosummary::
:toctree: generated/
expm - Matrix exponential
logm - Matrix logarithm
cosm - Matrix cosine
sinm - Matrix sine
tanm - Matrix tangent
coshm - Matrix hyperbolic cosine
sinhm - Matrix hyperbolic sine
tanhm - Matrix hyperbolic tangent
signm - Matrix sign
sqrtm - Matrix square root
funm - Evaluating an arbitrary matrix function
expm_frechet - Frechet derivative of the matrix exponential
expm_cond - Relative condition number of expm in the Frobenius norm
fractional_matrix_power - Fractional matrix power
Matrix Equation Solvers
=======================
.. autosummary::
:toctree: generated/
solve_sylvester - Solve the Sylvester matrix equation
solve_continuous_are - Solve the continuous-time algebraic Riccati equation
solve_discrete_are - Solve the discrete-time algebraic Riccati equation
solve_continuous_lyapunov - Solve the continuous-time Lyapunov equation
solve_discrete_lyapunov - Solve the discrete-time Lyapunov equation
Sketches and Random Projections
===============================
.. autosummary::
:toctree: generated/
clarkson_woodruff_transform - Applies the Clarkson Woodruff Sketch (a.k.a CountMin Sketch)
Special Matrices
================
.. autosummary::
:toctree: generated/
block_diag - Construct a block diagonal matrix from submatrices
circulant - Circulant matrix
companion - Companion matrix
convolution_matrix - Convolution matrix
dft - Discrete Fourier transform matrix
fiedler - Fiedler matrix
fiedler_companion - Fiedler companion matrix
hadamard - Hadamard matrix of order 2**n
hankel - Hankel matrix
helmert - Helmert matrix
hilbert - Hilbert matrix
invhilbert - Inverse Hilbert matrix
leslie - Leslie matrix
pascal - Pascal matrix
invpascal - Inverse Pascal matrix
toeplitz - Toeplitz matrix
tri - Construct a matrix filled with ones at and below a given diagonal
Low-level routines
==================
.. autosummary::
:toctree: generated/
get_blas_funcs
get_lapack_funcs
find_best_blas_type
.. seealso::
`scipy.linalg.blas` -- Low-level BLAS functions
`scipy.linalg.lapack` -- Low-level LAPACK functions
`scipy.linalg.cython_blas` -- Low-level BLAS functions for Cython
`scipy.linalg.cython_lapack` -- Low-level LAPACK functions for Cython
""" # noqa: E501
from ._misc import *
from ._cythonized_array_utils import *
from ._basic import *
from ._decomp import *
from ._decomp_lu import *
from ._decomp_ldl import *
from ._decomp_cholesky import *
from ._decomp_qr import *
from ._decomp_qz import *
from ._decomp_svd import *
from ._decomp_schur import *
from ._decomp_polar import *
from ._matfuncs import *
from .blas import *
from .lapack import *
from ._special_matrices import *
from ._solvers import *
from ._procrustes import *
from ._decomp_update import *
from ._sketches import *
from ._decomp_cossin import *
# Deprecated namespaces, to be removed in v2.0.0
from . import (
decomp, decomp_cholesky, decomp_lu, decomp_qr, decomp_svd, decomp_schur,
basic, misc, special_matrices, flinalg, matfuncs
)
# Re-export every public (non-underscore) name brought in by the star
# imports above.
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
# Expose ``scipy.linalg.test()`` for running this subpackage's tests.
test = PytestTester(__name__)
del PytestTester  # keep the helper class out of the public namespace
| 7,733
| 31.225
| 93
|
py
|
scipy
|
scipy-main/scipy/linalg/_matfuncs.py
|
#
# Author: Travis Oliphant, March 2002
#
from itertools import product
import numpy as np
from numpy import (dot, diag, prod, logical_not, ravel, transpose,
conjugate, absolute, amax, sign, isfinite, triu)
from numpy.lib.scimath import sqrt as csqrt
# Local imports
from scipy.linalg import LinAlgError, bandwidth
from ._misc import norm
from ._basic import solve, inv
from ._decomp_svd import svd
from ._decomp_schur import schur, rsf2csf
from ._expm_frechet import expm_frechet, expm_cond
from ._matfuncs_sqrtm import sqrtm
from ._matfuncs_expm import pick_pade_structure, pade_UV_calc
__all__ = ['expm', 'cosm', 'sinm', 'tanm', 'coshm', 'sinhm', 'tanhm', 'logm',
           'funm', 'signm', 'sqrtm', 'fractional_matrix_power', 'expm_frechet',
           'expm_cond', 'khatri_rao']
# Machine epsilon for double ('d') and single ('f') precision; used to build
# error tolerances throughout this module.
eps = np.finfo('d').eps
feps = np.finfo('f').eps
# Maps a dtype character to 0 (single precision) or 1 (double precision) so
# the matching epsilon (feps vs. eps) can be selected for tolerances.
_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1}
###############################################################################
# Utility functions.
def _asarray_square(A):
"""
Wraps asarray with the extra requirement that the input be a square matrix.
The motivation is that the matfuncs module has real functions that have
been lifted to square matrix functions.
Parameters
----------
A : array_like
A square matrix.
Returns
-------
out : ndarray
An ndarray copy or view or other representation of A.
"""
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected square array_like input')
return A
def _maybe_real(A, B, tol=None):
"""
Return either B or the real part of B, depending on properties of A and B.
The motivation is that B has been computed as a complicated function of A,
and B may be perturbed by negligible imaginary components.
If A is real and B is complex with small imaginary components,
then return a real copy of B. The assumption in that case would be that
the imaginary components of B are numerical artifacts.
Parameters
----------
A : ndarray
Input array whose type is to be checked as real vs. complex.
B : ndarray
Array to be returned, possibly without its imaginary part.
tol : float
Absolute tolerance.
Returns
-------
out : real or complex array
Either the input array B or only the real part of the input array B.
"""
# Note that booleans and integers compare as real.
if np.isrealobj(A) and np.iscomplexobj(B):
if tol is None:
tol = {0:feps*1e3, 1:eps*1e6}[_array_precision[B.dtype.char]]
if np.allclose(B.imag, 0.0, atol=tol):
B = B.real
return B
###############################################################################
# Matrix functions.
def fractional_matrix_power(A, t):
    """
    Compute the fractional power of a matrix.

    Proceeds according to the discussion in section (6) of [1]_.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose fractional power to evaluate.
    t : float
        Fractional power.

    Returns
    -------
    X : (N, N) array_like
        The fractional power of the matrix.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import fractional_matrix_power
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> b = fractional_matrix_power(a, 0.5)
    >>> b
    array([[ 0.75592895,  1.13389342],
           [ 0.37796447,  1.88982237]])
    >>> np.dot(b, b)      # Verify square root
    array([[ 1.,  3.],
           [ 1.,  4.]])
    """
    # Inline square-matrix validation (same contract as _asarray_square).
    M = np.asarray(A)
    if M.ndim != 2 or M.shape[0] != M.shape[1]:
        raise ValueError('expected square array_like input')
    # Imported lazily: the implementation pulls in onenormest from
    # scipy.sparse, which would be circular at module import time.
    import scipy.linalg._matfuncs_inv_ssq
    return scipy.linalg._matfuncs_inv_ssq._fractional_matrix_power(M, t)
def logm(A, disp=True):
    """
    Compute matrix logarithm.

    The matrix logarithm is the inverse of
    expm: expm(logm(`A`)) == `A`

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose logarithm to evaluate
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    logm : (N, N) ndarray
        Matrix logarithm of `A`
    errest : float
        (if disp == False)
        1-norm of the estimated error, ||err||_1 / ||A||_1

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
           "Improved Inverse Scaling and Squaring Algorithms
           for the Matrix Logarithm."
           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
           ISSN 1095-7197

    .. [2] Nicholas J. Higham (2008)
           "Functions of Matrices: Theory and Computation"
           ISBN 978-0-898716-46-7

    .. [3] Nicholas J. Higham and Lijing lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import logm, expm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> b = logm(a)
    >>> b
    array([[-1.02571087,  2.05142174],
           [ 0.68380725,  1.02571087]])
    >>> expm(b)         # Verify expm(logm(a)) returns a
    array([[ 1.,  3.],
           [ 1.,  4.]])
    """
    A = _asarray_square(A)
    # Imported lazily to dodge a circular import at module load time.
    import scipy.linalg._matfuncs_inv_ssq
    F = _maybe_real(A, scipy.linalg._matfuncs_inv_ssq._logm(A))
    # Residual-based error estimate; TODO: use a better approximation.
    errest = norm(expm(F) - A, 1) / norm(A, 1)
    if not disp:
        return F, errest
    if not isfinite(errest) or errest >= 1000*eps:
        print("logm result may be inaccurate, approximate err =", errest)
    return F
def expm(A):
    """Compute the matrix exponential of an array.

    Parameters
    ----------
    A : ndarray
        Input with last two dimensions are square ``(..., n, n)``.

    Returns
    -------
    eA : ndarray
        The resulting matrix exponential with the same shape of ``A``

    Notes
    -----
    Implements the algorithm given in [1], which is essentially a Pade
    approximation with a variable order that is decided based on the array
    data.

    For input with size ``n``, the memory usage is in the worst case in the
    order of ``8*(n**2)``. If the input data is not of single and double
    precision of real and complex dtypes, it is copied to a new array.

    For cases ``n >= 400``, the exact 1-norm computation cost, breaks even with
    1-norm estimation and from that point on the estimation scheme given in
    [2] is used to decide on the approximation order.

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham, (2009), "A New Scaling
           and Squaring Algorithm for the Matrix Exponential", SIAM J. Matrix
           Anal. Appl. 31(3):970-989, :doi:`10.1137/09074721X`

    .. [2] Nicholas J. Higham and Francoise Tisseur (2000), "A Block Algorithm
           for Matrix 1-Norm Estimation, with an Application to 1-Norm
           Pseudospectra." SIAM J. Matrix Anal. Appl. 21(4):1185-1201,
           :doi:`10.1137/S0895479899356080`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import expm, sinm, cosm

    Matrix version of the formula exp(0) = 1:

    >>> expm(np.zeros((3, 2, 2)))
    array([[[1., 0.],
            [0., 1.]],
    <BLANKLINE>
           [[1., 0.],
            [0., 1.]],
    <BLANKLINE>
           [[1., 0.],
            [0., 1.]]])

    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
    applied to a matrix:

    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
    >>> expm(1j*a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
    >>> cosm(a) + 1j*sinm(a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
    """
    a = np.asarray(A)
    # 0-d (or 1-element 1-d) input: scalar exponential wrapped as 1x1 matrix.
    if a.size == 1 and a.ndim < 2:
        return np.array([[np.exp(a.item())]])
    if a.ndim < 2:
        raise LinAlgError('The input array must be at least two-dimensional')
    if a.shape[-1] != a.shape[-2]:
        raise LinAlgError('Last 2 dimensions of the array must be square')
    n = a.shape[-1]
    # Empty array
    if min(*a.shape) == 0:
        return np.empty_like(a)
    # Scalar case
    if a.shape[-2:] == (1, 1):
        return np.exp(a)
    # Promote non-float inputs to a supported precision (half -> single,
    # integers/bools -> double).
    if not np.issubdtype(a.dtype, np.inexact):
        a = a.astype(float)
    elif a.dtype == np.float16:
        a = a.astype(np.float32)
    # Explicit formula for 2x2 case, formula (2.2) in [1]
    # without Kahan's method numerical instabilities can occur.
    if a.shape[-2:] == (2, 2):
        a1, a2, a3, a4 = (a[..., [0], [0]],
                          a[..., [0], [1]],
                          a[..., [1], [0]],
                          a[..., [1], [1]])
        mu = csqrt((a1-a4)**2 + 4*a2*a3)/2.  # csqrt slow but handles neg.vals
        eApD2 = np.exp((a1+a4)/2.)
        AmD2 = (a1 - a4)/2.
        coshMu = np.cosh(mu)
        # sinch(mu) = sinh(mu)/mu with the limit value 1 at mu == 0.
        sinchMu = np.ones_like(coshMu)
        mask = mu != 0
        sinchMu[mask] = np.sinh(mu[mask]) / mu[mask]
        eA = np.empty((a.shape), dtype=mu.dtype)
        eA[..., [0], [0]] = eApD2 * (coshMu + AmD2*sinchMu)
        eA[..., [0], [1]] = eApD2 * a2 * sinchMu
        eA[..., [1], [0]] = eApD2 * a3 * sinchMu
        eA[..., [1], [1]] = eApD2 * (coshMu - AmD2*sinchMu)
        if np.isrealobj(a):
            return eA.real
        return eA
    # larger problem with unspecified stacked dimensions.
    n = a.shape[-1]
    eA = np.empty(a.shape, dtype=a.dtype)
    # working memory to hold intermediate arrays
    Am = np.empty((5, n, n), dtype=a.dtype)
    # Main loop to go through the slices of an ndarray and passing to expm
    for ind in product(*[range(x) for x in a.shape[:-2]]):
        aw = a[ind]
        lu = bandwidth(aw)
        if not any(lu):  # a is diagonal?
            eA[ind] = np.diag(np.exp(np.diag(aw)))
            continue
        # Generic/triangular case; copy the slice into scratch and send.
        # Am will be mutated by pick_pade_structure
        Am[0, :, :] = aw
        m, s = pick_pade_structure(Am)
        if s != 0:  # scaling needed
            # Per-slot factors (2^-s)^{1,2,4,6} -- presumably Am[1:4] hold the
            # even powers A^2, A^4, A^6 filled by pick_pade_structure; confirm
            # against that routine's contract.
            Am[:4] *= [[[2**(-s)]], [[4**(-s)]], [[16**(-s)]], [[64**(-s)]]]
        # Forms the order-m Pade approximant in place; result lands in Am[0].
        pade_UV_calc(Am, n, m)
        eAw = Am[0]
        if s != 0:  # squaring needed
            if (lu[1] == 0) or (lu[0] == 0):  # lower/upper triangular
                # This branch implements Code Fragment 2.1 of [1]
                diag_aw = np.diag(aw)
                # einsum returns a writable view
                np.einsum('ii->i', eAw)[:] = np.exp(diag_aw * 2**(-s))
                # super/sub diagonal
                sd = np.diag(aw, k=-1 if lu[1] == 0 else 1)
                for i in range(s-1, -1, -1):
                    eAw = eAw @ eAw
                    # Overwrite (first off-)diagonal entries with exact values
                    # after each squaring to curb error accumulation.
                    # diagonal
                    np.einsum('ii->i', eAw)[:] = np.exp(diag_aw * 2.**(-i))
                    exp_sd = _exp_sinch(diag_aw * (2.**(-i))) * (sd * 2**(-i))
                    if lu[1] == 0:  # lower
                        np.einsum('ii->i', eAw[1:, :-1])[:] = exp_sd
                    else:  # upper
                        np.einsum('ii->i', eAw[:-1, 1:])[:] = exp_sd
            else:  # generic
                for _ in range(s):
                    eAw = eAw @ eAw
        # Zero out the entries from np.empty in case of triangular input
        if (lu[0] == 0) or (lu[1] == 0):
            eA[ind] = np.triu(eAw) if lu[0] == 0 else np.tril(eAw)
        else:
            eA[ind] = eAw
    return eA
def _exp_sinch(x):
# Higham's formula (10.42), might overflow, see GH-11839
lexp_diff = np.diff(np.exp(x))
l_diff = np.diff(x)
mask_z = l_diff == 0.
lexp_diff[~mask_z] /= l_diff[~mask_z]
lexp_diff[mask_z] = np.exp(x[:-1][mask_z])
return lexp_diff
def cosm(A):
    """
    Compute the matrix cosine.

    The result is obtained from the matrix exponential via
    ``cos(A) = (exp(iA) + exp(-iA)) / 2``.

    Parameters
    ----------
    A : (N, N) array_like
        Input array

    Returns
    -------
    cosm : (N, N) ndarray
        Matrix cosine of A

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import expm, sinm, cosm

    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
    applied to a matrix:

    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
    >>> expm(1j*a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
    >>> cosm(a) + 1j*sinm(a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
    """
    # Inline square-matrix validation (same contract as _asarray_square).
    A = np.asarray(A)
    if A.ndim != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected square array_like input')
    # For real input the real part of exp(iA) is already cos(A).
    if not np.iscomplexobj(A):
        return expm(1j*A).real
    return 0.5*(expm(1j*A) + expm(-1j*A))
def sinm(A):
    """
    Compute the matrix sine.

    The result is obtained from the matrix exponential via
    ``sin(A) = (exp(iA) - exp(-iA)) / 2i``.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    sinm : (N, N) ndarray
        Matrix sine of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import expm, sinm, cosm

    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
    applied to a matrix:

    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
    >>> expm(1j*a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
    >>> cosm(a) + 1j*sinm(a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
    """
    # Inline square-matrix validation (same contract as _asarray_square).
    A = np.asarray(A)
    if A.ndim != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected square array_like input')
    # For real input the imaginary part of exp(iA) is already sin(A).
    if not np.iscomplexobj(A):
        return expm(1j*A).imag
    return -0.5j*(expm(1j*A) - expm(-1j*A))
def tanm(A):
    """
    Compute the matrix tangent.

    Evaluated as ``solve(cosm(A), sinm(A))``, i.e. cos(A)^-1 sin(A).

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    tanm : (N, N) ndarray
        Matrix tangent of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import tanm, sinm, cosm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> t = tanm(a)
    >>> t
    array([[ -2.00876993,  -8.41880636],
           [ -2.80626879, -10.42757629]])

    Verify tanm(a) = sinm(a).dot(inv(cosm(a)))

    >>> s = sinm(a)
    >>> c = cosm(a)
    >>> s.dot(np.linalg.inv(c))
    array([[ -2.00876993,  -8.41880636],
           [ -2.80626879, -10.42757629]])
    """
    A = _asarray_square(A)
    C = cosm(A)
    S = sinm(A)
    return _maybe_real(A, solve(C, S))
def coshm(A):
    """
    Compute the hyperbolic matrix cosine.

    Evaluated as ``(expm(A) + expm(-A)) / 2``.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    coshm : (N, N) ndarray
        Hyperbolic matrix cosine of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import tanhm, sinhm, coshm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> c = coshm(a)
    >>> c
    array([[ 11.24592233,  38.76236492],
           [ 12.92078831,  50.00828725]])

    Verify tanhm(a) = sinhm(a).dot(inv(coshm(a)))

    >>> t = tanhm(a)
    >>> s = sinhm(a)
    >>> t - s.dot(np.linalg.inv(c))
    array([[  2.72004641e-15,   4.55191440e-15],
           [  0.00000000e+00,  -5.55111512e-16]])
    """
    A = _asarray_square(A)
    eP = expm(A)
    eM = expm(-A)
    return _maybe_real(A, 0.5 * (eP + eM))
def sinhm(A):
    """
    Compute the hyperbolic matrix sine.

    Evaluated as ``(expm(A) - expm(-A)) / 2``.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    sinhm : (N, N) ndarray
        Hyperbolic matrix sine of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import tanhm, sinhm, coshm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> s = sinhm(a)
    >>> s
    array([[ 10.57300653,  39.28826594],
           [ 13.09608865,  49.86127247]])

    Verify tanhm(a) = sinhm(a).dot(inv(coshm(a)))

    >>> t = tanhm(a)
    >>> c = coshm(a)
    >>> t - s.dot(np.linalg.inv(c))
    array([[  2.72004641e-15,   4.55191440e-15],
           [  0.00000000e+00,  -5.55111512e-16]])
    """
    A = _asarray_square(A)
    eP = expm(A)
    eM = expm(-A)
    return _maybe_real(A, 0.5 * (eP - eM))
def tanhm(A):
    """
    Compute the hyperbolic matrix tangent.

    Evaluated as ``solve(coshm(A), sinhm(A))``, i.e. cosh(A)^-1 sinh(A).

    Parameters
    ----------
    A : (N, N) array_like
        Input array

    Returns
    -------
    tanhm : (N, N) ndarray
        Hyperbolic matrix tangent of `A`

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import tanhm, sinhm, coshm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> t = tanhm(a)
    >>> t
    array([[ 0.3428582 ,  0.51987926],
           [ 0.17329309,  0.86273746]])

    Verify tanhm(a) = sinhm(a).dot(inv(coshm(a)))

    >>> s = sinhm(a)
    >>> c = coshm(a)
    >>> t - s.dot(np.linalg.inv(c))
    array([[  2.72004641e-15,   4.55191440e-15],
           [  0.00000000e+00,  -5.55111512e-16]])
    """
    A = _asarray_square(A)
    C = coshm(A)
    S = sinhm(A)
    return _maybe_real(A, solve(C, S))
def funm(A, func, disp=True):
    """
    Evaluate a matrix function specified by a callable.

    Returns the value of matrix-valued function ``f`` at `A`. The
    function ``f`` is an extension of the scalar-valued function `func`
    to matrices.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix at which to evaluate the function
    func : callable
        Callable object that evaluates a scalar function f.
        Must be vectorized (eg. using vectorize).
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    funm : (N, N) ndarray
        Value of the matrix function specified by func evaluated at `A`
    errest : float
        (if disp == False)
        1-norm of the estimated error, ||err||_1 / ||A||_1

    Notes
    -----
    This function implements the general algorithm based on Schur decomposition
    (Algorithm 9.1.1. in [1]_).

    If the input matrix is known to be diagonalizable, then relying on the
    eigendecomposition is likely to be faster. For example, if your matrix is
    Hermitian, you can do

    >>> from scipy.linalg import eigh
    >>> def funm_herm(a, func, check_finite=False):
    ...     w, v = eigh(a, check_finite=check_finite)
    ...     ## if you further know that your matrix is positive semidefinite,
    ...     ## you can optionally guard against precision errors by doing
    ...     # w = np.maximum(w, 0)
    ...     w = func(w)
    ...     return (v * w).dot(v.conj().T)

    References
    ----------
    .. [1] Gene H. Golub, Charles F. van Loan, Matrix Computations 4th ed.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import funm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> funm(a, lambda x: x*x)
    array([[  4.,  15.],
           [  5.,  19.]])
    >>> a.dot(a)
    array([[  4.,  15.],
           [  5.,  19.]])
    """
    A = _asarray_square(A)
    # Perform Shur decomposition (lapack ?gees)
    T, Z = schur(A)
    # Convert the real Schur form to the complex upper-triangular form, so
    # the Parlett recurrence below can work on scalar diagonal entries.
    T, Z = rsf2csf(T,Z)
    n,n = T.shape
    F = diag(func(diag(T)))  # apply function to diagonal elements
    F = F.astype(T.dtype.char)  # e.g., when F is real but T is complex
    # Track the smallest denominator |T[j,j] - T[i,i]| seen; near-equal
    # eigenvalues make the recurrence ill-conditioned, which feeds the
    # error estimate below.
    minden = abs(T[0,0])
    # implement Algorithm 11.1.1 from Golub and Van Loan
    # "matrix Computations."
    # Parlett recurrence: fill F superdiagonal-by-superdiagonal (offset p),
    # each entry solving a scalar Sylvester-type equation.
    for p in range(1,n):
        for i in range(1,n-p+1):
            j = i + p
            s = T[i-1,j-1] * (F[j-1,j-1] - F[i-1,i-1])
            ksl = slice(i,j-1)
            val = dot(T[i-1,ksl],F[ksl,j-1]) - dot(F[i-1,ksl],T[ksl,j-1])
            s = s + val
            den = T[j-1,j-1] - T[i-1,i-1]
            # Confluent eigenvalues (den == 0): leave s undivided rather
            # than dividing by zero; the error estimate will reflect this.
            if den != 0.0:
                s = s / den
            F[i-1,j-1] = s
            minden = min(minden,abs(den))
    # Transform back from Schur coordinates: f(A) = Z f(T) Z^H.
    F = dot(dot(Z, F), transpose(conjugate(Z)))
    F = _maybe_real(A, F)
    tol = {0:feps, 1:eps}[_array_precision[F.dtype.char]]
    if minden == 0.0:
        minden = tol
    # Error estimate grows with the strictly-upper mass of T and with
    # 1/minden (eigenvalue clustering); clipped to [tol, 1].
    err = min(1, max(tol,(tol/minden)*norm(triu(T,1),1)))
    # Any non-finite entry in F makes the result unusable.
    if prod(ravel(logical_not(isfinite(F))),axis=0):
        err = np.inf
    if disp:
        if err > 1000*tol:
            print("funm result may be inaccurate, approximate err =", err)
        return F
    else:
        return F, err
def signm(A, disp=True):
    """
    Matrix sign function.

    Extension of the scalar sign(x) to matrices.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix at which to evaluate the sign function
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    signm : (N, N) ndarray
        Value of the sign function at `A`
    errest : float
        (if disp == False)
        1-norm of the estimated error, ||err||_1 / ||A||_1

    Examples
    --------
    >>> from scipy.linalg import signm, eigvals
    >>> a = [[1,2,3], [1,2,1], [1,1,1]]
    >>> eigvals(a)
    array([ 4.12488542+0.j, -0.76155718+0.j,  0.63667176+0.j])
    >>> eigvals(signm(a))
    array([-1.+0.j,  1.+0.j,  1.+0.j])
    """
    A = _asarray_square(A)
    # Scalar sign with small real parts rounded to zero, so that roundoff
    # noise near zero does not flip signs when lifted through funm.
    def rounded_sign(x):
        rx = np.real(x)
        if rx.dtype.char == 'f':
            c = 1e3*feps*amax(x)
        else:
            c = 1e3*eps*amax(x)
        return sign((absolute(rx) > c) * rx)
    result, errest = funm(A, rounded_sign, disp=0)
    errtol = {0:1e3*feps, 1:1e3*eps}[_array_precision[result.dtype.char]]
    # Accept the funm-based answer when its error estimate is small enough.
    if errest < errtol:
        return result
    # Handle signm of defective matrices:
    # See "E.D.Denman and J.Leyva-Ramos, Appl.Math.Comp.,
    # 8:237-250,1981" for how to improve the following (currently a
    # rather naive) iteration process:
    # a = result # sometimes iteration converges faster but where??
    # Shifting to avoid zero eigenvalues. How to ensure that shifting does
    # not change the spectrum too much?
    vals = svd(A, compute_uv=False)
    max_sv = np.amax(vals)
    # min_nonzero_sv = vals[(vals>max_sv*errtol).tolist().count(1)-1]
    # c = 0.5/min_nonzero_sv
    c = 0.5/max_sv
    S0 = A + c*np.identity(A.shape[0])
    prev_errest = errest
    # Newton iteration S <- (S + S^-1)/2; the fixed points are matrices
    # with eigenvalues +/-1, i.e. candidate sign matrices.
    for i in range(100):
        iS0 = inv(S0)
        S0 = 0.5*(S0 + iS0)
        # Pp should be a projector when S0 is a sign matrix; its failure
        # to be idempotent measures the remaining error.
        Pp = 0.5*(dot(S0,S0)+S0)
        errest = norm(dot(Pp,Pp)-Pp,1)
        # Stop on convergence or stagnation of the error estimate.
        if errest < errtol or prev_errest == errest:
            break
        prev_errest = errest
    if disp:
        if not isfinite(errest) or errest >= errtol:
            print("signm result may be inaccurate, approximate err =", errest)
        return S0
    else:
        return S0, errest
def khatri_rao(a, b):
    r"""
    Khatri-rao product

    A column-wise Kronecker product of two matrices

    Parameters
    ----------
    a : (n, k) array_like
        Input array
    b : (m, k) array_like
        Input array

    Returns
    -------
    c:  (n*m, k) ndarray
        Khatri-rao product of `a` and `b`.

    Raises
    ------
    ValueError
        If the inputs are not both 2-dimensional, or if their numbers of
        columns differ.

    See Also
    --------
    kron : Kronecker product

    Notes
    -----
    The mathematical definition of the Khatri-Rao product is:

    .. math::

        (A_{ij}  \bigotimes B_{ij})_{ij}

    which is the Kronecker product of every column of A and B, e.g.::

        c = np.vstack([np.kron(a[:, k], b[:, k]) for k in range(b.shape[1])]).T

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import linalg
    >>> a = np.array([[1, 2, 3], [4, 5, 6]])
    >>> b = np.array([[3, 4, 5], [6, 7, 8], [2, 3, 9]])
    >>> linalg.khatri_rao(a, b)
    array([[ 3,  8, 15],
           [ 6, 14, 24],
           [ 2,  6, 27],
           [12, 20, 30],
           [24, 35, 48],
           [ 8, 15, 54]])
    """
    a = np.asarray(a)
    b = np.asarray(b)

    # Fix: reworded the previously ungrammatical message
    # ("The both arrays should be 2-dimensional.").
    if not (a.ndim == 2 and b.ndim == 2):
        raise ValueError("Both arrays should be 2-dimensional.")

    if a.shape[1] != b.shape[1]:
        raise ValueError("The number of columns for both arrays "
                         "should be equal.")

    # Equivalent to, but faster than:
    #   np.vstack([np.kron(a[:, k], b[:, k]) for k in range(b.shape[1])]).T
    # Broadcasting builds an (n, m, k) array of column-wise outer products;
    # collapsing the first two axes stacks the Kronecker blocks.
    c = a[..., :, np.newaxis, :] * b[..., np.newaxis, :, :]
    return c.reshape((-1,) + c.shape[2:])
| 25,030
| 27.412032
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/flinalg.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
import warnings
from . import _flinalg_py
__all__ = ['get_flinalg_funcs', 'has_column_major_storage'] # noqa: F822
def __dir__():
    # Advertise only the deprecated public names for dir()/tab-completion.
    return __all__
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): forwards access to the deprecated
    # public names to the private implementation module, emitting a
    # DeprecationWarning; anything else raises AttributeError.
    if name not in __all__:
        raise AttributeError(
            "scipy.linalg.flinalg is deprecated and has no attribute "
            f"{name}. Try looking in scipy.linalg instead.")
    warnings.warn("The `scipy.linalg.flinalg` namespace is deprecated and "
                  "will be removed in SciPy v2.0.0.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_flinalg_py, name)
| 677
| 27.25
| 76
|
py
|
scipy
|
scipy-main/scipy/linalg/blas.py
|
"""
Low-level BLAS functions (:mod:`scipy.linalg.blas`)
===================================================
This module contains low-level functions from the BLAS library.
.. versionadded:: 0.12.0
.. note::
The common ``overwrite_<>`` option in many routines, allows the
input arrays to be overwritten to avoid extra memory allocation.
However this requires the array to satisfy two conditions
which are memory order and the data type to match exactly the
order and the type expected by the routine.
As an example, if you pass a double precision float array to any
``S....`` routine which expects single precision arguments, f2py
will create an intermediate array to match the argument types and
overwriting will be performed on that intermediate array.
Similarly, if a C-contiguous array is passed, f2py will pass a
FORTRAN-contiguous array internally. Please make sure that these
details are satisfied. More information can be found in the f2py
documentation.
.. warning::
These functions do little to no error checking.
It is possible to cause crashes by mis-using them,
so prefer using the higher-level routines in `scipy.linalg`.
Finding functions
-----------------
.. autosummary::
:toctree: generated/
get_blas_funcs
find_best_blas_type
BLAS Level 1 functions
----------------------
.. autosummary::
:toctree: generated/
caxpy
ccopy
cdotc
cdotu
crotg
cscal
csrot
csscal
cswap
dasum
daxpy
dcopy
ddot
dnrm2
drot
drotg
drotm
drotmg
dscal
dswap
dzasum
dznrm2
icamax
idamax
isamax
izamax
sasum
saxpy
scasum
scnrm2
scopy
sdot
snrm2
srot
srotg
srotm
srotmg
sscal
sswap
zaxpy
zcopy
zdotc
zdotu
zdrot
zdscal
zrotg
zscal
zswap
BLAS Level 2 functions
----------------------
.. autosummary::
:toctree: generated/
sgbmv
sgemv
sger
ssbmv
sspr
sspr2
ssymv
ssyr
ssyr2
stbmv
stpsv
strmv
strsv
dgbmv
dgemv
dger
dsbmv
dspr
dspr2
dsymv
dsyr
dsyr2
dtbmv
dtpsv
dtrmv
dtrsv
cgbmv
cgemv
cgerc
cgeru
chbmv
chemv
cher
cher2
chpmv
chpr
chpr2
ctbmv
ctbsv
ctpmv
ctpsv
ctrmv
ctrsv
csyr
zgbmv
zgemv
zgerc
zgeru
zhbmv
zhemv
zher
zher2
zhpmv
zhpr
zhpr2
ztbmv
ztbsv
ztpmv
ztrmv
ztrsv
zsyr
BLAS Level 3 functions
----------------------
.. autosummary::
:toctree: generated/
sgemm
ssymm
ssyr2k
ssyrk
strmm
strsm
dgemm
dsymm
dsyr2k
dsyrk
dtrmm
dtrsm
cgemm
chemm
cher2k
cherk
csymm
csyr2k
csyrk
ctrmm
ctrsm
zgemm
zhemm
zher2k
zherk
zsymm
zsyr2k
zsyrk
ztrmm
ztrsm
"""
#
# Author: Pearu Peterson, March 2002
# refactoring by Fabian Pedregosa, March 2010
#
__all__ = ['get_blas_funcs', 'find_best_blas_type']
import numpy as _np
import functools
from scipy.linalg import _fblas
try:
from scipy.linalg import _cblas
except ImportError:
_cblas = None
try:
from scipy.linalg import _fblas_64
HAS_ILP64 = True
except ImportError:
HAS_ILP64 = False
_fblas_64 = None
# Expose all functions (only fblas --- cblas is an implementation detail)
empty_module = None
from scipy.linalg._fblas import *
del empty_module
# all numeric dtypes '?bBhHiIlLqQefdgFDGO' that are safe to be converted to
# single precision float   : '?bBhH!!!!!!ef!!!!!!'
# double precision float   : '?bBhHiIlLqQefdg!!!!'
# single precision complex : '?bBhH!!!!!!ef!!F!!!'
# double precision complex : '?bBhHiIlLqQefdgFDG!'
# Score 1..4 ranks the minimal BLAS precision a dtype can be lowered to;
# dtype chars absent from the dict fall back to score 5 ("unknown" ->
# double precision, see find_best_blas_type).
_type_score = {x: 1 for x in '?bBhHef'}
_type_score.update({x: 2 for x in 'iIlLqQd'})
# Handle float128(g) and complex256(G) separately in case non-Windows systems.
# On Windows, the values will be rewritten to the same key with the same value.
_type_score.update({'F': 3, 'D': 4, 'g': 2, 'G': 4})
# Final mapping to the actual prefixes and dtypes
_type_conv = {1: ('s', _np.dtype('float32')),
              2: ('d', _np.dtype('float64')),
              3: ('c', _np.dtype('complex64')),
              4: ('z', _np.dtype('complex128'))}
# some convenience alias for complex functions
_blas_alias = {'cnrm2': 'scnrm2', 'znrm2': 'dznrm2',
               'cdot': 'cdotc', 'zdot': 'zdotc',
               'cger': 'cgerc', 'zger': 'zgerc',
               'sdotc': 'sdot', 'sdotu': 'sdot',
               'ddotc': 'ddot', 'ddotu': 'ddot'}
def find_best_blas_type(arrays=(), dtype=None):
"""Find best-matching BLAS/LAPACK type.
Arrays are used to determine the optimal prefix of BLAS routines.
Parameters
----------
arrays : sequence of ndarrays, optional
Arrays can be given to determine optimal prefix of BLAS
routines. If not given, double-precision routines will be
used, otherwise the most generic type in arrays will be used.
dtype : str or dtype, optional
Data-type specifier. Not used if `arrays` is non-empty.
Returns
-------
prefix : str
BLAS/LAPACK prefix character.
dtype : dtype
Inferred Numpy data type.
prefer_fortran : bool
Whether to prefer Fortran order routines over C order.
Examples
--------
>>> import numpy as np
>>> import scipy.linalg.blas as bla
>>> rng = np.random.default_rng()
>>> a = rng.random((10,15))
>>> b = np.asfortranarray(a) # Change the memory layout order
>>> bla.find_best_blas_type((a,))
('d', dtype('float64'), False)
>>> bla.find_best_blas_type((a*1j,))
('z', dtype('complex128'), False)
>>> bla.find_best_blas_type((b,))
('d', dtype('float64'), True)
"""
dtype = _np.dtype(dtype)
max_score = _type_score.get(dtype.char, 5)
prefer_fortran = False
if arrays:
# In most cases, single element is passed through, quicker route
if len(arrays) == 1:
max_score = _type_score.get(arrays[0].dtype.char, 5)
prefer_fortran = arrays[0].flags['FORTRAN']
else:
# use the most generic type in arrays
scores = [_type_score.get(x.dtype.char, 5) for x in arrays]
max_score = max(scores)
ind_max_score = scores.index(max_score)
# safe upcasting for mix of float64 and complex64 --> prefix 'z'
if max_score == 3 and (2 in scores):
max_score = 4
if arrays[ind_max_score].flags['FORTRAN']:
# prefer Fortran for leading array with column major order
prefer_fortran = True
# Get the LAPACK prefix and the corresponding dtype if not fall back
# to 'd' and double precision float.
prefix, dtype = _type_conv.get(max_score, ('d', _np.dtype('float64')))
return prefix, dtype, prefer_fortran
def _get_funcs(names, arrays, dtype,
lib_name, fmodule, cmodule,
fmodule_name, cmodule_name, alias,
ilp64=False):
"""
Return available BLAS/LAPACK functions.
Used also in lapack.py. See get_blas_funcs for docstring.
"""
funcs = []
unpack = False
dtype = _np.dtype(dtype)
module1 = (cmodule, cmodule_name)
module2 = (fmodule, fmodule_name)
if isinstance(names, str):
names = (names,)
unpack = True
prefix, dtype, prefer_fortran = find_best_blas_type(arrays, dtype)
if prefer_fortran:
module1, module2 = module2, module1
for name in names:
func_name = prefix + name
func_name = alias.get(func_name, func_name)
func = getattr(module1[0], func_name, None)
module_name = module1[1]
if func is None:
func = getattr(module2[0], func_name, None)
module_name = module2[1]
if func is None:
raise ValueError(
f'{lib_name} function {func_name} could not be found')
func.module_name, func.typecode = module_name, prefix
func.dtype = dtype
if not ilp64:
func.int_dtype = _np.dtype(_np.intc)
else:
func.int_dtype = _np.dtype(_np.int64)
func.prefix = prefix # Backward compatibility
funcs.append(func)
if unpack:
return funcs[0]
else:
return funcs
def _memoize_get_funcs(func):
"""
Memoized fast path for _get_funcs instances
"""
memo = {}
func.memo = memo
@functools.wraps(func)
def getter(names, arrays=(), dtype=None, ilp64=False):
key = (names, dtype, ilp64)
for array in arrays:
# cf. find_blas_funcs
key += (array.dtype.char, array.flags.fortran)
try:
value = memo.get(key)
except TypeError:
# unhashable key etc.
key = None
value = None
if value is not None:
return value
value = func(names, arrays, dtype, ilp64)
if key is not None:
memo[key] = value
return value
return getter
@_memoize_get_funcs
def get_blas_funcs(names, arrays=(), dtype=None, ilp64=False):
"""Return available BLAS function objects from names.
Arrays are used to determine the optimal prefix of BLAS routines.
Parameters
----------
names : str or sequence of str
Name(s) of BLAS functions without type prefix.
arrays : sequence of ndarrays, optional
Arrays can be given to determine optimal prefix of BLAS
routines. If not given, double-precision routines will be
used, otherwise the most generic type in arrays will be used.
dtype : str or dtype, optional
Data-type specifier. Not used if `arrays` is non-empty.
ilp64 : {True, False, 'preferred'}, optional
Whether to return ILP64 routine variant.
Choosing 'preferred' returns ILP64 routine if available,
and otherwise the 32-bit routine. Default: False
Returns
-------
funcs : list
List containing the found function(s).
Notes
-----
This routine automatically chooses between Fortran/C
interfaces. Fortran code is used whenever possible for arrays with
column major order. In all other cases, C code is preferred.
In BLAS, the naming convention is that all functions start with a
type prefix, which depends on the type of the principal
matrix. These can be one of {'s', 'd', 'c', 'z'} for the NumPy
types {float32, float64, complex64, complex128} respectively.
The code and the dtype are stored in attributes `typecode` and `dtype`
of the returned functions.
Examples
--------
>>> import numpy as np
>>> import scipy.linalg as LA
>>> rng = np.random.default_rng()
>>> a = rng.random((3,2))
>>> x_gemv = LA.get_blas_funcs('gemv', (a,))
>>> x_gemv.typecode
'd'
>>> x_gemv = LA.get_blas_funcs('gemv',(a*1j,))
>>> x_gemv.typecode
'z'
"""
if isinstance(ilp64, str):
if ilp64 == 'preferred':
ilp64 = HAS_ILP64
else:
raise ValueError("Invalid value for 'ilp64'")
if not ilp64:
return _get_funcs(names, arrays, dtype,
"BLAS", _fblas, _cblas, "fblas", "cblas",
_blas_alias, ilp64=False)
else:
if not HAS_ILP64:
raise RuntimeError("BLAS ILP64 routine requested, but Scipy "
"compiled only with 32-bit BLAS")
return _get_funcs(names, arrays, dtype,
"BLAS", _fblas_64, None, "fblas_64", None,
_blas_alias, ilp64=True)
| 11,677
| 23.078351
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/_decomp_svd.py
|
"""SVD decomposition functions."""
import numpy
from numpy import zeros, r_, diag, dot, arccos, arcsin, where, clip
# Local imports.
from ._misc import LinAlgError, _datacopied
from .lapack import get_lapack_funcs, _compute_lwork
from ._decomp import _asarray_validated
__all__ = ['svd', 'svdvals', 'diagsvd', 'orth', 'subspace_angles', 'null_space']
def svd(a, full_matrices=True, compute_uv=True, overwrite_a=False,
        check_finite=True, lapack_driver='gesdd'):
    """
    Singular Value Decomposition.

    Factorize `a` as ``U @ S @ Vh``, where ``U`` and ``Vh`` are unitary and
    ``S`` is a suitably shaped zero matrix carrying the real, non-negative
    singular values ``s`` on its main diagonal.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to decompose.
    full_matrices : bool, optional
        If True (default), ``U`` and ``Vh`` have shapes ``(M, M)`` and
        ``(N, N)``; otherwise the shapes are ``(M, K)`` and ``(K, N)``
        with ``K = min(M, N)``.
    compute_uv : bool, optional
        Whether to compute ``U`` and ``Vh`` in addition to ``s``.
        Default is True.
    overwrite_a : bool, optional
        Whether to overwrite `a`; may improve performance. Default is False.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities
        or NaNs.
    lapack_driver : {'gesdd', 'gesvd'}, optional
        LAPACK routine used: the divide-and-conquer approach (``'gesdd'``,
        default) or the general rectangular approach (``'gesvd'``).

        .. versionadded:: 0.18

    Returns
    -------
    U : ndarray
        Unitary matrix with left singular vectors as columns.
        Only returned when `compute_uv` is True.
    s : ndarray
        Singular values, sorted in non-increasing order, of shape ``(K,)``.
    Vh : ndarray
        Unitary matrix with right singular vectors as rows.
        Only returned when `compute_uv` is True.

    Raises
    ------
    LinAlgError
        If SVD computation does not converge.

    See Also
    --------
    svdvals : Compute singular values of a matrix.
    diagsvd : Construct the Sigma matrix, given the vector s.
    """
    arr = _asarray_validated(a, check_finite=check_finite)
    if arr.ndim != 2:
        raise ValueError('expected matrix')
    overwrite_a = overwrite_a or (_datacopied(arr, a))
    if not isinstance(lapack_driver, str):
        raise TypeError('lapack_driver must be a string')
    if lapack_driver not in ('gesdd', 'gesvd'):
        raise ValueError('lapack_driver must be "gesdd" or "gesvd", not "%s"'
                         % (lapack_driver,))
    # Fetch the chosen driver together with its workspace-query companion.
    gesXd, gesXd_lwork = get_lapack_funcs(
        (lapack_driver, lapack_driver + '_lwork'), (arr,), ilp64='preferred')
    # Query the optimal workspace size before the actual factorization.
    rows, cols = arr.shape
    lwork = _compute_lwork(gesXd_lwork, rows, cols,
                           compute_uv=compute_uv, full_matrices=full_matrices)
    # Run the decomposition.
    u, s, v, info = gesXd(arr, compute_uv=compute_uv, lwork=lwork,
                          full_matrices=full_matrices,
                          overwrite_a=overwrite_a)
    if info > 0:
        raise LinAlgError("SVD did not converge")
    if info < 0:
        raise ValueError('illegal value in %dth argument of internal gesdd'
                         % -info)
    return (u, s, v) if compute_uv else s
def svdvals(a, overwrite_a=False, check_finite=True):
    """
    Compute singular values of a matrix.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to decompose.
    overwrite_a : bool, optional
        Whether to overwrite `a`; may improve performance. Default is False.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities
        or NaNs.

    Returns
    -------
    s : (min(M, N),) ndarray
        The singular values, sorted in decreasing order.

    Raises
    ------
    LinAlgError
        If SVD computation does not converge.

    See Also
    --------
    svd : Compute the full singular value decomposition of a matrix.
    diagsvd : Construct the Sigma matrix, given the vector s.

    Notes
    -----
    ``svdvals(a)`` only differs from ``svd(a, compute_uv=False)`` by its
    handling of the edge case of empty ``a``, where it returns an empty
    sequence.
    """
    a = _asarray_validated(a, check_finite=check_finite)
    if a.size:
        # Delegate to svd; skip the redundant finiteness check since the
        # input was already validated above.
        return svd(a, compute_uv=0, overwrite_a=overwrite_a,
                   check_finite=False)
    if a.ndim != 2:
        raise ValueError('expected matrix')
    # Empty matrix: by convention return an empty array of singular values.
    return numpy.empty(0)
def diagsvd(s, M, N):
    """
    Construct the sigma matrix in SVD from singular values and size M, N.

    Parameters
    ----------
    s : (M,) or (N,) array_like
        Singular values.
    M : int
        Number of rows of the matrix whose singular values are `s`.
    N : int
        Number of columns of the matrix whose singular values are `s`.

    Returns
    -------
    S : (M, N) ndarray
        The S-matrix in the singular value decomposition: `s` on the main
        diagonal, zeros elsewhere.

    See Also
    --------
    svd : Singular value decomposition of a matrix
    svdvals : Compute singular values of a matrix.
    """
    core = diag(s)
    char = core.dtype.char
    k = len(s)
    if k == M:
        # len(s) matches the row count: pad with zero columns on the right.
        return numpy.hstack((core, zeros((M, N - M), dtype=char)))
    if k == N:
        # len(s) matches the column count: pad with zero rows at the bottom.
        return r_[core, zeros((M - N, N), dtype=char)]
    raise ValueError("Length of s must be M or N.")
# Orthonormal decomposition
def orth(A, rcond=None):
    """
    Construct an orthonormal basis for the range of A using SVD.

    Parameters
    ----------
    A : (M, N) array_like
        Input array.
    rcond : float, optional
        Relative condition number. Singular values ``s`` smaller than
        ``rcond * max(s)`` are considered zero.
        Default: floating point eps * max(M, N).

    Returns
    -------
    Q : (M, K) ndarray
        Orthonormal basis for the range of A, where K is the effective
        rank of A as determined by `rcond`.

    See Also
    --------
    svd : Singular value decomposition of a matrix
    null_space : Matrix null space
    """
    u, s, vh = svd(A, full_matrices=False)
    M, N = u.shape[0], vh.shape[1]
    if rcond is None:
        # Default tolerance scales with matrix size and dtype precision.
        rcond = numpy.finfo(s.dtype).eps * max(M, N)
    cutoff = numpy.amax(s) * rcond
    rank = int(numpy.sum(s > cutoff))
    # The leading `rank` left singular vectors span the column space.
    return u[:, :rank]
def null_space(A, rcond=None):
    """
    Construct an orthonormal basis for the null space of A using SVD.

    Parameters
    ----------
    A : (M, N) array_like
        Input array.
    rcond : float, optional
        Relative condition number. Singular values ``s`` smaller than
        ``rcond * max(s)`` are considered zero.
        Default: floating point eps * max(M, N).

    Returns
    -------
    Z : (N, K) ndarray
        Orthonormal basis for the null space of A, where K is the dimension
        of the effective null space as determined by `rcond`.

    See Also
    --------
    svd : Singular value decomposition of a matrix
    orth : Matrix range
    """
    u, s, vh = svd(A, full_matrices=True)
    M, N = u.shape[0], vh.shape[1]
    if rcond is None:
        # Default tolerance scales with matrix size and dtype precision.
        rcond = numpy.finfo(s.dtype).eps * max(M, N)
    cutoff = numpy.amax(s) * rcond
    rank = int(numpy.sum(s > cutoff))
    # Rows of vh beyond the numerical rank span the null space; transpose
    # (with conjugation) to return the basis as columns.
    return vh[rank:, :].conj().T
def subspace_angles(A, B):
    r"""
    Compute the subspace angles between two matrices.
    Parameters
    ----------
    A : (M, N) array_like
        The first input array.
    B : (M, K) array_like
        The second input array.
    Returns
    -------
    angles : ndarray, shape (min(N, K),)
        The subspace angles between the column spaces of `A` and `B` in
        descending order.
    Raises
    ------
    ValueError
        If `A` or `B` is not 2-D, or if they do not have the same number
        of rows.
    See Also
    --------
    orth
    svd
    Notes
    -----
    This computes the subspace angles according to the formula
    provided in [1]_. For equivalence with MATLAB and Octave behavior,
    use ``angles[0]``.
    .. versionadded:: 1.0
    References
    ----------
    .. [1] Knyazev A, Argentati M (2002) Principal Angles between Subspaces
           in an A-Based Scalar Product: Algorithms and Perturbation
           Estimates. SIAM J. Sci. Comput. 23:2008-2040.
    Examples
    --------
    An Hadamard matrix, which has orthogonal columns, so we expect that
    the suspace angle to be :math:`\frac{\pi}{2}`:
    >>> import numpy as np
    >>> from scipy.linalg import hadamard, subspace_angles
    >>> rng = np.random.default_rng()
    >>> H = hadamard(4)
    >>> print(H)
    [[ 1  1  1  1]
     [ 1 -1  1 -1]
     [ 1  1 -1 -1]
     [ 1 -1 -1  1]]
    >>> np.rad2deg(subspace_angles(H[:, :2], H[:, 2:]))
    array([ 90.,  90.])
    And the subspace angle of a matrix to itself should be zero:
    >>> subspace_angles(H[:, :2], H[:, :2]) <= 2 * np.finfo(float).eps
    array([ True,  True], dtype=bool)
    The angles between non-orthogonal subspaces are in between these extremes:
    >>> x = rng.standard_normal((4, 3))
    >>> np.rad2deg(subspace_angles(x[:, :2], x[:, [2]]))
    array([ 55.832])  # random
    """
    # Steps here omit the U and V calculation steps from the paper
    # 1. Compute orthonormal bases of column-spaces
    A = _asarray_validated(A, check_finite=True)
    if len(A.shape) != 2:
        raise ValueError(f'expected 2D array, got shape {A.shape}')
    QA = orth(A)
    del A  # only the orthonormal basis QA is needed from here on
    B = _asarray_validated(B, check_finite=True)
    if len(B.shape) != 2:
        raise ValueError(f'expected 2D array, got shape {B.shape}')
    if len(B) != len(QA):
        raise ValueError('A and B must have the same number of rows, got '
                         '{} and {}'.format(QA.shape[0], B.shape[0]))
    QB = orth(B)
    del B
    # 2. Compute SVD for cosine
    # Singular values of QA^H @ QB are the cosines of the principal angles.
    QA_H_QB = dot(QA.T.conj(), QB)
    sigma = svdvals(QA_H_QB)
    # 3. Compute matrix B
    # Residual of the smaller basis projected onto the larger one; its
    # singular values give the sines of the principal angles.
    if QA.shape[1] >= QB.shape[1]:
        B = QB - dot(QA, QA_H_QB)
    else:
        B = QA - dot(QB, QA_H_QB.T.conj())
    del QA, QB, QA_H_QB
    # 4. Compute SVD for sine
    # For angles at or above pi/4 (sigma**2 >= 0.5), the sine-based formula
    # is numerically more accurate than arccos of a cosine near 1.
    mask = sigma ** 2 >= 0.5
    if mask.any():
        mu_arcsin = arcsin(clip(svdvals(B, overwrite_a=True), -1., 1.))
    else:
        mu_arcsin = 0.
    # 5. Compute the principal angles
    # with reverse ordering of sigma because smallest sigma belongs to largest
    # angle theta
    theta = where(mask, mu_arcsin, arccos(clip(sigma[::-1], -1., 1.)))
    return theta
| 14,923
| 28.611111
| 80
|
py
|
scipy
|
scipy-main/scipy/linalg/special_matrices.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.linalg` namespace for importing the functions
# included below.
import warnings
from . import _special_matrices
__all__ = [ # noqa: F822
'tri', 'tril', 'triu', 'toeplitz', 'circulant', 'hankel',
'hadamard', 'leslie', 'kron', 'block_diag', 'companion',
'helmert', 'hilbert', 'invhilbert', 'pascal', 'invpascal', 'dft',
'fiedler', 'fiedler_companion', 'convolution_matrix', 'as_strided'
]
def __dir__():
    # Advertise only the deprecated, re-exported names for introspection
    # and tab completion.
    return __all__
def __getattr__(name):
    # Lazily forward attribute access to the private implementation module,
    # emitting a DeprecationWarning for known names; unknown names raise
    # AttributeError so standard attribute semantics are preserved.
    if name not in __all__:
        raise AttributeError(
            "scipy.linalg.special_matrices is deprecated and has no attribute "
            f"{name}. Try looking in scipy.linalg instead.")
    warnings.warn(f"Please use `{name}` from the `scipy.linalg` namespace, the"
                  " `scipy.linalg.special_matrices` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    # Resolve the actual object from the private module.
    return getattr(_special_matrices, name)
| 1,026
| 32.129032
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/_decomp_lu.py
|
"""LU decomposition functions."""
from warnings import warn
from numpy import asarray, asarray_chkfinite
import numpy as np
from itertools import product
# Local imports
from ._misc import _datacopied, LinAlgWarning
from .lapack import get_lapack_funcs
from ._decomp_lu_cython import lu_dispatcher
lapack_cast_dict = {x: ''.join([y for y in 'fdFD' if np.can_cast(x, y)])
for x in np.typecodes['All']}
__all__ = ['lu', 'lu_solve', 'lu_factor']
def lu_factor(a, overwrite_a=False, check_finite=True):
    """
    Compute pivoted LU decomposition of a matrix.

    The decomposition is ``A = P L U``, where ``P`` is a permutation matrix,
    ``L`` is lower triangular with unit diagonal elements, and ``U`` is
    upper triangular.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to decompose.
    overwrite_a : bool, optional
        Whether to overwrite data in `a` (may increase performance).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities
        or NaNs.

    Returns
    -------
    lu : (M, N) ndarray
        Matrix containing U in its upper triangle and L in its lower
        triangle; the unit diagonal elements of L are not stored.
    piv : (N,) ndarray
        0-based pivot indices representing the permutation matrix P:
        row ``i`` of the matrix was interchanged with row ``piv[i]``.

    See Also
    --------
    lu : gives lu factorization in more user-friendly format
    lu_solve : solve an equation system using the LU factorization

    Notes
    -----
    This is a wrapper to the ``*GETRF`` routines from LAPACK. While the
    underlying routines return 1-based pivot indices, the ``piv`` array
    returned here contains 0-based indices.
    """
    a1 = asarray_chkfinite(a) if check_finite else asarray(a)
    overwrite_a = overwrite_a or _datacopied(a1, a)
    getrf, = get_lapack_funcs(('getrf',), (a1,))
    lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %dth argument of '
                         'internal getrf (lu_factor)' % -info)
    if info > 0:
        # A zero pivot means the matrix is singular; the factorization is
        # still returned, so only warn rather than raise.
        warn("Diagonal number %d is exactly zero. Singular matrix." % info,
             LinAlgWarning, stacklevel=2)
    return lu, piv
def lu_solve(lu_and_piv, b, trans=0, overwrite_b=False, check_finite=True):
    """Solve an equation system, a x = b, given the LU factorization of a.

    Parameters
    ----------
    (lu, piv)
        Factorization of the coefficient matrix a, as given by lu_factor.
        In particular piv are 0-indexed pivot indices.
    b : array
        Right-hand side
    trans : {0, 1, 2}, optional
        Type of system to solve:

        ===== =========
        trans system
        ===== =========
        0     a x   = b
        1     a^T x = b
        2     a^H x = b
        ===== =========
    overwrite_b : bool, optional
        Whether to overwrite data in b (may increase performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities
        or NaNs.

    Returns
    -------
    x : array
        Solution to the system

    Raises
    ------
    ValueError
        If the shapes of ``lu`` and ``b`` are incompatible, or if LAPACK
        reports an illegal argument value.

    See Also
    --------
    lu_factor : LU factorize a matrix
    """
    (lu, piv) = lu_and_piv
    if check_finite:
        b1 = asarray_chkfinite(b)
    else:
        b1 = asarray(b)
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if lu.shape[0] != b1.shape[0]:
        raise ValueError("Shapes of lu {} and b {} are incompatible"
                         .format(lu.shape, b1.shape))
    getrs, = get_lapack_funcs(('getrs',), (lu, b1))
    x, info = getrs(lu, piv, b1, trans=trans, overwrite_b=overwrite_b)
    if info == 0:
        return x
    # GETRS only reports info < 0 (illegal argument). Name the routine that
    # actually failed; the message previously said "gesv|posv", which is
    # misleading since neither routine is called here.
    raise ValueError('illegal value in %dth argument of internal getrs'
                     % -info)
def lu(a, permute_l=False, overwrite_a=False, check_finite=True,
       p_indices=False):
    """
    Compute LU decomposition of a matrix with partial pivoting.
    The decomposition satisfies::
        A = P @ L @ U
    where ``P`` is a permutation matrix, ``L`` lower triangular with unit
    diagonal elements, and ``U`` upper triangular. If `permute_l` is set to
    ``True`` then ``L`` is returned already permuted and hence satisfying
    ``A = L @ U``.
    Parameters
    ----------
    a : (M, N) array_like
        Array to decompose
    permute_l : bool, optional
        Perform the multiplication P*L (Default: do not permute)
    overwrite_a : bool, optional
        Whether to overwrite data in a (may improve performance)
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    p_indices : bool, optional
        If ``True`` the permutation information is returned as row indices.
        The default is ``False`` for backwards-compatibility reasons.
    Returns
    -------
    **(If `permute_l` is ``False``)**
    p : (..., M, M) ndarray
        Permutation arrays or vectors depending on `p_indices`
    l : (..., M, K) ndarray
        Lower triangular or trapezoidal array with unit diagonal.
        ``K = min(M, N)``
    u : (..., K, N) ndarray
        Upper triangular or trapezoidal array
    **(If `permute_l` is ``True``)**
    pl : (..., M, K) ndarray
        Permuted L matrix.
        ``K = min(M, N)``
    u : (..., K, N) ndarray
        Upper triangular or trapezoidal array
    Notes
    -----
    Permutation matrices are costly since they are nothing but row reorder of
    ``L`` and hence indices are strongly recommended to be used instead if the
    permutation is required. The relation in the 2D case then becomes simply
    ``A = L[P, :] @ U``. In higher dimensions, it is better to use `permute_l`
    to avoid complicated indexing tricks.
    In 2D case, if one has the indices however, for some reason, the
    permutation matrix is still needed then it can be constructed by
    ``np.eye(M)[P, :]``.
    """
    a1 = np.asarray_chkfinite(a) if check_finite else np.asarray(a)
    if a1.ndim < 2:
        raise ValueError('The input array must be at least two-dimensional.')
    # Also check if dtype is LAPACK compatible
    if a1.dtype.char not in 'fdFD':
        dtype_char = lapack_cast_dict[a1.dtype.char]
        if not dtype_char:  # No casting possible
            raise TypeError(f'The dtype {a1.dtype} cannot be cast '
                            'to float(32, 64) or complex(64, 128).')
        a1 = a1.astype(dtype_char[0])  # makes a copy, free to scratch
        overwrite_a = True
    # nd holds the leading (batch) dimensions; m, n are the matrix shape.
    *nd, m, n = a1.shape
    k = min(m, n)
    real_dchar = 'f' if a1.dtype.char in 'fF' else 'd'
    # Empty input
    if min(*a1.shape) == 0:
        if permute_l:
            PL = np.empty(shape=[*nd, m, k], dtype=a1.dtype)
            U = np.empty(shape=[*nd, k, n], dtype=a1.dtype)
            return PL, U
        else:
            P = (np.empty([*nd, 0], dtype=np.int32) if p_indices else
                 np.empty([*nd, 0, 0], dtype=real_dchar))
            L = np.empty(shape=[*nd, m, k], dtype=a1.dtype)
            U = np.empty(shape=[*nd, k, n], dtype=a1.dtype)
            return P, L, U
    # Scalar case
    if a1.shape[-2:] == (1, 1):
        # Trivial factorization: P and L are identity-like, U is the input.
        if permute_l:
            return np.ones_like(a1), (a1 if overwrite_a else a1.copy())
        else:
            P = (np.zeros(shape=[*nd, m], dtype=int) if p_indices
                 else np.ones_like(a1))
            return P, np.ones_like(a1), (a1 if overwrite_a else a1.copy())
    # Then check overwrite permission
    if not _datacopied(a1, a):  # "a" still alive through "a1"
        if not overwrite_a:
            # Data belongs to "a" so make a copy
            a1 = a1.copy(order='C')
        #  else: Do nothing we'll use "a" if possible
    # else: a1 has its own data thus free to scratch
    # Then layout checks, might happen that overwrite is allowed but original
    # array was read-only or non-contiguous.
    if not (a1.flags['C_CONTIGUOUS'] and a1.flags['WRITEABLE']):
        a1 = a1.copy(order='C')
    if not nd:  # 2D array
        # lu_dispatcher factorizes in place: the smaller factor is written
        # into the preallocated square array, the larger shares a1's memory.
        p = np.empty(m, dtype=np.int32)
        u = np.zeros([k, k], dtype=a1.dtype)
        lu_dispatcher(a1, u, p, permute_l)
        P, L, U = (p, a1, u) if m > n else (p, u, a1)
    else:  # Stacked array
        # Prepare the contiguous data holders
        P = np.empty([*nd, m], dtype=np.int32)  # perm vecs
        if m > n:  # Tall arrays, U will be created
            U = np.zeros([*nd, k, k], dtype=a1.dtype)
            for ind in product(*[range(x) for x in a1.shape[:-2]]):
                lu_dispatcher(a1[ind], U[ind], P[ind], permute_l)
            L = a1
        else:  # Fat arrays, L will be created
            L = np.zeros([*nd, k, k], dtype=a1.dtype)
            for ind in product(*[range(x) for x in a1.shape[:-2]]):
                lu_dispatcher(a1[ind], L[ind], P[ind], permute_l)
            U = a1
    # Convert permutation vecs to permutation arrays
    # permute_l=False needed to enter here to avoid wasted efforts
    if (not p_indices) and (not permute_l):
        if nd:
            Pa = np.zeros([*nd, m, m], dtype=real_dchar)
            # An unreadable index hack - One-hot encoding for perm matrices
            nd_ix = np.ix_(*([np.arange(x) for x in nd]+[np.arange(m)]))
            Pa[(*nd_ix, P)] = 1
            P = Pa
        else:  # 2D case
            Pa = np.zeros([m, m], dtype=real_dchar)
            Pa[np.arange(m), P] = 1
            P = Pa
    return (L, U) if permute_l else (P, L, U)
| 12,573
| 32.530667
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/_decomp_cholesky.py
|
"""Cholesky decomposition functions."""
from numpy import asarray_chkfinite, asarray, atleast_2d
# Local imports
from ._misc import LinAlgError, _datacopied
from .lapack import get_lapack_funcs
__all__ = ['cholesky', 'cho_factor', 'cho_solve', 'cholesky_banded',
'cho_solve_banded']
def _cholesky(a, lower=False, overwrite_a=False, clean=True,
              check_finite=True):
    """Shared implementation behind cholesky() and cho_factor().

    When `clean` is True, the unused triangle of the result is zeroed.
    """
    a1 = atleast_2d(asarray_chkfinite(a) if check_finite else asarray(a))
    # Dimension check
    if a1.ndim != 2:
        raise ValueError('Input array needs to be 2D but received '
                         'a {}d-array.'.format(a1.ndim))
    # Squareness check
    if a1.shape[0] != a1.shape[1]:
        raise ValueError('Input array is expected to be square but has '
                         'the shape: {}.'.format(a1.shape))
    # Quick return for square empty array
    if a1.size == 0:
        return a1.copy(), lower
    overwrite_a = overwrite_a or _datacopied(a1, a)
    potrf, = get_lapack_funcs(('potrf',), (a1,))
    c, info = potrf(a1, lower=lower, overwrite_a=overwrite_a, clean=clean)
    if info > 0:
        raise LinAlgError("%d-th leading minor of the array is not positive "
                          "definite" % info)
    if info < 0:
        raise ValueError('LAPACK reported an illegal value in {}-th argument'
                         'on entry to "POTRF".'.format(-info))
    return c, lower
def cholesky(a, lower=False, overwrite_a=False, check_finite=True):
    """
    Compute the Cholesky decomposition of a matrix.

    Returns the Cholesky decomposition, :math:`A = L L^*` or
    :math:`A = U^* U`, of a Hermitian positive-definite matrix `a`.

    Parameters
    ----------
    a : (M, M) array_like
        Matrix to be decomposed.
    lower : bool, optional
        Whether to compute the upper- or lower-triangular Cholesky
        factorization. Default is upper-triangular.
    overwrite_a : bool, optional
        Whether to overwrite data in `a` (may improve performance).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities
        or NaNs.

    Returns
    -------
    c : (M, M) ndarray
        Upper- or lower-triangular Cholesky factor of `a`, with the unused
        triangle zeroed.

    Raises
    ------
    LinAlgError : if decomposition fails.
    """
    # clean=True zeroes the unused triangle; the lower flag is already
    # known to the caller, so only the factor is returned.
    factor, _ = _cholesky(a, lower=lower, overwrite_a=overwrite_a,
                          clean=True, check_finite=check_finite)
    return factor
def cho_factor(a, lower=False, overwrite_a=False, check_finite=True):
    """
    Compute the Cholesky decomposition of a matrix, to use in cho_solve.

    Returns a matrix containing the Cholesky decomposition,
    ``A = L L*`` or ``A = U* U`` of a Hermitian positive-definite matrix `a`.
    The return value can be directly used as the first parameter to
    cho_solve.

    .. warning::
        The returned matrix also contains random data in the entries not
        used by the Cholesky decomposition. If you need to zero these
        entries, use the function `cholesky` instead.

    Parameters
    ----------
    a : (M, M) array_like
        Matrix to be decomposed.
    lower : bool, optional
        Whether to compute the upper or lower triangular Cholesky
        factorization (Default: upper-triangular).
    overwrite_a : bool, optional
        Whether to overwrite data in `a` (may improve performance).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities
        or NaNs.

    Returns
    -------
    c : (M, M) ndarray
        Matrix whose upper or lower triangle contains the Cholesky factor
        of `a`. Other parts of the matrix contain random data.
    lower : bool
        Flag indicating whether the factor is in the lower or upper triangle.

    Raises
    ------
    LinAlgError
        Raised if decomposition fails.

    See Also
    --------
    cho_solve : Solve a linear set of equations using the Cholesky
                factorization of a matrix.
    """
    # clean=False leaves the unused triangle untouched (garbage), which is
    # fine since cho_solve only reads the relevant triangle.
    return _cholesky(a, lower=lower, overwrite_a=overwrite_a,
                     clean=False, check_finite=check_finite)
def cho_solve(c_and_lower, b, overwrite_b=False, check_finite=True):
    """Solve the linear equations A x = b, given the Cholesky factorization of A.

    Parameters
    ----------
    (c, lower) : tuple, (array, bool)
        Cholesky factorization of a, as given by cho_factor.
    b : array
        Right-hand side.
    overwrite_b : bool, optional
        Whether to overwrite data in b (may improve performance).
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities
        or NaNs.

    Returns
    -------
    x : array
        The solution to the system A x = b.

    See Also
    --------
    cho_factor : Cholesky factorization of a matrix
    """
    c, lower = c_and_lower
    _as = asarray_chkfinite if check_finite else asarray
    b1 = _as(b)
    c = _as(c)
    if c.ndim != 2 or c.shape[0] != c.shape[1]:
        raise ValueError("The factored matrix c is not square.")
    if c.shape[1] != b1.shape[0]:
        raise ValueError("incompatible dimensions ({} and {})"
                         .format(c.shape, b1.shape))
    overwrite_b = overwrite_b or _datacopied(b1, b)
    potrs, = get_lapack_funcs(('potrs',), (c, b1))
    x, info = potrs(c, b1, lower=lower, overwrite_b=overwrite_b)
    if info != 0:
        raise ValueError('illegal value in %dth argument of internal potrs'
                         % -info)
    return x
def cholesky_banded(ab, overwrite_ab=False, lower=False, check_finite=True):
    """
    Cholesky decompose a banded Hermitian positive-definite matrix.

    The matrix ``a`` is stored in `ab` in LAPACK banded format, either
    upper-diagonal ordered (``ab[u + i - j, j] == a[i, j]`` for ``i <= j``)
    or lower-diagonal ordered (``ab[i - j, j] == a[i, j]`` for ``i >= j``),
    where ``u`` is the number of super-diagonals.

    Parameters
    ----------
    ab : (u + 1, M) array_like
        Banded matrix.
    overwrite_ab : bool, optional
        Discard data in `ab` (may enhance performance).
    lower : bool, optional
        Is the matrix in the lower form. (Default is upper form.)
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities
        or NaNs.

    Returns
    -------
    c : (u + 1, M) ndarray
        Cholesky factorization of ``a``, in the same banded format as `ab`.

    See Also
    --------
    cho_solve_banded :
        Solve a linear set equations, given the Cholesky factorization
        of a banded Hermitian.
    """
    ab = asarray_chkfinite(ab) if check_finite else asarray(ab)
    pbtrf, = get_lapack_funcs(('pbtrf',), (ab,))
    c, info = pbtrf(ab, lower=lower, overwrite_ab=overwrite_ab)
    if info > 0:
        raise LinAlgError("%d-th leading minor not positive definite" % info)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal pbtrf'
                         % -info)
    return c
def cho_solve_banded(cb_and_lower, b, overwrite_b=False, check_finite=True):
    """Solve ``A x = b`` given the banded Cholesky factorization of ``A``.

    Parameters
    ----------
    cb_and_lower : tuple, (ndarray, bool)
        ``cb`` is the Cholesky factorization of A, as given by
        `cholesky_banded`; ``lower`` must be the same value that was given
        to `cholesky_banded`.
    b : array_like
        Right-hand side.
    overwrite_b : bool, optional
        If True, the function will overwrite the values in `b`.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or
        NaNs.

    Returns
    -------
    x : array
        The solution to the system A x = b.

    See Also
    --------
    cholesky_banded : Cholesky factorization of a banded matrix

    Notes
    -----
    .. versionadded:: 0.8.0
    """
    cb, lower = cb_and_lower
    sanitize = asarray_chkfinite if check_finite else asarray
    cb = sanitize(cb)
    rhs = sanitize(b)
    # The number of columns of cb is the order of A, which must match b.
    if cb.shape[-1] != rhs.shape[0]:
        raise ValueError("shapes of cb and b are not compatible.")
    pbtrs, = get_lapack_funcs(('pbtrs',), (cb, rhs))
    x, info = pbtrs(cb, rhs, lower=lower, overwrite_b=overwrite_b)
    if info < 0:
        raise ValueError('illegal value in %dth argument of internal pbtrs'
                         % -info)
    if info > 0:
        raise LinAlgError("%dth leading minor not positive definite" % info)
    return x
| 11,903
| 32.158774
| 81
|
py
|
scipy
|
scipy-main/scipy/linalg/decomp_cholesky.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.linalg` namespace for importing the functions
# included below.
import warnings
from . import _decomp_cholesky
# Legacy names that remain importable (with a deprecation warning).
__all__ = [  # noqa: F822
    'cholesky', 'cho_factor', 'cho_solve', 'cholesky_banded',
    'cho_solve_banded', 'asarray_chkfinite', 'atleast_2d',
    'LinAlgError', 'get_lapack_funcs'
]
def __dir__():
    # Advertise only the deprecated legacy names.
    return __all__
def __getattr__(name):
    # Known names are forwarded with a deprecation warning; anything else
    # fails fast with a pointer to the public namespace.
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.linalg` "
                      "namespace, the `scipy.linalg.decomp_cholesky` "
                      "namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_decomp_cholesky, name)
    raise AttributeError(
        "scipy.linalg.decomp_cholesky is deprecated and has no attribute "
        f"{name}. Try looking in scipy.linalg instead.")
| 917
| 29.6
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/basic.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.linalg` namespace for importing the functions
# included below.
import warnings
from . import _basic
# Legacy names that remain importable (with a deprecation warning).
__all__ = [  # noqa: F822
    'solve', 'solve_triangular', 'solveh_banded', 'solve_banded',
    'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq',
    'pinv', 'pinvh', 'matrix_balance', 'matmul_toeplitz',
    'atleast_1d', 'atleast_2d', 'get_flinalg_funcs', 'get_lapack_funcs',
    'LinAlgError', 'LinAlgWarning', 'levinson'
]
def __dir__():
    # Advertise only the deprecated legacy names.
    return __all__
def __getattr__(name):
    # Known names are forwarded with a deprecation warning; anything else
    # fails fast with a pointer to the public namespace.
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.linalg` "
                      "namespace, the `scipy.linalg.basic` namespace is "
                      "deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_basic, name)
    raise AttributeError(
        "scipy.linalg.basic is deprecated and has no attribute "
        f"{name}. Try looking in scipy.linalg instead.")
| 1,026
| 31.09375
| 76
|
py
|
scipy
|
scipy-main/scipy/linalg/_decomp.py
|
#
# Author: Pearu Peterson, March 2002
#
# additions by Travis Oliphant, March 2002
# additions by Eric Jones, June 2002
# additions by Johannes Loehnert, June 2006
# additions by Bart Vandereycken, June 2006
# additions by Andrew D Straw, May 2007
# additions by Tiziano Zito, November 2008
#
# April 2010: Functions for LU, QR, SVD, Schur, and Cholesky decompositions
# were moved to their own files. Still in this file are functions for
# eigenstuff and for the Hessenberg form.
# Public names exported from this module.
__all__ = ['eig', 'eigvals', 'eigh', 'eigvalsh',
           'eig_banded', 'eigvals_banded',
           'eigh_tridiagonal', 'eigvalsh_tridiagonal', 'hessenberg', 'cdf2rdf']
import warnings
import numpy
from numpy import (array, isfinite, inexact, nonzero, iscomplexobj, cast,
flatnonzero, conj, asarray, argsort, empty,
iscomplex, zeros, einsum, eye, inf)
# Local imports
from scipy._lib._util import _asarray_validated
from ._misc import LinAlgError, _datacopied, norm
from .lapack import get_lapack_funcs, _compute_lwork
from scipy._lib.deprecation import _NoValue
_I = cast['F'](1j)
def _make_complex_eigvecs(w, vin, dtype):
"""
Produce complex-valued eigenvectors from LAPACK DGGEV real-valued output
"""
# - see LAPACK man page DGGEV at ALPHAI
v = numpy.array(vin, dtype=dtype)
m = (w.imag > 0)
m[:-1] |= (w.imag[1:] < 0) # workaround for LAPACK bug, cf. ticket #709
for i in flatnonzero(m):
v.imag[:, i] = vin[:, i+1]
conj(v[:, i], v[:, i+1])
return v
def _make_eigvals(alpha, beta, homogeneous_eigvals):
if homogeneous_eigvals:
if beta is None:
return numpy.vstack((alpha, numpy.ones_like(alpha)))
else:
return numpy.vstack((alpha, beta))
else:
if beta is None:
return alpha
else:
w = numpy.empty_like(alpha)
alpha_zero = (alpha == 0)
beta_zero = (beta == 0)
beta_nonzero = ~beta_zero
w[beta_nonzero] = alpha[beta_nonzero]/beta[beta_nonzero]
# Use numpy.inf for complex values too since
# 1/numpy.inf = 0, i.e., it correctly behaves as projective
# infinity.
w[~alpha_zero & beta_zero] = numpy.inf
if numpy.all(alpha.imag == 0):
w[alpha_zero & beta_zero] = numpy.nan
else:
w[alpha_zero & beta_zero] = complex(numpy.nan, numpy.nan)
return w
def _geneig(a1, b1, left, right, overwrite_a, overwrite_b,
            homogeneous_eigvals):
    """Solve the generalized eigenvalue problem ``a1 x = w b1 x`` via
    LAPACK ``ggev``, returning eigenvalues and (when requested)
    normalized left/right eigenvectors.

    Inputs are assumed already validated by the caller (`eig`).
    """
    ggev, = get_lapack_funcs(('ggev',), (a1, b1))
    cvl, cvr = left, right
    # Workspace query: lwork=-1 makes ggev report the optimal lwork in
    # the next-to-last return value.
    res = ggev(a1, b1, lwork=-1)
    lwork = res[-2][0].real.astype(numpy.int_)
    if ggev.typecode in 'cz':
        # Complex-typed driver returns alpha/beta directly.
        alpha, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr, lwork,
                                               overwrite_a, overwrite_b)
        w = _make_eigvals(alpha, beta, homogeneous_eigvals)
    else:
        # Real-typed driver returns the real and imaginary parts of alpha
        # separately.
        alphar, alphai, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr,
                                                        lwork, overwrite_a,
                                                        overwrite_b)
        alpha = alphar + _I * alphai
        w = _make_eigvals(alpha, beta, homogeneous_eigvals)
    _check_info(info, 'generalized eig algorithm (ggev)')
    only_real = numpy.all(w.imag == 0.0)
    if not (ggev.typecode in 'cz' or only_real):
        # Real driver with complex eigenvalues: expand the packed real
        # eigenvector columns into complex ones.
        t = w.dtype.char
        if left:
            vl = _make_complex_eigvecs(w, vl, t)
        if right:
            vr = _make_complex_eigvecs(w, vr, t)
    # the eigenvectors returned by the lapack function are NOT normalized
    for i in range(vr.shape[0]):
        if right:
            vr[:, i] /= norm(vr[:, i])
        if left:
            vl[:, i] /= norm(vl[:, i])
    if not (left or right):
        return w
    if left:
        if right:
            return w, vl, vr
        return w, vl
    return w, vr
def eig(a, b=None, left=False, right=True, overwrite_a=False,
        overwrite_b=False, check_finite=True, homogeneous_eigvals=False):
    """
    Solve an ordinary or generalized eigenvalue problem of a square matrix.
    Find eigenvalues w and right or left eigenvectors of a general matrix::
        a   vr[:,i] = w[i]        b   vr[:,i]
        a.H vl[:,i] = w[i].conj() b.H vl[:,i]
    where ``.H`` is the Hermitian conjugation.
    Parameters
    ----------
    a : (M, M) array_like
        A complex or real matrix whose eigenvalues and eigenvectors
        will be computed.
    b : (M, M) array_like, optional
        Right-hand side matrix in a generalized eigenvalue problem.
        Default is None, identity matrix is assumed.
    left : bool, optional
        Whether to calculate and return left eigenvectors. Default is False.
    right : bool, optional
        Whether to calculate and return right eigenvectors. Default is True.
    overwrite_a : bool, optional
        Whether to overwrite `a`; may improve performance. Default is False.
    overwrite_b : bool, optional
        Whether to overwrite `b`; may improve performance. Default is False.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    homogeneous_eigvals : bool, optional
        If True, return the eigenvalues in homogeneous coordinates.
        In this case ``w`` is a (2, M) array so that::
            w[1,i] a vr[:,i] = w[0,i] b vr[:,i]
        Default is False.
    Returns
    -------
    w : (M,) or (2, M) double or complex ndarray
        The eigenvalues, each repeated according to its
        multiplicity. The shape is (M,) unless
        ``homogeneous_eigvals=True``.
    vl : (M, M) double or complex ndarray
        The normalized left eigenvector corresponding to the eigenvalue
        ``w[i]`` is the column vl[:,i]. Only returned if ``left=True``.
    vr : (M, M) double or complex ndarray
        The normalized right eigenvector corresponding to the eigenvalue
        ``w[i]`` is the column ``vr[:,i]``. Only returned if ``right=True``.
    Raises
    ------
    LinAlgError
        If eigenvalue computation does not converge.
    See Also
    --------
    eigvals : eigenvalues of general arrays
    eigh : Eigenvalues and right eigenvectors for symmetric/Hermitian arrays.
    eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian
        band matrices
    eigh_tridiagonal : eigenvalues and right eiegenvectors for
        symmetric/Hermitian tridiagonal matrices
    Examples
    --------
    >>> import numpy as np
    >>> from scipy import linalg
    >>> a = np.array([[0., -1.], [1., 0.]])
    >>> linalg.eigvals(a)
    array([0.+1.j, 0.-1.j])
    >>> b = np.array([[0., 1.], [1., 1.]])
    >>> linalg.eigvals(a, b)
    array([ 1.+0.j, -1.+0.j])
    >>> a = np.array([[3., 0., 0.], [0., 8., 0.], [0., 0., 7.]])
    >>> linalg.eigvals(a, homogeneous_eigvals=True)
    array([[3.+0.j, 8.+0.j, 7.+0.j],
           [1.+0.j, 1.+0.j, 1.+0.j]])
    >>> a = np.array([[0., -1.], [1., 0.]])
    >>> linalg.eigvals(a) == linalg.eig(a)[0]
    array([ True,  True])
    >>> linalg.eig(a, left=True, right=False)[1] # normalized left eigenvector
    array([[-0.70710678+0.j        , -0.70710678-0.j        ],
           [-0.        +0.70710678j, -0.        -0.70710678j]])
    >>> linalg.eig(a, left=False, right=True)[1] # normalized right eigenvector
    array([[0.70710678+0.j        , 0.70710678-0.j        ],
           [0.        -0.70710678j, 0.        +0.70710678j]])
    """
    a1 = _asarray_validated(a, check_finite=check_finite)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    if b is not None:
        # Generalized problem: validate b and delegate to the ggev path.
        b1 = _asarray_validated(b, check_finite=check_finite)
        overwrite_b = overwrite_b or _datacopied(b1, b)
        if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]:
            raise ValueError('expected square matrix')
        if b1.shape != a1.shape:
            raise ValueError('a and b must have the same shape')
        return _geneig(a1, b1, left, right, overwrite_a, overwrite_b,
                       homogeneous_eigvals)
    geev, geev_lwork = get_lapack_funcs(('geev', 'geev_lwork'), (a1,))
    compute_vl, compute_vr = left, right
    # Query the optimal workspace size before the actual factorization.
    lwork = _compute_lwork(geev_lwork, a1.shape[0],
                           compute_vl=compute_vl,
                           compute_vr=compute_vr)
    if geev.typecode in 'cz':
        w, vl, vr, info = geev(a1, lwork=lwork,
                               compute_vl=compute_vl,
                               compute_vr=compute_vr,
                               overwrite_a=overwrite_a)
        w = _make_eigvals(w, None, homogeneous_eigvals)
    else:
        # Real-typed drivers return real/imaginary parts separately.
        wr, wi, vl, vr, info = geev(a1, lwork=lwork,
                                    compute_vl=compute_vl,
                                    compute_vr=compute_vr,
                                    overwrite_a=overwrite_a)
        w = wr + _I * wi
        w = _make_eigvals(w, None, homogeneous_eigvals)
    _check_info(info, 'eig algorithm (geev)',
                positive='did not converge (only eigenvalues '
                         'with order >= %d have converged)')
    only_real = numpy.all(w.imag == 0.0)
    if not (geev.typecode in 'cz' or only_real):
        # Real driver produced complex eigenvalues: unpack the paired real
        # eigenvector columns into genuinely complex columns.
        t = w.dtype.char
        if left:
            vl = _make_complex_eigvecs(w, vl, t)
        if right:
            vr = _make_complex_eigvecs(w, vr, t)
    if not (left or right):
        return w
    if left:
        if right:
            return w, vl, vr
        return w, vl
    return w, vr
def eigh(a, b=None, lower=True, eigvals_only=False, overwrite_a=False,
         overwrite_b=False, turbo=_NoValue, eigvals=_NoValue, type=1,
         check_finite=True, subset_by_index=None, subset_by_value=None,
         driver=None):
    """
    Solve a standard or generalized eigenvalue problem for a complex
    Hermitian or real symmetric matrix.
    Find eigenvalues array ``w`` and optionally eigenvectors array ``v`` of
    array ``a``, where ``b`` is positive definite such that for every
    eigenvalue λ (i-th entry of w) and its eigenvector ``vi`` (i-th column of
    ``v``) satisfies::
        a @ vi = λ * b @ vi
        vi.conj().T @ a @ vi = λ
        vi.conj().T @ b @ vi = 1
    In the standard problem, ``b`` is assumed to be the identity matrix.
    Parameters
    ----------
    a : (M, M) array_like
        A complex Hermitian or real symmetric matrix whose eigenvalues and
        eigenvectors will be computed.
    b : (M, M) array_like, optional
        A complex Hermitian or real symmetric definite positive matrix in.
        If omitted, identity matrix is assumed.
    lower : bool, optional
        Whether the pertinent array data is taken from the lower or upper
        triangle of ``a`` and, if applicable, ``b``. (Default: lower)
    eigvals_only : bool, optional
        Whether to calculate only eigenvalues and no eigenvectors.
        (Default: both are calculated)
    subset_by_index : iterable, optional
        If provided, this two-element iterable defines the start and the end
        indices of the desired eigenvalues (ascending order and 0-indexed).
        To return only the second smallest to fifth smallest eigenvalues,
        ``[1, 4]`` is used. ``[n-3, n-1]`` returns the largest three. Only
        available with "evr", "evx", and "gvx" drivers. The entries are
        directly converted to integers via ``int()``.
    subset_by_value : iterable, optional
        If provided, this two-element iterable defines the half-open interval
        ``(a, b]`` that, if any, only the eigenvalues between these values
        are returned. Only available with "evr", "evx", and "gvx" drivers. Use
        ``np.inf`` for the unconstrained ends.
    driver : str, optional
        Defines which LAPACK driver should be used. Valid options are "ev",
        "evd", "evr", "evx" for standard problems and "gv", "gvd", "gvx" for
        generalized (where b is not None) problems. See the Notes section.
        The default for standard problems is "evr". For generalized problems,
        "gvd" is used for full set, and "gvx" for subset requested cases.
    type : int, optional
        For the generalized problems, this keyword specifies the problem type
        to be solved for ``w`` and ``v`` (only takes 1, 2, 3 as possible
        inputs)::
            1 =>     a @ v = w @ b @ v
            2 => a @ b @ v = w @ v
            3 => b @ a @ v = w @ v
        This keyword is ignored for standard problems.
    overwrite_a : bool, optional
        Whether to overwrite data in ``a`` (may improve performance). Default
        is False.
    overwrite_b : bool, optional
        Whether to overwrite data in ``b`` (may improve performance). Default
        is False.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    turbo : bool, optional, deprecated
        .. deprecated:: 1.5.0
            `eigh` keyword argument `turbo` is deprecated in favour of
            ``driver=gvd`` keyword instead and will be removed in SciPy
            1.14.0.
    eigvals : tuple (lo, hi), optional, deprecated
        .. deprecated:: 1.5.0
            `eigh` keyword argument `eigvals` is deprecated in favour of
            `subset_by_index` keyword instead and will be removed in SciPy
            1.14.0.
    Returns
    -------
    w : (N,) ndarray
        The N (1<=N<=M) selected eigenvalues, in ascending order, each
        repeated according to its multiplicity.
    v : (M, N) ndarray
        (if ``eigvals_only == False``)
    Raises
    ------
    LinAlgError
        If eigenvalue computation does not converge, an error occurred, or
        b matrix is not definite positive. Note that if input matrices are
        not symmetric or Hermitian, no error will be reported but results will
        be wrong.
    See Also
    --------
    eigvalsh : eigenvalues of symmetric or Hermitian arrays
    eig : eigenvalues and right eigenvectors for non-symmetric arrays
    eigh_tridiagonal : eigenvalues and right eiegenvectors for
        symmetric/Hermitian tridiagonal matrices
    Notes
    -----
    This function does not check the input array for being Hermitian/symmetric
    in order to allow for representing arrays with only their upper/lower
    triangular parts. Also, note that even though not taken into account,
    finiteness check applies to the whole array and unaffected by "lower"
    keyword.
    This function uses LAPACK drivers for computations in all possible keyword
    combinations, prefixed with ``sy`` if arrays are real and ``he`` if
    complex, e.g., a float array with "evr" driver is solved via
    "syevr", complex arrays with "gvx" driver problem is solved via "hegvx"
    etc.
    As a brief summary, the slowest and the most robust driver is the
    classical ``<sy/he>ev`` which uses symmetric QR. ``<sy/he>evr`` is seen as
    the optimal choice for the most general cases. However, there are certain
    occasions that ``<sy/he>evd`` computes faster at the expense of more
    memory usage. ``<sy/he>evx``, while still being faster than ``<sy/he>ev``,
    often performs worse than the rest except when very few eigenvalues are
    requested for large arrays though there is still no performance guarantee.
    For the generalized problem, normalization with respect to the given
    type argument::
            type 1 and 3 :      v.conj().T @ a @ v = w
            type 2       : inv(v).conj().T @ a @ inv(v) = w
            type 1 or 2  :      v.conj().T @ b @ v  = I
            type 3       :      v.conj().T @ inv(b) @ v  = I
    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import eigh
    >>> A = np.array([[6, 3, 1, 5], [3, 0, 5, 1], [1, 5, 6, 2], [5, 1, 2, 2]])
    >>> w, v = eigh(A)
    >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4)))
    True
    Request only the eigenvalues
    >>> w = eigh(A, eigvals_only=True)
    Request eigenvalues that are less than 10.
    >>> A = np.array([[34, -4, -10, -7, 2],
    ...               [-4, 7, 2, 12, 0],
    ...               [-10, 2, 44, 2, -19],
    ...               [-7, 12, 2, 79, -34],
    ...               [2, 0, -19, -34, 29]])
    >>> eigh(A, eigvals_only=True, subset_by_value=[-np.inf, 10])
    array([6.69199443e-07, 9.11938152e+00])
    Request the second smallest eigenvalue and its eigenvector
    >>> w, v = eigh(A, subset_by_index=[1, 1])
    >>> w
    array([9.11938152])
    >>> v.shape  # only a single column is returned
    (5, 1)
    """
    if turbo is not _NoValue:
        warnings.warn("Keyword argument 'turbo' is deprecated in favour of '"
                      "driver=gvd' keyword instead and will be removed in "
                      "SciPy 1.14.0.",
                      DeprecationWarning, stacklevel=2)
    if eigvals is not _NoValue:
        warnings.warn("Keyword argument 'eigvals' is deprecated in favour of "
                      "'subset_by_index' keyword instead and will be removed "
                      "in SciPy 1.14.0.",
                      DeprecationWarning, stacklevel=2)
    # set lower
    uplo = 'L' if lower else 'U'
    # Set job for Fortran routines
    _job = 'N' if eigvals_only else 'V'
    # Accepted driver suffixes: "ev*" for standard, "gv*" for generalized
    # problems; None means "choose automatically" further below.
    drv_str = [None, "ev", "evd", "evr", "evx", "gv", "gvd", "gvx"]
    if driver not in drv_str:
        raise ValueError('"{}" is unknown. Possible values are "None", "{}".'
                         ''.format(driver, '", "'.join(drv_str[1:])))
    a1 = _asarray_validated(a, check_finite=check_finite)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square "a" matrix')
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    # Complexness decides the LAPACK routine prefix ('he' vs 'sy') below.
    cplx = True if iscomplexobj(a1) else False
    n = a1.shape[0]
    drv_args = {'overwrite_a': overwrite_a}
    if b is not None:
        b1 = _asarray_validated(b, check_finite=check_finite)
        overwrite_b = overwrite_b or _datacopied(b1, b)
        if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]:
            raise ValueError('expected square "b" matrix')
        if b1.shape != a1.shape:
            raise ValueError("wrong b dimensions {}, should "
                             "be {}".format(b1.shape, a1.shape))
        if type not in [1, 2, 3]:
            raise ValueError('"type" keyword only accepts 1, 2, and 3.')
        cplx = True if iscomplexobj(b1) else (cplx or False)
        drv_args.update({'overwrite_b': overwrite_b, 'itype': type})
    # backwards-compatibility handling
    subset_by_index = subset_by_index if (eigvals in (None, _NoValue)) else eigvals
    subset = (subset_by_index is not None) or (subset_by_value is not None)
    # Both subsets can't be given
    if subset_by_index and subset_by_value:
        raise ValueError('Either index or value subset can be requested.')
    # Take turbo into account if all conditions are met otherwise ignore
    if turbo not in (None, _NoValue) and b is not None:
        driver = 'gvx' if subset else 'gvd'
    # Check indices if given
    if subset_by_index:
        lo, hi = (int(x) for x in subset_by_index)
        if not (0 <= lo <= hi < n):
            raise ValueError('Requested eigenvalue indices are not valid. '
                             'Valid range is [0, {}] and start <= end, but '
                             'start={}, end={} is given'.format(n-1, lo, hi))
        # fortran is 1-indexed
        drv_args.update({'range': 'I', 'il': lo + 1, 'iu': hi + 1})
    if subset_by_value:
        lo, hi = subset_by_value
        if not (-inf <= lo < hi <= inf):
            raise ValueError('Requested eigenvalue bounds are not valid. '
                             'Valid range is (-inf, inf) and low < high, but '
                             'low={}, high={} is given'.format(lo, hi))
        drv_args.update({'range': 'V', 'vl': lo, 'vu': hi})
    # fix prefix for lapack routines
    pfx = 'he' if cplx else 'sy'
    # decide on the driver if not given
    # first early exit on incompatible choice
    if driver:
        if b is None and (driver in ["gv", "gvd", "gvx"]):
            raise ValueError('{} requires input b array to be supplied '
                             'for generalized eigenvalue problems.'
                             ''.format(driver))
        if (b is not None) and (driver in ['ev', 'evd', 'evr', 'evx']):
            raise ValueError('"{}" does not accept input b array '
                             'for standard eigenvalue problems.'
                             ''.format(driver))
        if subset and (driver in ["ev", "evd", "gv", "gvd"]):
            raise ValueError('"{}" cannot compute subsets of eigenvalues'
                             ''.format(driver))
    # Default driver is evr and gvd
    else:
        driver = "evr" if b is None else ("gvx" if subset else "gvd")
    # Drivers whose workspace query returns several sizes; maps each to
    # the keyword names expected by the actual driver call.
    lwork_spec = {
                  'syevd': ['lwork', 'liwork'],
                  'syevr': ['lwork', 'liwork'],
                  'heevd': ['lwork', 'liwork', 'lrwork'],
                  'heevr': ['lwork', 'lrwork', 'liwork'],
                  }
    if b is None:  # Standard problem
        drv, drvlw = get_lapack_funcs((pfx + driver, pfx+driver+'_lwork'),
                                      [a1])
        clw_args = {'n': n, 'lower': lower}
        if driver == 'evd':
            clw_args.update({'compute_v': 0 if _job == "N" else 1})
        lw = _compute_lwork(drvlw, **clw_args)
        # Multiple lwork vars
        if isinstance(lw, tuple):
            lwork_args = dict(zip(lwork_spec[pfx+driver], lw))
        else:
            lwork_args = {'lwork': lw}
        drv_args.update({'lower': lower, 'compute_v': 0 if _job == "N" else 1})
        w, v, *other_args, info = drv(a=a1, **drv_args, **lwork_args)
    else:  # Generalized problem
        # 'gvd' doesn't have lwork query
        if driver == "gvd":
            drv = get_lapack_funcs(pfx + "gvd", [a1, b1])
            lwork_args = {}
        else:
            drv, drvlw = get_lapack_funcs((pfx + driver, pfx+driver+'_lwork'),
                                          [a1, b1])
            # generalized drivers use uplo instead of lower
            lw = _compute_lwork(drvlw, n, uplo=uplo)
            lwork_args = {'lwork': lw}
        drv_args.update({'uplo': uplo, 'jobz': _job})
        w, v, *other_args, info = drv(a=a1, b=b1, **drv_args, **lwork_args)
    # m is always the first extra argument
    w = w[:other_args[0]] if subset else w
    v = v[:, :other_args[0]] if (subset and not eigvals_only) else v
    # Check if we had a successful exit
    if info == 0:
        if eigvals_only:
            return w
        else:
            return w, v
    else:
        if info < -1:
            raise LinAlgError('Illegal value in argument {} of internal {}'
                              ''.format(-info, drv.typecode + pfx + driver))
        elif info > n:
            raise LinAlgError('The leading minor of order {} of B is not '
                              'positive definite. The factorization of B '
                              'could not be completed and no eigenvalues '
                              'or eigenvectors were computed.'.format(info-n))
        else:
            # 0 < info <= n: driver-specific convergence failure; map the
            # driver family to its documented failure message.
            drv_err = {'ev': 'The algorithm failed to converge; {} '
                             'off-diagonal elements of an intermediate '
                             'tridiagonal form did not converge to zero.',
                       'evx': '{} eigenvectors failed to converge.',
                       'evd': 'The algorithm failed to compute an eigenvalue '
                              'while working on the submatrix lying in rows '
                              'and columns {0}/{1} through mod({0},{1}).',
                       'evr': 'Internal Error.'
                       }
            if driver in ['ev', 'gv']:
                msg = drv_err['ev'].format(info)
            elif driver in ['evx', 'gvx']:
                msg = drv_err['evx'].format(info)
            elif driver in ['evd', 'gvd']:
                if eigvals_only:
                    msg = drv_err['ev'].format(info)
                else:
                    msg = drv_err['evd'].format(info, n+1)
            else:
                msg = drv_err['evr']
            raise LinAlgError(msg)
# Map user-facing `select` values (0/1/2, full words, or single letters)
# to the integer codes used internally by `_check_select`.
_conv_dict = {0: 0, 1: 1, 2: 2,
              'all': 0, 'value': 1, 'index': 2,
              'a': 0, 'v': 1, 'i': 2}
def _check_select(select, select_range, max_ev, max_len):
"""Check that select is valid, convert to Fortran style."""
if isinstance(select, str):
select = select.lower()
try:
select = _conv_dict[select]
except KeyError as e:
raise ValueError('invalid argument for select') from e
vl, vu = 0., 1.
il = iu = 1
if select != 0: # (non-all)
sr = asarray(select_range)
if sr.ndim != 1 or sr.size != 2 or sr[1] < sr[0]:
raise ValueError('select_range must be a 2-element array-like '
'in nondecreasing order')
if select == 1: # (value)
vl, vu = sr
if max_ev == 0:
max_ev = max_len
else: # 2 (index)
if sr.dtype.char.lower() not in 'hilqp':
raise ValueError('when using select="i", select_range must '
'contain integers, got dtype %s (%s)'
% (sr.dtype, sr.dtype.char))
# translate Python (0 ... N-1) into Fortran (1 ... N) with + 1
il, iu = sr + 1
if min(il, iu) < 1 or max(il, iu) > max_len:
raise ValueError('select_range out of bounds')
max_ev = iu - il + 1
return select, vl, vu, il, iu, max_ev
def eig_banded(a_band, lower=False, eigvals_only=False, overwrite_a_band=False,
               select='a', select_range=None, max_ev=0, check_finite=True):
    """
    Solve real symmetric or complex Hermitian band matrix eigenvalue problem.
    Find eigenvalues w and optionally right eigenvectors v of a::
        a v[:,i] = w[i] v[:,i]
        v.H v    = identity
    The matrix a is stored in a_band either in lower diagonal or upper
    diagonal ordered form:
        a_band[u + i - j, j] == a[i,j]        (if upper form; i <= j)
        a_band[    i - j, j] == a[i,j]        (if lower form; i >= j)
    where u is the number of bands above the diagonal.
    Example of a_band (shape of a is (6,6), u=2)::
        upper form:
        *   *   a02 a13 a24 a35
        *   a01 a12 a23 a34 a45
        a00 a11 a22 a33 a44 a55
        lower form:
        a00 a11 a22 a33 a44 a55
        a10 a21 a32 a43 a54 *
        a20 a31 a42 a53 *   *
    Cells marked with * are not used.
    Parameters
    ----------
    a_band : (u+1, M) array_like
        The bands of the M by M matrix a.
    lower : bool, optional
        Is the matrix in the lower form. (Default is upper form)
    eigvals_only : bool, optional
        Compute only the eigenvalues and no eigenvectors.
        (Default: calculate also eigenvectors)
    overwrite_a_band : bool, optional
        Discard data in a_band (may enhance performance)
    select : {'a', 'v', 'i'}, optional
        Which eigenvalues to calculate
        ======  ========================================
        select  calculated
        ======  ========================================
        'a'     All eigenvalues
        'v'     Eigenvalues in the interval (min, max]
        'i'     Eigenvalues with indices min <= i <= max
        ======  ========================================
    select_range : (min, max), optional
        Range of selected eigenvalues
    max_ev : int, optional
        For select=='v', maximum number of eigenvalues expected.
        For other values of select, has no meaning.
        In doubt, leave this parameter untouched.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    Returns
    -------
    w : (M,) ndarray
        The eigenvalues, in ascending order, each repeated according to its
        multiplicity.
    v : (M, M) float or complex ndarray
        The normalized eigenvector corresponding to the eigenvalue w[i] is
        the column v[:,i].
    Raises
    ------
    LinAlgError
        If eigenvalue computation does not converge.
    See Also
    --------
    eigvals_banded : eigenvalues for symmetric/Hermitian band matrices
    eig : eigenvalues and right eigenvectors of general arrays.
    eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
    eigh_tridiagonal : eigenvalues and right eigenvectors for
        symmetric/Hermitian tridiagonal matrices
    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import eig_banded
    >>> A = np.array([[1, 5, 2, 0], [5, 2, 5, 2], [2, 5, 3, 5], [0, 2, 5, 4]])
    >>> Ab = np.array([[1, 2, 3, 4], [5, 5, 5, 0], [2, 2, 0, 0]])
    >>> w, v = eig_banded(Ab, lower=True)
    >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4)))
    True
    >>> w = eig_banded(Ab, lower=True, eigvals_only=True)
    >>> w
    array([-4.26200532, -2.22987175,  3.95222349, 12.53965359])
    Request only the eigenvalues between ``[-3, 4]``
    >>> w, v = eig_banded(Ab, lower=True, select='v', select_range=[-3, 4])
    >>> w
    array([-2.22987175,  3.95222349])
    """
    if eigvals_only or overwrite_a_band:
        a1 = _asarray_validated(a_band, check_finite=check_finite)
        overwrite_a_band = overwrite_a_band or (_datacopied(a1, a_band))
    else:
        # Eigenvectors wanted and overwrite not permitted: work on a
        # private copy so LAPACK may always operate in place.
        a1 = array(a_band)
        if issubclass(a1.dtype.type, inexact) and not isfinite(a1).all():
            raise ValueError("array must not contain infs or NaNs")
        overwrite_a_band = 1
    if len(a1.shape) != 2:
        raise ValueError('expected a 2-D array')
    select, vl, vu, il, iu, max_ev = _check_select(
        select, select_range, max_ev, a1.shape[1])
    del select_range
    if select == 0:
        # All eigenvalues requested: use the *bevd drivers.
        if a1.dtype.char in 'GFD':
            # FIXME: implement this somewhen, for now go with builtin values
            # FIXME: calc optimal lwork by calling ?hbevd(lwork=-1)
            #        or by using calc_lwork.f ???
            # lwork = calc_lwork.hbevd(bevd.typecode, a1.shape[0], lower)
            internal_name = 'hbevd'
        else:  # a1.dtype.char in 'fd':
            # FIXME: implement this somewhen, for now go with builtin values
            #         see above
            # lwork = calc_lwork.sbevd(bevd.typecode, a1.shape[0], lower)
            internal_name = 'sbevd'
        bevd, = get_lapack_funcs((internal_name,), (a1,))
        w, v, info = bevd(a1, compute_v=not eigvals_only,
                          lower=lower, overwrite_ab=overwrite_a_band)
    else:  # select in [1, 2]
        if eigvals_only:
            max_ev = 1
        # calculate optimal abstol for dsbevx (see manpage)
        if a1.dtype.char in 'fF':  # single precision
            lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='f'),))
        else:
            lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='d'),))
        abstol = 2 * lamch('s')
        if a1.dtype.char in 'GFD':
            internal_name = 'hbevx'
        else:  # a1.dtype.char in 'gfd'
            internal_name = 'sbevx'
        bevx, = get_lapack_funcs((internal_name,), (a1,))
        w, v, m, ifail, info = bevx(
            a1, vl, vu, il, iu, compute_v=not eigvals_only, mmax=max_ev,
            range=select, lower=lower, overwrite_ab=overwrite_a_band,
            abstol=abstol)
        # crop off w and v: the driver returns m valid eigenvalues.
        w = w[:m]
        if not eigvals_only:
            v = v[:, :m]
    _check_info(info, internal_name)
    if eigvals_only:
        return w
    return w, v
def eigvals(a, b=None, overwrite_a=False, check_finite=True,
            homogeneous_eigvals=False):
    """
    Compute eigenvalues from an ordinary or generalized eigenvalue problem.

    Find the eigenvalues w of a general matrix satisfying::

        a vr[:,i] = w[i] b vr[:,i]

    Parameters
    ----------
    a : (M, M) array_like
        A complex or real matrix whose eigenvalues will be computed.
    b : (M, M) array_like, optional
        Right-hand side matrix in a generalized eigenvalue problem.
        If omitted, identity matrix is assumed.
    overwrite_a : bool, optional
        Whether to overwrite data in a (may improve performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities
        or NaNs.
    homogeneous_eigvals : bool, optional
        If True, return the eigenvalues in homogeneous coordinates,
        i.e. as a (2, M) array ``w`` such that::

            w[1,i] a vr[:,i] = w[0,i] b vr[:,i]

        Default is False.

    Returns
    -------
    w : (M,) or (2, M) double or complex ndarray
        The eigenvalues, each repeated according to its multiplicity
        but not in any specific order. The shape is (M,) unless
        ``homogeneous_eigvals=True``.

    Raises
    ------
    LinAlgError
        If eigenvalue computation does not converge

    See Also
    --------
    eig : eigenvalues and right eigenvectors of general arrays.
    eigvalsh : eigenvalues of symmetric or Hermitian arrays
    eigvals_banded : eigenvalues for symmetric/Hermitian band matrices
    eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal
        matrices

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import linalg
    >>> a = np.array([[0., -1.], [1., 0.]])
    >>> linalg.eigvals(a)
    array([0.+1.j, 0.-1.j])
    """
    # Eigenvalues only: delegate to `eig` with both eigenvector flags off.
    return eig(a, b=b, left=0, right=0,
               overwrite_a=overwrite_a, check_finite=check_finite,
               homogeneous_eigvals=homogeneous_eigvals)
def eigvalsh(a, b=None, lower=True, overwrite_a=False,
             overwrite_b=False, turbo=_NoValue, eigvals=_NoValue, type=1,
             check_finite=True, subset_by_index=None, subset_by_value=None,
             driver=None):
    """
    Solves a standard or generalized eigenvalue problem for a complex
    Hermitian or real symmetric matrix.

    Returns the eigenvalues ``w`` of ``a`` (or of the pencil ``(a, b)`` when
    ``b`` is given and positive definite), sorted in ascending order. This is
    a legacy convenience wrapper: it is exactly `scipy.linalg.eigh` called
    with ``eigvals_only=True``; see that function for full details on every
    keyword.

    Parameters
    ----------
    a : (M, M) array_like
        Complex Hermitian or real symmetric matrix whose eigenvalues are
        wanted.
    b : (M, M) array_like, optional
        Positive definite Hermitian/symmetric right-hand side matrix of a
        generalized problem; the identity is assumed when omitted.
    lower : bool, optional
        Take the pertinent triangle of ``a`` (and ``b``) from the lower
        (default) or upper part.
    overwrite_a, overwrite_b : bool, optional
        Allow clobbering the corresponding input for speed. Default False.
    type : int, optional
        Generalized problem type, one of 1 (``a @ v = w @ b @ v``),
        2 (``a @ b @ v = w @ v``) or 3 (``b @ a @ v = w @ v``); ignored for
        standard problems.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or
        NaNs.
    subset_by_index : iterable, optional
        Two-element start/end indices (0-indexed, ascending order) of the
        desired eigenvalues; only the "evr", "evx" and "gvx" drivers support
        this.
    subset_by_value : iterable, optional
        Half-open interval ``(a, b]`` restricting which eigenvalues are
        returned; only the "evr", "evx" and "gvx" drivers support this. Use
        ``np.inf`` for an unconstrained end.
    driver : str, optional
        LAPACK driver: "ev", "evd", "evr", "evx" for standard and "gv",
        "gvd", "gvx" for generalized problems. See the Notes section of
        `scipy.linalg.eigh`.
    turbo : bool, optional, deprecated
        .. deprecated:: 1.5.0
            'eigvalsh' keyword argument `turbo` is deprecated in favor of
            ``driver=gvd`` option and will be removed in SciPy 1.14.0.
    eigvals : tuple (lo, hi), optional
        .. deprecated:: 1.5.0
            'eigvalsh' keyword argument `eigvals` is deprecated in favor of
            `subset_by_index` option and will be removed in SciPy 1.14.0.

    Returns
    -------
    w : (N,) ndarray
        The ``N`` (``1<=N<=M``) selected eigenvalues, in ascending order,
        each repeated according to its multiplicity.

    Raises
    ------
    LinAlgError
        If eigenvalue computation does not converge, an error occurred, or
        b matrix is not definite positive. Note that if input matrices are
        not symmetric or Hermitian, no error will be reported but results
        will be wrong.

    See Also
    --------
    eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
    eigvals : eigenvalues of general arrays
    eigvals_banded : eigenvalues for symmetric/Hermitian band matrices
    eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal
        matrices

    Notes
    -----
    The input is not checked for symmetry/Hermitianness, so that arrays can
    be represented by a single triangle only. For full control use
    `scipy.linalg.eigh` directly.

    Examples
    --------
    For more examples see `scipy.linalg.eigh`.
    >>> import numpy as np
    >>> from scipy.linalg import eigvalsh
    >>> A = np.array([[6, 3, 1, 5], [3, 0, 5, 1], [1, 5, 6, 2], [5, 1, 2, 2]])
    >>> w = eigvalsh(A)
    >>> w
    array([-3.74637491, -0.76263923,  6.08502336, 12.42399079])
    """
    # One-line legacy shim: forward every argument (including the deprecated
    # ones) to eigh, requesting eigenvalues only.
    return eigh(a, b=b, type=type, lower=lower, eigvals_only=True,
                overwrite_a=overwrite_a, overwrite_b=overwrite_b,
                turbo=turbo, eigvals=eigvals, check_finite=check_finite,
                subset_by_index=subset_by_index,
                subset_by_value=subset_by_value, driver=driver)
def eigvals_banded(a_band, lower=False, overwrite_a_band=False,
                   select='a', select_range=None, check_finite=True):
    """
    Solve real symmetric or complex Hermitian band matrix eigenvalue problem.
    Find eigenvalues w of a::
        a v[:,i] = w[i] v[:,i]
        v.H v = identity
    The matrix a is stored in a_band either in lower diagonal or upper
    diagonal ordered form:
        a_band[u + i - j, j] == a[i,j] (if upper form; i <= j)
        a_band[ i - j, j] == a[i,j] (if lower form; i >= j)
    where u is the number of bands above the diagonal.
    Example of a_band (shape of a is (6,6), u=2)::
        upper form:
        *   *   a02 a13 a24 a35
        *   a01 a12 a23 a34 a45
        a00 a11 a22 a33 a44 a55
        lower form:
        a00 a11 a22 a33 a44 a55
        a10 a21 a32 a43 a54 *
        a20 a31 a42 a53 *   *
    Cells marked with * are not used.
    Parameters
    ----------
    a_band : (u+1, M) array_like
        The bands of the M by M matrix a.
    lower : bool, optional
        Is the matrix in the lower form. (Default is upper form)
    overwrite_a_band : bool, optional
        Discard data in a_band (may enhance performance)
    select : {'a', 'v', 'i'}, optional
        Which eigenvalues to calculate
        ======  ========================================
        select  calculated
        ======  ========================================
        'a'     All eigenvalues
        'v'     Eigenvalues in the interval (min, max]
        'i'     Eigenvalues with indices min <= i <= max
        ======  ========================================
    select_range : (min, max), optional
        Range of selected eigenvalues
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    Returns
    -------
    w : (M,) ndarray
        The eigenvalues, in ascending order, each repeated according to its
        multiplicity.
    Raises
    ------
    LinAlgError
        If eigenvalue computation does not converge.
    See Also
    --------
    eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian
        band matrices
    eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal
        matrices
    eigvals : eigenvalues of general arrays
    eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
    eig : eigenvalues and right eigenvectors for non-symmetric arrays
    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import eigvals_banded
    >>> A = np.array([[1, 5, 2, 0], [5, 2, 5, 2], [2, 5, 3, 5], [0, 2, 5, 4]])
    >>> Ab = np.array([[1, 2, 3, 4], [5, 5, 5, 0], [2, 2, 0, 0]])
    >>> w = eigvals_banded(Ab, lower=True)
    >>> w
    array([-4.26200532, -2.22987175,  3.95222349, 12.53965359])
    """
    # Thin wrapper around eig_banded requesting eigenvalues only. Pass a
    # real boolean for ``eigvals_only`` (the original passed the int 1).
    return eig_banded(a_band, lower=lower, eigvals_only=True,
                      overwrite_a_band=overwrite_a_band, select=select,
                      select_range=select_range, check_finite=check_finite)
def eigvalsh_tridiagonal(d, e, select='a', select_range=None,
                         check_finite=True, tol=0., lapack_driver='auto'):
    """
    Solve eigenvalue problem for a real symmetric tridiagonal matrix.

    Returns the eigenvalues ``w`` of the real symmetric tridiagonal matrix
    ``a`` with main diagonal `d` and off-diagonal `e`, i.e. solutions of::

        a v[:,i] = w[i] v[:,i]
        v.H v    = identity

    This is `scipy.linalg.eigh_tridiagonal` with ``eigvals_only=True``.

    Parameters
    ----------
    d : ndarray, shape (ndim,)
        The diagonal elements of the array.
    e : ndarray, shape (ndim-1,)
        The off-diagonal elements of the array.
    select : {'a', 'v', 'i'}, optional
        Which eigenvalues to calculate: 'a' all of them, 'v' those in the
        half-open interval ``(min, max]``, 'i' those with indices
        ``min <= i <= max``.
    select_range : (min, max), optional
        Range of selected eigenvalues, used by the 'v' and 'i' modes.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or
        NaNs.
    tol : float
        Absolute per-eigenvalue convergence tolerance, used only when
        ``lapack_driver='stebz'``. If <= 0. (default) the value ``eps*|a|``
        is used, where eps is the machine precision and ``|a|`` the 1-norm
        of ``a``.
    lapack_driver : str
        One of 'auto', 'stemr', 'stebz', 'sterf' or 'stev'. 'auto' (default)
        picks 'stemr' when ``select='a'`` and 'stebz' otherwise; 'sterf' and
        'stev' require ``select='a'``.

    Returns
    -------
    w : (M,) ndarray
        The eigenvalues, in ascending order, each repeated according to its
        multiplicity.

    Raises
    ------
    LinAlgError
        If eigenvalue computation does not converge.

    See Also
    --------
    eigh_tridiagonal : eigenvalues and right eigenvectors for
        symmetric/Hermitian tridiagonal matrices

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import eigvalsh_tridiagonal, eigvalsh
    >>> d = 3*np.ones(4)
    >>> e = -1*np.ones(3)
    >>> w = eigvalsh_tridiagonal(d, e)
    >>> A = np.diag(d) + np.diag(e, k=1) + np.diag(e, k=-1)
    >>> w2 = eigvalsh(A)  # Verify with other eigenvalue routines
    >>> np.allclose(w - w2, np.zeros(4))
    True
    """
    # Forward everything to the full routine, requesting eigenvalues only.
    return eigh_tridiagonal(d, e, eigvals_only=True, select=select,
                            select_range=select_range, tol=tol,
                            lapack_driver=lapack_driver,
                            check_finite=check_finite)
def eigh_tridiagonal(d, e, eigvals_only=False, select='a', select_range=None,
                     check_finite=True, tol=0., lapack_driver='auto'):
    """
    Solve eigenvalue problem for a real symmetric tridiagonal matrix.
    Find eigenvalues `w` and optionally right eigenvectors `v` of ``a``::
        a v[:,i] = w[i] v[:,i]
        v.H v = identity
    For a real symmetric matrix ``a`` with diagonal elements `d` and
    off-diagonal elements `e`.
    Parameters
    ----------
    d : ndarray, shape (ndim,)
        The diagonal elements of the array.
    e : ndarray, shape (ndim-1,)
        The off-diagonal elements of the array.
    select : {'a', 'v', 'i'}, optional
        Which eigenvalues to calculate
        ======  ========================================
        select  calculated
        ======  ========================================
        'a'     All eigenvalues
        'v'     Eigenvalues in the interval (min, max]
        'i'     Eigenvalues with indices min <= i <= max
        ======  ========================================
    select_range : (min, max), optional
        Range of selected eigenvalues
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    tol : float
        The absolute tolerance to which each eigenvalue is required
        (only used when 'stebz' is the `lapack_driver`).
        An eigenvalue (or cluster) is considered to have converged if it
        lies in an interval of this width. If <= 0. (default),
        the value ``eps*|a|`` is used where eps is the machine precision,
        and ``|a|`` is the 1-norm of the matrix ``a``.
    lapack_driver : str
        LAPACK function to use, can be 'auto', 'stemr', 'stebz', 'sterf',
        or 'stev'. When 'auto' (default), it will use 'stemr' if ``select='a'``
        and 'stebz' otherwise. When 'stebz' is used to find the eigenvalues and
        ``eigvals_only=False``, then a second LAPACK call (to ``?STEIN``) is
        used to find the corresponding eigenvectors. 'sterf' can only be
        used when ``eigvals_only=True`` and ``select='a'``. 'stev' can only
        be used when ``select='a'``.
    Returns
    -------
    w : (M,) ndarray
        The eigenvalues, in ascending order, each repeated according to its
        multiplicity.
    v : (M, M) ndarray
        The normalized eigenvector corresponding to the eigenvalue ``w[i]`` is
        the column ``v[:,i]``.
    Raises
    ------
    LinAlgError
        If eigenvalue computation does not converge.
    See Also
    --------
    eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal
        matrices
    eig : eigenvalues and right eigenvectors for non-symmetric arrays
    eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
    eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian
        band matrices
    Notes
    -----
    This function makes use of LAPACK ``S/DSTEMR`` routines.
    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import eigh_tridiagonal
    >>> d = 3*np.ones(4)
    >>> e = -1*np.ones(3)
    >>> w, v = eigh_tridiagonal(d, e)
    >>> A = np.diag(d) + np.diag(e, k=1) + np.diag(e, k=-1)
    >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4)))
    True
    """
    # Input validation: d and e must be real 1-D arrays with
    # d.size == e.size + 1 (one diagonal plus one off-diagonal band).
    d = _asarray_validated(d, check_finite=check_finite)
    e = _asarray_validated(e, check_finite=check_finite)
    for check in (d, e):
        if check.ndim != 1:
            raise ValueError('expected a 1-D array')
        if check.dtype.char in 'GFD':  # complex
            raise TypeError('Only real arrays currently supported')
    if d.size != e.size + 1:
        raise ValueError('d (%s) must have one more element than e (%s)'
                         % (d.size, e.size))
    # Normalize the selection arguments; ``select`` comes back as an int
    # code, where 0 means 'a' (all eigenvalues).
    select, vl, vu, il, iu, _ = _check_select(
        select, select_range, 0, d.size)
    if not isinstance(lapack_driver, str):
        raise TypeError('lapack_driver must be str')
    drivers = ('auto', 'stemr', 'sterf', 'stebz', 'stev')
    if lapack_driver not in drivers:
        raise ValueError('lapack_driver must be one of %s, got %s'
                         % (drivers, lapack_driver))
    if lapack_driver == 'auto':
        # stemr covers the all-eigenvalues case; stebz supports subsets.
        lapack_driver = 'stemr' if select == 0 else 'stebz'
    func, = get_lapack_funcs((lapack_driver,), (d, e))
    compute_v = not eigvals_only
    if lapack_driver == 'sterf':
        # sterf: eigenvalues only, full spectrum only.
        if select != 0:
            raise ValueError('sterf can only be used when select == "a"')
        if not eigvals_only:
            raise ValueError('sterf can only be used when eigvals_only is '
                             'True')
        w, info = func(d, e)
        m = len(w)
    elif lapack_driver == 'stev':
        # stev: full spectrum, eigenvectors optional.
        if select != 0:
            raise ValueError('stev can only be used when select == "a"')
        w, v, info = func(d, e, compute_v=compute_v)
        m = len(w)
    elif lapack_driver == 'stebz':
        tol = float(tol)
        internal_name = 'stebz'
        stebz, = get_lapack_funcs((internal_name,), (d, e))
        # If getting eigenvectors, needs to be block-ordered (B) instead of
        # matrix-ordered (E), and we will reorder later
        order = 'E' if eigvals_only else 'B'
        m, w, iblock, isplit, info = stebz(d, e, select, vl, vu, il, iu, tol,
                                           order)
    else:   # 'stemr'
        # ?STEMR annoyingly requires size N instead of N-1
        e_ = empty(e.size+1, e.dtype)
        e_[:-1] = e
        # Workspace query first, then the actual computation.
        stemr_lwork, = get_lapack_funcs(('stemr_lwork',), (d, e))
        lwork, liwork, info = stemr_lwork(d, e_, select, vl, vu, il, iu,
                                          compute_v=compute_v)
        _check_info(info, 'stemr_lwork')
        m, w, v, info = func(d, e_, select, vl, vu, il, iu,
                             compute_v=compute_v, lwork=lwork, liwork=liwork)
    _check_info(info, lapack_driver + ' (eigh_tridiagonal)')
    # m is the number of eigenvalues the driver actually found.
    w = w[:m]
    if eigvals_only:
        return w
    else:
        # Do we still need to compute the eigenvalues?
        if lapack_driver == 'stebz':
            # stebz produced eigenvalues only; a second call (stein) gives
            # the corresponding eigenvectors.
            func, = get_lapack_funcs(('stein',), (d, e))
            v, info = func(d, e, w, iblock, isplit)
            _check_info(info, 'stein (eigh_tridiagonal)',
                        positive='%d eigenvectors failed to converge')
            # Convert block-order to matrix-order
            order = argsort(w)
            w, v = w[order], v[:, order]
        else:
            v = v[:, :m]
        return w, v
def _check_info(info, driver, positive='did not converge (LAPACK info=%d)'):
"""Check info return value."""
if info < 0:
raise ValueError('illegal value in argument %d of internal %s'
% (-info, driver))
if info > 0 and positive:
raise LinAlgError(("%s " + positive) % (driver, info,))
def hessenberg(a, calc_q=False, overwrite_a=False, check_finite=True):
    """
    Compute Hessenberg form of a matrix.
    The Hessenberg decomposition is::
        A = Q H Q^H
    where `Q` is unitary/orthogonal and `H` has only zero elements below
    the first sub-diagonal.
    Parameters
    ----------
    a : (M, M) array_like
        Matrix to bring into Hessenberg form.
    calc_q : bool, optional
        Whether to compute the transformation matrix. Default is False.
    overwrite_a : bool, optional
        Whether to overwrite `a`; may improve performance.
        Default is False.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    Returns
    -------
    H : (M, M) ndarray
        Hessenberg form of `a`.
    Q : (M, M) ndarray
        Unitary/orthogonal similarity transformation matrix ``A = Q H Q^H``.
        Only returned if ``calc_q=True``.
    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import hessenberg
    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
    >>> H, Q = hessenberg(A, calc_q=True)
    >>> H
    array([[ 2. , -11.65843866, 1.42005301, 0.25349066],
           [ -9.94987437, 14.53535354, -5.31022304, 2.43081618],
           [ 0. , -1.83299243, 0.38969961, -0.51527034],
           [ 0. , 0. , -3.83189513, 1.07494686]])
    >>> np.allclose(Q @ H @ Q.conj().T - A, np.zeros((4, 4)))
    True
    """
    # Validate and (optionally) finiteness-check the input; must be square.
    a1 = _asarray_validated(a, check_finite=check_finite)
    if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]):
        raise ValueError('expected square matrix')
    # If _asarray_validated already made a copy, overwriting it is free.
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    # if 2x2 or smaller: already in Hessenberg
    if a1.shape[0] <= 2:
        if calc_q:
            return a1, eye(a1.shape[0])
        return a1
    gehrd, gebal, gehrd_lwork = get_lapack_funcs(('gehrd', 'gebal',
                                                  'gehrd_lwork'), (a1,))
    # Balance the matrix first (scaling only, permute=0); lo/hi delimit the
    # rows/columns gebal touched and are forwarded to gehrd/orghr below.
    ba, lo, hi, pivscale, info = gebal(a1, permute=0, overwrite_a=overwrite_a)
    _check_info(info, 'gebal (hessenberg)', positive=False)
    n = len(a1)
    # Workspace query, then the Householder reduction itself.
    lwork = _compute_lwork(gehrd_lwork, ba.shape[0], lo=lo, hi=hi)
    hq, tau, info = gehrd(ba, lo=lo, hi=hi, lwork=lwork, overwrite_a=1)
    _check_info(info, 'gehrd (hessenberg)', positive=False)
    # gehrd leaves H in the upper triangle plus first subdiagonal of hq
    # (the rest encodes the Householder reflectors); zero everything below.
    h = numpy.triu(hq, -1)
    if not calc_q:
        return h
    # use orghr/unghr to compute q
    orghr, orghr_lwork = get_lapack_funcs(('orghr', 'orghr_lwork'), (a1,))
    lwork = _compute_lwork(orghr_lwork, n, lo=lo, hi=hi)
    # Accumulate Q from the reflectors stored in hq together with tau.
    q, info = orghr(a=hq, tau=tau, lo=lo, hi=hi, lwork=lwork, overwrite_a=1)
    _check_info(info, 'orghr (hessenberg)', positive=False)
    return h, q
def cdf2rdf(w, v):
    """
    Converts complex eigenvalues ``w`` and eigenvectors ``v`` to real
    eigenvalues in a block diagonal form ``wr`` and the associated real
    eigenvectors ``vr``, such that::
        vr @ wr = X @ vr
    continues to hold, where ``X`` is the original array for which ``w`` and
    ``v`` are the eigenvalues and eigenvectors.
    .. versionadded:: 1.1.0
    Parameters
    ----------
    w : (..., M) array_like
        Complex or real eigenvalues, an array or stack of arrays
        Conjugate pairs must not be interleaved, else the wrong result
        will be produced. So ``[1+1j, 1, 1-1j]`` will give a correct result,
        but ``[1+1j, 2+1j, 1-1j, 2-1j]`` will not.
    v : (..., M, M) array_like
        Complex or real eigenvectors, a square array or stack of square arrays.
    Returns
    -------
    wr : (..., M, M) ndarray
        Real diagonal block form of eigenvalues
    vr : (..., M, M) ndarray
        Real eigenvectors associated with ``wr``
    See Also
    --------
    eig : Eigenvalues and right eigenvectors for non-symmetric arrays
    rsf2csf : Convert real Schur form to complex Schur form
    Notes
    -----
    ``w``, ``v`` must be the eigenstructure for some *real* matrix ``X``.
    For example, obtained by ``w, v = scipy.linalg.eig(X)`` or
    ``w, v = numpy.linalg.eig(X)`` in which case ``X`` can also represent
    stacked arrays.
    .. versionadded:: 1.1.0
    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]])
    >>> X
    array([[ 1, 2, 3],
           [ 0, 4, 5],
           [ 0, -5, 4]])
    >>> from scipy import linalg
    >>> w, v = linalg.eig(X)
    >>> w
    array([ 1.+0.j, 4.+5.j, 4.-5.j])
    >>> v
    array([[ 1.00000+0.j , -0.01906-0.40016j, -0.01906+0.40016j],
           [ 0.00000+0.j , 0.00000-0.64788j, 0.00000+0.64788j],
           [ 0.00000+0.j , 0.64788+0.j , 0.64788-0.j ]])
    >>> wr, vr = linalg.cdf2rdf(w, v)
    >>> wr
    array([[ 1., 0., 0.],
           [ 0., 4., 5.],
           [ 0., -5., 4.]])
    >>> vr
    array([[ 1. , 0.40016, -0.01906],
           [ 0. , 0.64788, 0. ],
           [ 0. , 0. , 0.64788]])
    >>> vr @ wr
    array([[ 1. , 1.69593, 1.9246 ],
           [ 0. , 2.59153, 3.23942],
           [ 0. , -3.23942, 2.59153]])
    >>> X @ vr
    array([[ 1. , 1.69593, 1.9246 ],
           [ 0. , 2.59153, 3.23942],
           [ 0. , -3.23942, 2.59153]])
    """
    w, v = _asarray_validated(w), _asarray_validated(v)
    # check dimensions
    if w.ndim < 1:
        raise ValueError('expected w to be at least 1D')
    if v.ndim < 2:
        raise ValueError('expected v to be at least 2D')
    if v.ndim != w.ndim + 1:
        raise ValueError('expected eigenvectors array to have exactly one '
                         'dimension more than eigenvalues array')
    # check shapes
    n = w.shape[-1]
    M = w.shape[:-1]
    if v.shape[-2] != v.shape[-1]:
        raise ValueError('expected v to be a square matrix or stacked square '
                         'matrices: v.shape[-2] = v.shape[-1]')
    if v.shape[-1] != n:
        raise ValueError('expected the same number of eigenvalues as '
                         'eigenvectors')
    # get indices for each first pair of complex eigenvalues
    complex_mask = iscomplex(w)
    n_complex = complex_mask.sum(axis=-1)
    # check if all complex eigenvalues have conjugate pairs
    if not (n_complex % 2 == 0).all():
        raise ValueError('expected complex-conjugate pairs of eigenvalues')
    # find complex indices
    idx = nonzero(complex_mask)
    idx_stack = idx[:-1]
    idx_elem = idx[-1]
    # filter them to conjugate indices, assuming pairs are not interleaved:
    # j gets the first member of each pair, k its conjugate partner
    j = idx_elem[0::2]
    k = idx_elem[1::2]
    stack_ind = ()
    for i in idx_stack:
        # should never happen, assuming nonzero orders by the last axis
        assert (i[0::2] == i[1::2]).all(),\
                "Conjugate pair spanned different arrays!"
        stack_ind += (i[0::2],)
    # all eigenvalues to diagonal form
    wr = zeros(M + (n, n), dtype=w.real.dtype)
    di = range(n)
    wr[..., di, di] = w.real
    # complex eigenvalues to real block diagonal form: each conjugate pair
    # a +/- bi becomes the 2x2 block [[a, b], [-b, a]]
    wr[stack_ind + (j, k)] = w[stack_ind + (j,)].imag
    wr[stack_ind + (k, j)] = w[stack_ind + (k,)].imag
    # compute real eigenvectors associated with real block diagonal eigenvalues
    # (u mixes each conjugate eigenvector pair into its real/imag parts)
    u = zeros(M + (n, n), dtype=numpy.cdouble)
    u[..., di, di] = 1.0
    u[stack_ind + (j, j)] = 0.5j
    u[stack_ind + (j, k)] = 0.5
    u[stack_ind + (k, j)] = -0.5j
    u[stack_ind + (k, k)] = 0.5
    # multiply matrices v and u (equivalent to v @ u)
    vr = einsum('...ij,...jk->...ik', v, u).real
    return wr, vr
| 61,501
| 37.342893
| 83
|
py
|
scipy
|
scipy-main/scipy/linalg/_decomp_polar.py
|
import numpy as np
from scipy.linalg import svd
__all__ = ['polar']
def polar(a, side="right"):
    """
    Compute the polar decomposition.

    Factor `a` as ``u @ p`` (when `side` is "right") or ``p @ u`` (when
    `side` is "left"), where ``p`` is Hermitian positive semidefinite [1]_.
    Depending on the shape of `a`, either the rows or columns of `u` are
    orthonormal; for square `a`, `u` is unitary. For non-square `a` the
    "canonical polar decomposition" [2]_ is computed.

    Parameters
    ----------
    a : (m, n) array_like
        The array to be factored.
    side : {'left', 'right'}, optional
        Determines whether a right (``a = up``, default) or left
        (``a = pu``) polar decomposition is computed.

    Returns
    -------
    u : (m, n) ndarray
        Unitary when `a` is square; otherwise the columns (m > n) or the
        rows (m < n) of `u` are orthonormal.
    p : ndarray
        Hermitian positive semidefinite, and positive definite when `a` is
        nonsingular. Its shape is (n, n) for ``side="right"`` and (m, m)
        for ``side="left"``.

    References
    ----------
    .. [1] R. A. Horn and C. R. Johnson, "Matrix Analysis", Cambridge
           University Press, 1985.
    .. [2] N. J. Higham, "Functions of Matrices: Theory and Computation",
           SIAM, 2008.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import polar
    >>> a = np.array([[1., -1.], [2., 4.]])
    >>> u, p = polar(a)
    >>> np.allclose(u @ p, a)
    True
    >>> np.allclose(u @ u.T, np.eye(2))  # u is orthogonal/unitary
    True
    """
    if side not in ('right', 'left'):
        raise ValueError("`side` must be either 'right' or 'left'")
    a = np.asarray(a)
    if a.ndim != 2:
        raise ValueError("`a` must be a 2-D array.")

    # The polar factors fall straight out of the thin SVD
    # a = w @ diag(s) @ vh:
    #   u = w @ vh
    #   p = vh* @ diag(s) @ vh   (right)  or  w @ diag(s) @ w*   (left)
    w, s, vh = svd(a, full_matrices=False)
    u = w @ vh
    if side == 'left':
        # a = pu
        p = (w * s) @ w.conj().T
    else:
        # a = up
        p = (vh.conj().T * s) @ vh
    return u, p
| 3,578
| 30.955357
| 77
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_cythonized_array_utils.py
|
import numpy as np
from scipy.linalg import bandwidth, issymmetric, ishermitian
import pytest
from pytest import raises
def test_bandwidth_dtypes():
    """Unsupported dtypes raise TypeError; all others are accepted."""
    n = 5
    for code in np.typecodes['All']:
        M = np.zeros([n, n], dtype=code)
        if code in 'eUVOMm':
            raises(TypeError, bandwidth, M)
        elif code == 'G':  # No-op test. On win these pass on others fail.
            continue
        else:
            _ = bandwidth(M)
def test_bandwidth_non2d_input():
    """1-D and 3-D inputs are rejected with ValueError."""
    for bad in (np.array([1, 2, 3]),
                np.array([[[1, 2, 3], [4, 5, 6]]])):
        raises(ValueError, bandwidth, bad)
@pytest.mark.parametrize('T', [x for x in np.typecodes['All']
                               if x not in 'eGUVOMm'])
def test_bandwidth_square_inputs(T):
    """bandwidth() finds (k, k) on a square matrix with bands at +/-1, +/-k."""
    n = 20
    k = 4
    R = np.zeros([n, n], dtype=T, order='F')
    # form a banded matrix inplace; slices of one np.arange replace the
    # throwaway Python list comprehensions used for fancy indexing before
    r = np.arange(n)
    R[r, r] = 1                    # main diagonal
    R[r[:n-k], r[k:]] = 1          # k-th superdiagonal
    R[r[1:], r[:n-1]] = 1          # first subdiagonal
    R[r[k:], r[:n-k]] = 1          # k-th subdiagonal
    assert bandwidth(R) == (k, k)
@pytest.mark.parametrize('T', [x for x in np.typecodes['All']
                               if x not in 'eGUVOMm'])
def test_bandwidth_rect_inputs(T):
    """bandwidth() finds (k, k) on a rectangular (n < m) banded matrix."""
    n, m = 10, 20
    k = 5
    R = np.zeros([n, m], dtype=T, order='F')
    # form a banded matrix inplace; slices of one np.arange replace the
    # throwaway Python list comprehensions used for fancy indexing before
    r = np.arange(n)
    R[r, r] = 1                    # main diagonal
    R[r[:n-k], r[k:]] = 1          # k-th superdiagonal
    R[r[1:], r[:n-1]] = 1          # first subdiagonal
    R[r[k:], r[:n-k]] = 1          # k-th subdiagonal
    assert bandwidth(R) == (k, k)
def test_issymetric_ishermitian_dtypes():
    """Unsupported dtypes raise TypeError; zero matrices of the rest pass."""
    n = 5
    for code in np.typecodes['All']:
        Z = np.zeros([n, n], dtype=code)
        if code in 'eUVOMm':
            raises(TypeError, issymmetric, Z)
            raises(TypeError, ishermitian, Z)
        elif code == 'G':  # No-op test. On win these pass on others fail.
            continue
        else:
            assert issymmetric(Z)
            assert ishermitian(Z)
def test_issymmetric_ishermitian_invalid_input():
    """1-D, 3-D and non-square 2-D inputs are all rejected."""
    bad_shapes = (np.array([1, 2, 3]),
                  np.array([[[1, 2, 3], [4, 5, 6]]]),
                  np.array([[1, 2, 3], [4, 5, 6]]))
    for bad in bad_shapes:
        raises(ValueError, issymmetric, bad)
        raises(ValueError, ishermitian, bad)
def test_issymetric_complex_decimals():
    """A symmetrized complex matrix with non-terminating decimals passes."""
    B = np.arange(1, 10).astype(complex).reshape(3, 3)
    B += np.arange(-4, 5).astype(complex).reshape(3, 3) * 1j
    B /= np.pi  # make entries decimal
    assert issymmetric(B + B.T)
def test_ishermitian_complex_decimals():
    """A hermitized complex matrix with non-terminating decimals passes."""
    B = np.arange(1, 10).astype(complex).reshape(3, 3)
    B += np.arange(-4, 5).astype(complex).reshape(3, 3) * 1j
    B /= np.pi  # make entries decimal
    assert ishermitian(B + B.T.conj())
def test_issymmetric_approximate_results():
    """Nearly-symmetric float products pass under several atol/rtol combos."""
    n = 20
    rng = np.random.RandomState(123456789)
    x = rng.uniform(high=5., size=[n, n])
    y = x @ x.T  # symmetric up to floating-point error
    p = rng.standard_normal([n, n])
    z = p @ y @ p.T
    for kwargs in ({'atol': 1e-10},
                   {'atol': 1e-10, 'rtol': 0.},
                   {'atol': 0., 'rtol': 1e-12},
                   {'atol': 1e-13, 'rtol': 1e-12}):
        assert issymmetric(z, **kwargs)
def test_ishermitian_approximate_results():
    """Nearly-Hermitian complex products pass under several atol/rtol combos."""
    n = 20
    rng = np.random.RandomState(987654321)
    x = rng.uniform(high=5., size=[n, n])
    y = x @ x.T  # symmetric
    p = rng.standard_normal([n, n]) + rng.standard_normal([n, n]) * 1j
    z = p @ y @ p.conj().T
    for kwargs in ({'atol': 1e-10},
                   {'atol': 1e-10, 'rtol': 0.},
                   {'atol': 0., 'rtol': 1e-12},
                   {'atol': 1e-13, 'rtol': 1e-12}):
        assert ishermitian(z, **kwargs)
| 3,840
| 30.483607
| 71
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_interpolative.py
|
#******************************************************************************
# Copyright (C) 2013 Kenneth L. Ho
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# None of the names of the copyright holders may be used to endorse or
# promote products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#******************************************************************************
import scipy.linalg.interpolative as pymatrixid
import numpy as np
from scipy.linalg import hilbert, svdvals, norm
from scipy.sparse.linalg import aslinearoperator
from scipy.linalg.interpolative import interp_decomp
from numpy.testing import (assert_, assert_allclose, assert_equal,
assert_array_equal)
import pytest
from pytest import raises as assert_raises
import sys
_IS_32BIT = (sys.maxsize < 2**32)
@pytest.fixture()
def eps():
    """Shared accuracy tolerance for the interpolative-decomposition tests."""
    yield 1e-12
@pytest.fixture(params=[np.float64, np.complex128])
def A(request):
    """300 x 300 Hilbert matrix, parametrized over a real and complex dtype."""
    size = 300
    yield hilbert(size).astype(request.param)
@pytest.fixture()
def L(A):
    """The Hilbert-matrix fixture ``A`` wrapped as a LinearOperator."""
    yield aslinearoperator(A)
@pytest.fixture()
def rank(A, eps):
    """Numerical rank of ``A``: index of the first singular value below
    ``eps``, or the full dimension when none falls below it."""
    singular_values = np.linalg.svd(A, compute_uv=False)
    below = np.flatnonzero(singular_values < eps)
    return below[0] if below.size else A.shape[0]
class TestInterpolativeDecomposition:
    """Tests for ``scipy.linalg.interpolative`` on a 300x300 Hilbert matrix.

    The ``A``/``L``/``eps``/``rank`` fixtures supply the matrix, its
    LinearOperator wrapper, the target accuracy, and the numerical rank.

    NOTE: the deprecated NumPy aliases ``np.complex_`` and ``np.float_``
    (removed in NumPy 2.0) have been replaced by ``np.complex128`` and
    ``np.float64``.  These are exact aliases, so behavior is unchanged.
    """
    @pytest.mark.parametrize(
        "rand,lin_op",
        [(False, False), (True, False), (True, True)])
    def test_real_id_fixed_precision(self, A, L, eps, rand, lin_op):
        if _IS_32BIT and A.dtype == np.complex128 and rand:
            pytest.xfail("bug in external fortran code")
        # Test ID routines on a Hilbert matrix.
        A_or_L = A if not lin_op else L
        k, idx, proj = pymatrixid.interp_decomp(A_or_L, eps, rand=rand)
        B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj)
        assert_allclose(A, B, rtol=eps, atol=1e-08)
    @pytest.mark.parametrize(
        "rand,lin_op",
        [(False, False), (True, False), (True, True)])
    def test_real_id_fixed_rank(self, A, L, eps, rank, rand, lin_op):
        if _IS_32BIT and A.dtype == np.complex128 and rand:
            pytest.xfail("bug in external fortran code")
        k = rank
        A_or_L = A if not lin_op else L
        idx, proj = pymatrixid.interp_decomp(A_or_L, k, rand=rand)
        B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj)
        assert_allclose(A, B, rtol=eps, atol=1e-08)
    @pytest.mark.parametrize("rand,lin_op", [(False, False)])
    def test_real_id_skel_and_interp_matrices(
            self, A, L, eps, rank, rand, lin_op):
        k = rank
        A_or_L = A if not lin_op else L
        idx, proj = pymatrixid.interp_decomp(A_or_L, k, rand=rand)
        P = pymatrixid.reconstruct_interp_matrix(idx, proj)
        B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
        assert_allclose(B, A[:, idx[:k]], rtol=eps, atol=1e-08)
        assert_allclose(B @ P, A, rtol=eps, atol=1e-08)
    @pytest.mark.parametrize(
        "rand,lin_op",
        [(False, False), (True, False), (True, True)])
    def test_svd_fixed_precison(self, A, L, eps, rand, lin_op):
        # NOTE: "precison" typo kept -- the test name is collected by pytest
        # and renaming it would change the public test identifier.
        if _IS_32BIT and A.dtype == np.complex128 and rand:
            pytest.xfail("bug in external fortran code")
        A_or_L = A if not lin_op else L
        U, S, V = pymatrixid.svd(A_or_L, eps, rand=rand)
        B = U * S @ V.T.conj()
        assert_allclose(A, B, rtol=eps, atol=1e-08)
    @pytest.mark.parametrize(
        "rand,lin_op",
        [(False, False), (True, False), (True, True)])
    def test_svd_fixed_rank(self, A, L, eps, rank, rand, lin_op):
        if _IS_32BIT and A.dtype == np.complex128 and rand:
            pytest.xfail("bug in external fortran code")
        k = rank
        A_or_L = A if not lin_op else L
        U, S, V = pymatrixid.svd(A_or_L, k, rand=rand)
        B = U * S @ V.T.conj()
        assert_allclose(A, B, rtol=eps, atol=1e-08)
    def test_id_to_svd(self, A, eps, rank):
        k = rank
        idx, proj = pymatrixid.interp_decomp(A, k, rand=False)
        U, S, V = pymatrixid.id_to_svd(A[:, idx[:k]], idx, proj)
        B = U * S @ V.T.conj()
        assert_allclose(A, B, rtol=eps, atol=1e-08)
    def test_estimate_spectral_norm(self, A):
        s = svdvals(A)
        norm_2_est = pymatrixid.estimate_spectral_norm(A)
        assert_allclose(norm_2_est, s[0], rtol=1e-6, atol=1e-8)
    def test_estimate_spectral_norm_diff(self, A):
        B = A.copy()
        B[:, 0] *= 1.2
        s = svdvals(A - B)
        norm_2_est = pymatrixid.estimate_spectral_norm_diff(A, B)
        assert_allclose(norm_2_est, s[0], rtol=1e-6, atol=1e-8)
    def test_rank_estimates_array(self, A):
        B = np.array([[1, 1, 0], [0, 0, 1], [0, 0, 1]], dtype=A.dtype)
        for M in [A, B]:
            rank_tol = 1e-9
            rank_np = np.linalg.matrix_rank(M, norm(M, 2) * rank_tol)
            rank_est = pymatrixid.estimate_rank(M, rank_tol)
            # The estimate may overshoot slightly but never undershoot.
            assert_(rank_est >= rank_np)
            assert_(rank_est <= rank_np + 10)
    def test_rank_estimates_lin_op(self, A):
        B = np.array([[1, 1, 0], [0, 0, 1], [0, 0, 1]], dtype=A.dtype)
        for M in [A, B]:
            ML = aslinearoperator(M)
            rank_tol = 1e-9
            rank_np = np.linalg.matrix_rank(M, norm(M, 2) * rank_tol)
            rank_est = pymatrixid.estimate_rank(ML, rank_tol)
            # The randomized LinearOperator path is looser in both directions.
            assert_(rank_est >= rank_np - 4)
            assert_(rank_est <= rank_np + 4)
    def test_rand(self):
        pymatrixid.seed('default')
        assert_allclose(pymatrixid.rand(2), [0.8932059, 0.64500803],
                        rtol=1e-4, atol=1e-8)
        pymatrixid.seed(1234)
        x1 = pymatrixid.rand(2)
        assert_allclose(x1, [0.7513823, 0.06861718], rtol=1e-4, atol=1e-8)
        np.random.seed(1234)
        pymatrixid.seed()
        x2 = pymatrixid.rand(2)
        np.random.seed(1234)
        pymatrixid.seed(np.random.rand(55))
        x3 = pymatrixid.rand(2)
        assert_allclose(x1, x2)
        assert_allclose(x1, x3)
    def test_badcall(self):
        # float32 input is unsupported and must raise, not silently degrade.
        A = hilbert(5).astype(np.float32)
        with assert_raises(ValueError):
            pymatrixid.interp_decomp(A, 1e-6, rand=False)
    def test_rank_too_large(self):
        # svd(array, k) should not segfault
        a = np.ones((4, 3))
        with assert_raises(ValueError):
            pymatrixid.svd(a, 4)
    def test_full_rank(self):
        eps = 1.0e-12
        # fixed precision
        A = np.random.rand(16, 8)
        k, idx, proj = pymatrixid.interp_decomp(A, eps)
        assert_equal(k, A.shape[1])
        P = pymatrixid.reconstruct_interp_matrix(idx, proj)
        B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
        assert_allclose(A, B @ P)
        # fixed rank
        idx, proj = pymatrixid.interp_decomp(A, k)
        P = pymatrixid.reconstruct_interp_matrix(idx, proj)
        B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
        assert_allclose(A, B @ P)
    @pytest.mark.parametrize("dtype", [np.float64, np.complex128])
    @pytest.mark.parametrize("rand", [True, False])
    @pytest.mark.parametrize("eps", [1, 0.1])
    def test_bug_9793(self, dtype, rand, eps):
        # Regression test: interp_decomp must not mutate its input array.
        if _IS_32BIT and dtype == np.complex128 and rand:
            pytest.xfail("bug in external fortran code")
        A = np.array([[-1, -1, -1, 0, 0, 0],
                      [0, 0, 0, 1, 1, 1],
                      [1, 0, 0, 1, 0, 0],
                      [0, 1, 0, 0, 1, 0],
                      [0, 0, 1, 0, 0, 1]],
                     dtype=dtype, order="C")
        B = A.copy()
        interp_decomp(A.T, eps, rand=rand)
        assert_array_equal(A, B)
| 8,956
| 36.012397
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_matfuncs.py
|
#
# Created by: Pearu Peterson, March 2002
#
""" Test functions for linalg.matfuncs module
"""
import random
import functools
import numpy as np
from numpy import array, identity, dot, sqrt
from numpy.testing import (assert_array_almost_equal, assert_allclose, assert_,
assert_array_less, assert_array_equal, assert_warns)
import pytest
import scipy.linalg
from scipy.linalg import (funm, signm, logm, sqrtm, fractional_matrix_power,
expm, expm_frechet, expm_cond, norm, khatri_rao)
from scipy.linalg import _matfuncs_inv_ssq
import scipy.linalg._expm_frechet
from scipy.optimize import minimize
def _get_al_mohy_higham_2012_experiment_1():
"""
Return the test matrix from Experiment (1) of [1]_.
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
"Improved Inverse Scaling and Squaring Algorithms
for the Matrix Logarithm."
SIAM Journal on Scientific Computing, 34 (4). C152-C169.
ISSN 1095-7197
"""
A = np.array([
[3.2346e-1, 3e4, 3e4, 3e4],
[0, 3.0089e-1, 3e4, 3e4],
[0, 0, 3.2210e-1, 3e4],
[0, 0, 0, 3.0744e-1]], dtype=float)
return A
class TestSignM:
    """Tests for the matrix sign function ``scipy.linalg.signm``."""
    def test_nils(self):
        # Dense non-symmetric matrix with a known sign function.
        m = array([[29.2, -24.2, 69.5, 49.8, 7.],
                   [-9.2, 5.2, -18., -16.8, -2.],
                   [-10., 6., -20., -18., -2.],
                   [-9.6, 9.6, -25.5, -15.4, -2.],
                   [9.8, -4.8, 18., 18.2, 2.]])
        expected = array(
            [[11.94933333, -2.24533333, 15.31733333, 21.65333333, -2.24533333],
             [-3.84266667, 0.49866667, -4.59066667, -7.18666667, 0.49866667],
             [-4.08, 0.56, -4.92, -7.6, 0.56],
             [-4.03466667, 1.04266667, -5.59866667, -7.02666667, 1.04266667],
             [4.15733333, -0.50133333, 4.90933333, 7.81333333, -0.50133333]])
        assert_array_almost_equal(signm(m), expected)
    def test_defective1(self):
        # Defective (non-diagonalizable) input; just check signm runs.
        m = array([[0.0, 1, 0, 0], [1, 0, 1, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
        signm(m, disp=False)
        # XXX: what would be the correct result?
    def test_defective2(self):
        m = array((
            [29.2, -24.2, 69.5, 49.8, 7.0],
            [-9.2, 5.2, -18.0, -16.8, -2.0],
            [-10.0, 6.0, -20.0, -18.0, -2.0],
            [-9.6, 9.6, -25.5, -15.4, -2.0],
            [9.8, -4.8, 18.0, 18.2, 2.0]))
        signm(m, disp=False)
        # XXX: what would be the correct result?
    def test_defective3(self):
        m = array([[-2., 25., 0., 0., 0., 0., 0.],
                   [0., -3., 10., 3., 3., 3., 0.],
                   [0., 0., 2., 15., 3., 3., 0.],
                   [0., 0., 0., 0., 15., 3., 0.],
                   [0., 0., 0., 0., 3., 10., 0.],
                   [0., 0., 0., 0., 0., -2., 25.],
                   [0., 0., 0., 0., 0., 0., -3.]])
        signm(m, disp=False)
        # XXX: what would be the correct result?
class TestLogM:
    """Tests for the matrix logarithm ``scipy.linalg.logm``.

    Most tests validate logm indirectly through the round trip
    ``expm(logm(M)) == M``, plus dtype preservation/promotion rules and
    singular/near-singular warning behavior.
    NOTE(review): code left byte-identical -- several tests consume seeded
    NumPy RNG streams, so statement order determines the sampled data.
    """
    def test_nils(self):
        a = array([[-2., 25., 0., 0., 0., 0., 0.],
                   [0., -3., 10., 3., 3., 3., 0.],
                   [0., 0., 2., 15., 3., 3., 0.],
                   [0., 0., 0., 0., 15., 3., 0.],
                   [0., 0., 0., 0., 3., 10., 0.],
                   [0., 0., 0., 0., 0., -2., 25.],
                   [0., 0., 0., 0., 0., 0., -3.]])
        # Shift by 3.1*I (complex) so logm is defined; only check it runs.
        m = (identity(7)*3.1+0j)-a
        logm(m, disp=False)
        #XXX: what would be the correct result?
    def test_al_mohy_higham_2012_experiment_1_logm(self):
        # The logm completes the round trip successfully.
        # Note that the expm leg of the round trip is badly conditioned.
        A = _get_al_mohy_higham_2012_experiment_1()
        A_logm, info = logm(A, disp=False)
        A_round_trip = expm(A_logm)
        assert_allclose(A_round_trip, A, rtol=5e-5, atol=1e-14)
    def test_al_mohy_higham_2012_experiment_1_funm_log(self):
        # The raw funm with np.log does not complete the round trip.
        # Note that the expm leg of the round trip is badly conditioned.
        A = _get_al_mohy_higham_2012_experiment_1()
        A_funm_log, info = funm(A, np.log, disp=False)
        A_round_trip = expm(A_funm_log)
        assert_(not np.allclose(A_round_trip, A, rtol=1e-5, atol=1e-14))
    def test_round_trip_random_float(self):
        np.random.seed(1234)
        for n in range(1, 6):
            M_unscaled = np.random.randn(n, n)
            for scale in np.logspace(-4, 4, 9):
                M = M_unscaled * scale
                # Eigenvalues are related to the branch cut.
                W = np.linalg.eigvals(M)
                err_msg = f'M:{M} eivals:{W}'
                # Check sqrtm round trip because it is used within logm.
                M_sqrtm, info = sqrtm(M, disp=False)
                M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
                assert_allclose(M_sqrtm_round_trip, M)
                # Check logm round trip.
                M_logm, info = logm(M, disp=False)
                M_logm_round_trip = expm(M_logm)
                assert_allclose(M_logm_round_trip, M, err_msg=err_msg)
    def test_round_trip_random_complex(self):
        np.random.seed(1234)
        for n in range(1, 6):
            M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
            for scale in np.logspace(-4, 4, 9):
                M = M_unscaled * scale
                M_logm, info = logm(M, disp=False)
                M_round_trip = expm(M_logm)
                assert_allclose(M_round_trip, M)
    def test_logm_type_preservation_and_conversion(self):
        # The logm matrix function should preserve the type of a matrix
        # whose eigenvalues are positive with zero imaginary part.
        # Test this preservation for variously structured matrices.
        complex_dtype_chars = ('F', 'D', 'G')
        for matrix_as_list in (
                [[1, 0], [0, 1]],
                [[1, 0], [1, 1]],
                [[2, 1], [1, 1]],
                [[2, 3], [1, 2]]):
            # check that the spectrum has the expected properties
            W = scipy.linalg.eigvals(matrix_as_list)
            assert_(not any(w.imag or w.real < 0 for w in W))
            # check float type preservation
            A = np.array(matrix_as_list, dtype=float)
            A_logm, info = logm(A, disp=False)
            assert_(A_logm.dtype.char not in complex_dtype_chars)
            # check complex type preservation
            A = np.array(matrix_as_list, dtype=complex)
            A_logm, info = logm(A, disp=False)
            assert_(A_logm.dtype.char in complex_dtype_chars)
            # check float->complex type conversion for the matrix negation
            A = -np.array(matrix_as_list, dtype=float)
            A_logm, info = logm(A, disp=False)
            assert_(A_logm.dtype.char in complex_dtype_chars)
    def test_complex_spectrum_real_logm(self):
        # This matrix has complex eigenvalues and real logm.
        # Its output dtype depends on its input dtype.
        M = [[1, 1, 2], [2, 1, 1], [1, 2, 1]]
        for dt in float, complex:
            X = np.array(M, dtype=dt)
            w = scipy.linalg.eigvals(X)
            assert_(1e-2 < np.absolute(w.imag).sum())
            Y, info = logm(X, disp=False)
            assert_(np.issubdtype(Y.dtype, np.inexact))
            assert_allclose(expm(Y), X)
    def test_real_mixed_sign_spectrum(self):
        # These matrices have real eigenvalues with mixed signs.
        # The output logm dtype is complex, regardless of input dtype.
        for M in (
                [[1, 0], [0, -1]],
                [[0, 1], [1, 0]]):
            for dt in float, complex:
                A = np.array(M, dtype=dt)
                A_logm, info = logm(A, disp=False)
                assert_(np.issubdtype(A_logm.dtype, np.complexfloating))
    def test_exactly_singular(self):
        # Exactly singular inputs must warn but still round-trip via expm.
        A = np.array([[0, 0], [1j, 1j]])
        B = np.asarray([[1, 1], [0, 0]])
        for M in A, A.T, B, B.T:
            expected_warning = _matfuncs_inv_ssq.LogmExactlySingularWarning
            L, info = assert_warns(expected_warning, logm, M, disp=False)
            E = expm(L)
            assert_allclose(E, M, atol=1e-14)
    def test_nearly_singular(self):
        M = np.array([[1e-100]])
        expected_warning = _matfuncs_inv_ssq.LogmNearlySingularWarning
        L, info = assert_warns(expected_warning, logm, M, disp=False)
        E = expm(L)
        assert_allclose(E, M, atol=1e-14)
    def test_opposite_sign_complex_eigenvalues(self):
        # See gh-6113
        E = [[0, 1], [-1, 0]]
        L = [[0, np.pi*0.5], [-np.pi*0.5, 0]]
        assert_allclose(expm(L), E, atol=1e-14)
        assert_allclose(logm(E), L, atol=1e-14)
        E = [[1j, 4], [0, -1j]]
        L = [[1j*np.pi*0.5, 2*np.pi], [0, -1j*np.pi*0.5]]
        assert_allclose(expm(L), E, atol=1e-14)
        assert_allclose(logm(E), L, atol=1e-14)
        E = [[1j, 0], [0, -1j]]
        L = [[1j*np.pi*0.5, 0], [0, -1j*np.pi*0.5]]
        assert_allclose(expm(L), E, atol=1e-14)
        assert_allclose(logm(E), L, atol=1e-14)
class TestSqrtM:
    """Tests for the matrix square root ``scipy.linalg.sqrtm``.

    Covers round trips (``sqrtm(M) @ sqrtm(M) == M``), dtype
    preservation/promotion rules, the recursion blocksize parameter, and
    regression cases from the issue tracker (gh-4866, gh-5336, gh-7839).
    NOTE(review): code left byte-identical -- several tests consume seeded
    NumPy RNG streams, so statement order determines the sampled data.
    """
    def test_round_trip_random_float(self):
        np.random.seed(1234)
        for n in range(1, 6):
            M_unscaled = np.random.randn(n, n)
            for scale in np.logspace(-4, 4, 9):
                M = M_unscaled * scale
                M_sqrtm, info = sqrtm(M, disp=False)
                M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
                assert_allclose(M_sqrtm_round_trip, M)
    def test_round_trip_random_complex(self):
        np.random.seed(1234)
        for n in range(1, 6):
            M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
            for scale in np.logspace(-4, 4, 9):
                M = M_unscaled * scale
                M_sqrtm, info = sqrtm(M, disp=False)
                M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
                assert_allclose(M_sqrtm_round_trip, M)
    def test_bad(self):
        # See https://web.archive.org/web/20051220232650/http://www.maths.man.ac.uk/~nareports/narep336.ps.gz
        # Classic ill-conditioned example: eigenvalues {1, e} with e = 2**-5.
        e = 2**-5
        se = sqrt(e)
        a = array([[1.0,0,0,1],
                   [0,e,0,0],
                   [0,0,e,0],
                   [0,0,0,1]])
        sa = array([[1,0,0,0.5],
                    [0,se,0,0],
                    [0,0,se,0],
                    [0,0,0,1]])
        n = a.shape[0]
        assert_array_almost_equal(dot(sa,sa),a)
        # Check default sqrtm.
        esa = sqrtm(a, disp=False, blocksize=n)[0]
        assert_array_almost_equal(dot(esa,esa),a)
        # Check sqrtm with 2x2 blocks.
        esa = sqrtm(a, disp=False, blocksize=2)[0]
        assert_array_almost_equal(dot(esa,esa),a)
    def test_sqrtm_type_preservation_and_conversion(self):
        # The sqrtm matrix function should preserve the type of a matrix
        # whose eigenvalues are nonnegative with zero imaginary part.
        # Test this preservation for variously structured matrices.
        complex_dtype_chars = ('F', 'D', 'G')
        for matrix_as_list in (
                [[1, 0], [0, 1]],
                [[1, 0], [1, 1]],
                [[2, 1], [1, 1]],
                [[2, 3], [1, 2]],
                [[1, 1], [1, 1]]):
            # check that the spectrum has the expected properties
            W = scipy.linalg.eigvals(matrix_as_list)
            assert_(not any(w.imag or w.real < 0 for w in W))
            # check float type preservation
            A = np.array(matrix_as_list, dtype=float)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(A_sqrtm.dtype.char not in complex_dtype_chars)
            # check complex type preservation
            A = np.array(matrix_as_list, dtype=complex)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(A_sqrtm.dtype.char in complex_dtype_chars)
            # check float->complex type conversion for the matrix negation
            A = -np.array(matrix_as_list, dtype=float)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(A_sqrtm.dtype.char in complex_dtype_chars)
    def test_sqrtm_type_conversion_mixed_sign_or_complex_spectrum(self):
        complex_dtype_chars = ('F', 'D', 'G')
        for matrix_as_list in (
                [[1, 0], [0, -1]],
                [[0, 1], [1, 0]],
                [[0, 1, 0], [0, 0, 1], [1, 0, 0]]):
            # check that the spectrum has the expected properties
            W = scipy.linalg.eigvals(matrix_as_list)
            assert_(any(w.imag or w.real < 0 for w in W))
            # check complex->complex
            A = np.array(matrix_as_list, dtype=complex)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(A_sqrtm.dtype.char in complex_dtype_chars)
            # check float->complex
            A = np.array(matrix_as_list, dtype=float)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(A_sqrtm.dtype.char in complex_dtype_chars)
    def test_blocksizes(self):
        # Make sure I do not goof up the blocksizes when they do not divide n.
        np.random.seed(1234)
        for n in range(1, 8):
            A = np.random.rand(n, n) + 1j*np.random.randn(n, n)
            A_sqrtm_default, info = sqrtm(A, disp=False, blocksize=n)
            assert_allclose(A, np.linalg.matrix_power(A_sqrtm_default, 2))
            for blocksize in range(1, 10):
                A_sqrtm_new, info = sqrtm(A, disp=False, blocksize=blocksize)
                assert_allclose(A_sqrtm_default, A_sqrtm_new)
    def test_al_mohy_higham_2012_experiment_1(self):
        # Matrix square root of a tricky upper triangular matrix.
        A = _get_al_mohy_higham_2012_experiment_1()
        A_sqrtm, info = sqrtm(A, disp=False)
        A_round_trip = A_sqrtm.dot(A_sqrtm)
        assert_allclose(A_round_trip, A, rtol=1e-5)
        assert_allclose(np.tril(A_round_trip), np.tril(A))
    def test_strict_upper_triangular(self):
        # This matrix has no square root.
        for dt in int, float:
            A = np.array([
                [0, 3, 0, 0],
                [0, 0, 3, 0],
                [0, 0, 0, 3],
                [0, 0, 0, 0]], dtype=dt)
            A_sqrtm, info = sqrtm(A, disp=False)
            assert_(np.isnan(A_sqrtm).all())
    def test_weird_matrix(self):
        # The square root of matrix B exists.
        for dt in int, float:
            A = np.array([
                [0, 0, 1],
                [0, 0, 0],
                [0, 1, 0]], dtype=dt)
            B = np.array([
                [0, 1, 0],
                [0, 0, 0],
                [0, 0, 0]], dtype=dt)
            assert_array_equal(B, A.dot(A))
            # But scipy sqrtm is not clever enough to find it.
            B_sqrtm, info = sqrtm(B, disp=False)
            assert_(np.isnan(B_sqrtm).all())
    def test_disp(self):
        # disp=True returns the root directly instead of a (root, info) pair.
        np.random.seed(1234)
        A = np.random.rand(3, 3)
        B = sqrtm(A, disp=True)
        assert_allclose(B.dot(B), A)
    def test_opposite_sign_complex_eigenvalues(self):
        M = [[2j, 4], [0, -2j]]
        R = [[1+1j, 2], [0, 1-1j]]
        assert_allclose(np.dot(R, R), M, atol=1e-14)
        assert_allclose(sqrtm(M), R, atol=1e-14)
    def test_gh4866(self):
        M = np.array([[1, 0, 0, 1],
                      [0, 0, 0, 0],
                      [0, 0, 0, 0],
                      [1, 0, 0, 1]])
        R = np.array([[sqrt(0.5), 0, 0, sqrt(0.5)],
                      [0, 0, 0, 0],
                      [0, 0, 0, 0],
                      [sqrt(0.5), 0, 0, sqrt(0.5)]])
        assert_allclose(np.dot(R, R), M, atol=1e-14)
        assert_allclose(sqrtm(M), R, atol=1e-14)
    def test_gh5336(self):
        M = np.diag([2, 1, 0])
        R = np.diag([sqrt(2), 1, 0])
        assert_allclose(np.dot(R, R), M, atol=1e-14)
        assert_allclose(sqrtm(M), R, atol=1e-14)
    def test_gh7839(self):
        M = np.zeros((2, 2))
        R = np.zeros((2, 2))
        assert_allclose(np.dot(R, R), M, atol=1e-14)
        assert_allclose(sqrtm(M), R, atol=1e-14)
    def test_data_size_preservation_uint_in_float_out(self):
        M = np.zeros((10, 10), dtype=np.uint8)
        # input bit size is 8, but minimum float bit size is 16
        assert sqrtm(M).dtype == np.float16
        M = np.zeros((10, 10), dtype=np.uint16)
        assert sqrtm(M).dtype == np.float16
        M = np.zeros((10, 10), dtype=np.uint32)
        assert sqrtm(M).dtype == np.float32
        M = np.zeros((10, 10), dtype=np.uint64)
        assert sqrtm(M).dtype == np.float64
    def test_data_size_preservation_int_in_float_out(self):
        M = np.zeros((10, 10), dtype=np.int8)
        # input bit size is 8, but minimum float bit size is 16
        assert sqrtm(M).dtype == np.float16
        M = np.zeros((10, 10), dtype=np.int16)
        assert sqrtm(M).dtype == np.float16
        M = np.zeros((10, 10), dtype=np.int32)
        assert sqrtm(M).dtype == np.float32
        M = np.zeros((10, 10), dtype=np.int64)
        assert sqrtm(M).dtype == np.float64
    def test_data_size_preservation_int_in_comp_out(self):
        M = np.array([[2, 4], [0, -2]], dtype=np.int8)
        # input bit size is 8, but minimum complex bit size is 64
        assert sqrtm(M).dtype == np.complex64
        M = np.array([[2, 4], [0, -2]], dtype=np.int16)
        # input bit size is 16, but minimum complex bit size is 64
        assert sqrtm(M).dtype == np.complex64
        M = np.array([[2, 4], [0, -2]], dtype=np.int32)
        assert sqrtm(M).dtype == np.complex64
        M = np.array([[2, 4], [0, -2]], dtype=np.int64)
        assert sqrtm(M).dtype == np.complex128
    def test_data_size_preservation_float_in_float_out(self):
        M = np.zeros((10, 10), dtype=np.float16)
        assert sqrtm(M).dtype == np.float16
        M = np.zeros((10, 10), dtype=np.float32)
        assert sqrtm(M).dtype == np.float32
        M = np.zeros((10, 10), dtype=np.float64)
        assert sqrtm(M).dtype == np.float64
        # Extended precision only exists on some platforms.
        if hasattr(np, 'float128'):
            M = np.zeros((10, 10), dtype=np.float128)
            assert sqrtm(M).dtype == np.float128
    def test_data_size_preservation_float_in_comp_out(self):
        M = np.array([[2, 4], [0, -2]], dtype=np.float16)
        # input bit size is 16, but minimum complex bit size is 64
        assert sqrtm(M).dtype == np.complex64
        M = np.array([[2, 4], [0, -2]], dtype=np.float32)
        assert sqrtm(M).dtype == np.complex64
        M = np.array([[2, 4], [0, -2]], dtype=np.float64)
        assert sqrtm(M).dtype == np.complex128
        if hasattr(np, 'float128') and hasattr(np, 'complex256'):
            M = np.array([[2, 4], [0, -2]], dtype=np.float128)
            assert sqrtm(M).dtype == np.complex256
    def test_data_size_preservation_comp_in_comp_out(self):
        M = np.array([[2j, 4], [0, -2j]], dtype=np.complex64)
        assert sqrtm(M).dtype == np.complex128
        if hasattr(np, 'complex256'):
            M = np.array([[2j, 4], [0, -2j]], dtype=np.complex128)
            assert sqrtm(M).dtype == np.complex256
            M = np.array([[2j, 4], [0, -2j]], dtype=np.complex256)
            assert sqrtm(M).dtype == np.complex256
class TestFractionalMatrixPower:
    """Tests for ``scipy.linalg.fractional_matrix_power``.

    Validates round trips (``(M**(1/p))**p == M``), consistency with
    ``expm(p * logm(M))``, dtype preservation/promotion, and the internal
    Briggs helper from ``_matfuncs_inv_ssq``.
    NOTE(review): code left byte-identical -- several tests consume seeded
    NumPy/``random`` RNG streams, so statement order determines the data.
    """
    def test_round_trip_random_complex(self):
        np.random.seed(1234)
        for p in range(1, 5):
            for n in range(1, 5):
                M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
                for scale in np.logspace(-4, 4, 9):
                    M = M_unscaled * scale
                    M_root = fractional_matrix_power(M, 1/p)
                    M_round_trip = np.linalg.matrix_power(M_root, p)
                    assert_allclose(M_round_trip, M)
    def test_round_trip_random_float(self):
        # This test is more annoying because it can hit the branch cut;
        # this happens when the matrix has an eigenvalue
        # with no imaginary component and with a real negative component,
        # and it means that the principal branch does not exist.
        np.random.seed(1234)
        for p in range(1, 5):
            for n in range(1, 5):
                M_unscaled = np.random.randn(n, n)
                for scale in np.logspace(-4, 4, 9):
                    M = M_unscaled * scale
                    M_root = fractional_matrix_power(M, 1/p)
                    M_round_trip = np.linalg.matrix_power(M_root, p)
                    assert_allclose(M_round_trip, M)
    def test_larger_abs_fractional_matrix_powers(self):
        np.random.seed(1234)
        for n in (2, 3, 5):
            for i in range(10):
                M = np.random.randn(n, n) + 1j * np.random.randn(n, n)
                M_one_fifth = fractional_matrix_power(M, 0.2)
                # Test the round trip.
                M_round_trip = np.linalg.matrix_power(M_one_fifth, 5)
                assert_allclose(M, M_round_trip)
                # Test a large abs fractional power.
                X = fractional_matrix_power(M, -5.4)
                Y = np.linalg.matrix_power(M_one_fifth, -27)
                assert_allclose(X, Y)
                # Test another large abs fractional power.
                X = fractional_matrix_power(M, 3.8)
                Y = np.linalg.matrix_power(M_one_fifth, 19)
                assert_allclose(X, Y)
    def test_random_matrices_and_powers(self):
        # Each independent iteration of this fuzz test picks random parameters.
        # It tries to hit some edge cases.
        np.random.seed(1234)
        nsamples = 20
        for i in range(nsamples):
            # Sample a matrix size and a random real power.
            n = random.randrange(1, 5)
            p = np.random.randn()
            # Sample a random real or complex matrix.
            matrix_scale = np.exp(random.randrange(-4, 5))
            A = np.random.randn(n, n)
            if random.choice((True, False)):
                A = A + 1j * np.random.randn(n, n)
            A = A * matrix_scale
            # Check a couple of analytically equivalent ways
            # to compute the fractional matrix power.
            # These can be compared because they both use the principal branch.
            A_power = fractional_matrix_power(A, p)
            A_logm, info = logm(A, disp=False)
            A_power_expm_logm = expm(A_logm * p)
            assert_allclose(A_power, A_power_expm_logm)
    def test_al_mohy_higham_2012_experiment_1(self):
        # Fractional powers of a tricky upper triangular matrix.
        A = _get_al_mohy_higham_2012_experiment_1()
        # Test remainder matrix power.
        A_funm_sqrt, info = funm(A, np.sqrt, disp=False)
        A_sqrtm, info = sqrtm(A, disp=False)
        A_rem_power = _matfuncs_inv_ssq._remainder_matrix_power(A, 0.5)
        A_power = fractional_matrix_power(A, 0.5)
        assert_array_equal(A_rem_power, A_power)
        assert_allclose(A_sqrtm, A_power)
        assert_allclose(A_sqrtm, A_funm_sqrt)
        # Test more fractional powers.
        for p in (1/2, 5/3):
            A_power = fractional_matrix_power(A, p)
            A_round_trip = fractional_matrix_power(A_power, 1/p)
            assert_allclose(A_round_trip, A, rtol=1e-2)
            assert_allclose(np.tril(A_round_trip, 1), np.tril(A, 1))
    def test_briggs_helper_function(self):
        # Checks a**(2**-k) - 1 against the numerically careful helper.
        np.random.seed(1234)
        for a in np.random.randn(10) + 1j * np.random.randn(10):
            for k in range(5):
                x_observed = _matfuncs_inv_ssq._briggs_helper_function(a, k)
                x_expected = a ** np.exp2(-k) - 1
                assert_allclose(x_observed, x_expected)
    def test_type_preservation_and_conversion(self):
        # The fractional_matrix_power matrix function should preserve
        # the type of a matrix whose eigenvalues
        # are positive with zero imaginary part.
        # Test this preservation for variously structured matrices.
        complex_dtype_chars = ('F', 'D', 'G')
        for matrix_as_list in (
                [[1, 0], [0, 1]],
                [[1, 0], [1, 1]],
                [[2, 1], [1, 1]],
                [[2, 3], [1, 2]]):
            # check that the spectrum has the expected properties
            W = scipy.linalg.eigvals(matrix_as_list)
            assert_(not any(w.imag or w.real < 0 for w in W))
            # Check various positive and negative powers
            # with absolute values bigger and smaller than 1.
            for p in (-2.4, -0.9, 0.2, 3.3):
                # check float type preservation
                A = np.array(matrix_as_list, dtype=float)
                A_power = fractional_matrix_power(A, p)
                assert_(A_power.dtype.char not in complex_dtype_chars)
                # check complex type preservation
                A = np.array(matrix_as_list, dtype=complex)
                A_power = fractional_matrix_power(A, p)
                assert_(A_power.dtype.char in complex_dtype_chars)
                # check float->complex for the matrix negation
                A = -np.array(matrix_as_list, dtype=float)
                A_power = fractional_matrix_power(A, p)
                assert_(A_power.dtype.char in complex_dtype_chars)
    def test_type_conversion_mixed_sign_or_complex_spectrum(self):
        complex_dtype_chars = ('F', 'D', 'G')
        for matrix_as_list in (
                [[1, 0], [0, -1]],
                [[0, 1], [1, 0]],
                [[0, 1, 0], [0, 0, 1], [1, 0, 0]]):
            # check that the spectrum has the expected properties
            W = scipy.linalg.eigvals(matrix_as_list)
            assert_(any(w.imag or w.real < 0 for w in W))
            # Check various positive and negative powers
            # with absolute values bigger and smaller than 1.
            for p in (-2.4, -0.9, 0.2, 3.3):
                # check complex->complex
                A = np.array(matrix_as_list, dtype=complex)
                A_power = fractional_matrix_power(A, p)
                assert_(A_power.dtype.char in complex_dtype_chars)
                # check float->complex
                A = np.array(matrix_as_list, dtype=float)
                A_power = fractional_matrix_power(A, p)
                assert_(A_power.dtype.char in complex_dtype_chars)
    @pytest.mark.xfail(reason='Too unstable across LAPACKs.')
    def test_singular(self):
        # Negative fractional powers do not work with singular matrices.
        for matrix_as_list in (
                [[0, 0], [0, 0]],
                [[1, 1], [1, 1]],
                [[1, 2], [3, 6]],
                [[0, 0, 0], [0, 1, 1], [0, -1, 1]]):
            # Check fractional powers both for float and for complex types.
            for newtype in (float, complex):
                A = np.array(matrix_as_list, dtype=newtype)
                for p in (-0.7, -0.9, -2.4, -1.3):
                    A_power = fractional_matrix_power(A, p)
                    assert_(np.isnan(A_power).all())
                for p in (0.2, 1.43):
                    A_power = fractional_matrix_power(A, p)
                    A_round_trip = fractional_matrix_power(A_power, 1/p)
                    assert_allclose(A_round_trip, A)
    def test_opposite_sign_complex_eigenvalues(self):
        M = [[2j, 4], [0, -2j]]
        R = [[1+1j, 2], [0, 1-1j]]
        assert_allclose(np.dot(R, R), M, atol=1e-14)
        assert_allclose(fractional_matrix_power(M, 0.5), R, atol=1e-14)
class TestExpM:
    """Tests for the matrix exponential ``scipy.linalg.expm``."""
    def test_zero(self):
        # exp(0) is the identity.
        zero_mat = array([[0., 0], [0, 0]])
        assert_array_almost_equal(expm(zero_mat), [[1, 0], [0, 1]])
    def test_single_elt(self):
        # A scalar input is treated as a 1x1 matrix.
        result = expm(1)
        assert_allclose(result, np.array([[np.e]]))
    def test_empty_matrix_input(self):
        # handle gh-11082: a 0x0 input must yield an empty result.
        empty = np.zeros((0, 0))
        assert expm(empty).size == 0
    def test_2x2_input(self):
        E = np.e
        mat = array([[1, 4], [1, 1]])
        diag_val = (E**4 + 1)/(2*E)
        off_val = (E**4 - 1)/E
        assert_allclose(expm(mat), array([[diag_val, off_val],
                                          [off_val/4, diag_val]]))
        # Single-precision inputs keep single-precision outputs.
        assert expm(mat.astype(np.complex64)).dtype.char == 'F'
        assert expm(mat.astype(np.float32)).dtype.char == 'f'
    def test_nx2x2_input(self):
        E = np.e
        # These are integer matrices with integer eigenvalues, batched (5,2,2).
        batch = np.array([[[1, 4], [1, 1]],
                          [[1, 3], [1, -1]],
                          [[1, 3], [4, 5]],
                          [[1, 3], [5, 3]],
                          [[4, 5], [-3, -4]]], order='F')
        # Exact results are computed symbolically
        expected = np.array([
            [[(E**4+1)/(2*E), (E**4-1)/E],
             [(E**4-1)/4/E, (E**4+1)/(2*E)]],
            [[1/(4*E**2)+(3*E**2)/4, (3*E**2)/4-3/(4*E**2)],
             [E**2/4-1/(4*E**2), 3/(4*E**2)+E**2/4]],
            [[3/(4*E)+E**7/4, -3/(8*E)+(3*E**7)/8],
             [-1/(2*E)+E**7/2, 1/(4*E)+(3*E**7)/4]],
            [[5/(8*E**2)+(3*E**6)/8, -3/(8*E**2)+(3*E**6)/8],
             [-5/(8*E**2)+(5*E**6)/8, 3/(8*E**2)+(5*E**6)/8]],
            [[-3/(2*E)+(5*E)/2, -5/(2*E)+(5*E)/2],
             [3/(2*E)-(3*E)/2, 5/(2*E)-(3*E)/2]]
        ])
        assert_allclose(expm(batch), expected)
class TestExpmFrechet:
    """Tests for ``scipy.linalg.expm_frechet``.

    The Frechet derivative of expm at A in direction E equals the upper-right
    block of ``expm([[A, E], [0, A]])``; tests compare against that identity
    and check the 'SPS' and 'blockEnlarge' methods agree.
    NOTE(review): code left byte-identical -- the fuzz test consumes shared
    ``random``/NumPy RNG streams, so statement order determines the data.
    """
    def test_expm_frechet(self):
        # a test of the basic functionality
        M = np.array([
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [0, 0, 1, 2],
            [0, 0, 5, 6],
            ], dtype=float)
        A = np.array([
            [1, 2],
            [5, 6],
            ], dtype=float)
        E = np.array([
            [3, 4],
            [7, 8],
            ], dtype=float)
        expected_expm = scipy.linalg.expm(A)
        expected_frechet = scipy.linalg.expm(M)[:2, 2:]
        for kwargs in ({}, {'method':'SPS'}, {'method':'blockEnlarge'}):
            observed_expm, observed_frechet = expm_frechet(A, E, **kwargs)
            assert_allclose(expected_expm, observed_expm)
            assert_allclose(expected_frechet, observed_frechet)
    def test_small_norm_expm_frechet(self):
        # methodically test matrices with a range of norms, for better coverage
        M_original = np.array([
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [0, 0, 1, 2],
            [0, 0, 5, 6],
            ], dtype=float)
        A_original = np.array([
            [1, 2],
            [5, 6],
            ], dtype=float)
        E_original = np.array([
            [3, 4],
            [7, 8],
            ], dtype=float)
        A_original_norm_1 = scipy.linalg.norm(A_original, 1)
        selected_m_list = [1, 3, 5, 7, 9, 11, 13, 15]
        m_neighbor_pairs = zip(selected_m_list[:-1], selected_m_list[1:])
        for ma, mb in m_neighbor_pairs:
            # Scale so the 1-norm falls between two Pade-order thresholds.
            ell_a = scipy.linalg._expm_frechet.ell_table_61[ma]
            ell_b = scipy.linalg._expm_frechet.ell_table_61[mb]
            target_norm_1 = 0.5 * (ell_a + ell_b)
            scale = target_norm_1 / A_original_norm_1
            M = scale * M_original
            A = scale * A_original
            E = scale * E_original
            expected_expm = scipy.linalg.expm(A)
            expected_frechet = scipy.linalg.expm(M)[:2, 2:]
            observed_expm, observed_frechet = expm_frechet(A, E)
            assert_allclose(expected_expm, observed_expm)
            assert_allclose(expected_frechet, observed_frechet)
    def test_fuzz(self):
        # try a bunch of crazy inputs
        rfuncs = (
            np.random.uniform,
            np.random.normal,
            np.random.standard_cauchy,
            np.random.exponential)
        ntests = 100
        for i in range(ntests):
            rfunc = random.choice(rfuncs)
            target_norm_1 = random.expovariate(1.0)
            n = random.randrange(2, 16)
            A_original = rfunc(size=(n,n))
            E_original = rfunc(size=(n,n))
            A_original_norm_1 = scipy.linalg.norm(A_original, 1)
            scale = target_norm_1 / A_original_norm_1
            A = scale * A_original
            E = scale * E_original
            # Build the block matrix [[A, E], [0, A]] as the reference.
            M = np.vstack([
                np.hstack([A, E]),
                np.hstack([np.zeros_like(A), A])])
            expected_expm = scipy.linalg.expm(A)
            expected_frechet = scipy.linalg.expm(M)[:n, n:]
            observed_expm, observed_frechet = expm_frechet(A, E)
            assert_allclose(expected_expm, observed_expm, atol=5e-8)
            assert_allclose(expected_frechet, observed_frechet, atol=1e-7)
    def test_problematic_matrix(self):
        # this test case uncovered a bug which has since been fixed
        A = np.array([
                [1.50591997, 1.93537998],
                [0.41203263, 0.23443516],
                ], dtype=float)
        E = np.array([
                [1.87864034, 2.07055038],
                [1.34102727, 0.67341123],
                ], dtype=float)
        scipy.linalg.norm(A, 1)
        sps_expm, sps_frechet = expm_frechet(
                A, E, method='SPS')
        blockEnlarge_expm, blockEnlarge_frechet = expm_frechet(
                A, E, method='blockEnlarge')
        assert_allclose(sps_expm, blockEnlarge_expm)
        assert_allclose(sps_frechet, blockEnlarge_frechet)
    @pytest.mark.slow
    @pytest.mark.skip(reason='this test is deliberately slow')
    def test_medium_matrix(self):
        # profile this to see the speed difference
        n = 1000
        A = np.random.exponential(size=(n, n))
        E = np.random.exponential(size=(n, n))
        sps_expm, sps_frechet = expm_frechet(
                A, E, method='SPS')
        blockEnlarge_expm, blockEnlarge_frechet = expm_frechet(
                A, E, method='blockEnlarge')
        assert_allclose(sps_expm, blockEnlarge_expm)
        assert_allclose(sps_frechet, blockEnlarge_frechet)
def _help_expm_cond_search(A, A_norm, X, X_norm, eps, p):
p = np.reshape(p, A.shape)
p_norm = norm(p)
perturbation = eps * p * (A_norm / p_norm)
X_prime = expm(A + perturbation)
scaled_relative_error = norm(X_prime - X) / (X_norm * eps)
return -scaled_relative_error
def _normalized_like(A, B):
return A * (scipy.linalg.norm(B) / scipy.linalg.norm(A))
def _relative_error(f, A, perturbation):
X = f(A)
X_prime = f(A + perturbation)
return norm(X_prime - X) / norm(X)
class TestExpmConditionNumber:
    """Tests for ``scipy.linalg.expm_cond``, the expm condition number.

    NOTE(review): code left byte-identical -- the fuzz test couples a seeded
    RNG stream with an L-BFGS-B search, so statement order matters.
    """
    def test_expm_cond_smoke(self):
        np.random.seed(1234)
        for n in range(1, 4):
            A = np.random.randn(n, n)
            kappa = expm_cond(A)
            # Condition numbers are strictly positive.
            assert_array_less(0, kappa)
    def test_expm_bad_condition_number(self):
        A = np.array([
            [-1.128679820, 9.614183771e4, -4.524855739e9, 2.924969411e14],
            [0, -1.201010529, 9.634696872e4, -4.681048289e9],
            [0, 0, -1.132893222, 9.532491830e4],
            [0, 0, 0, -1.179475332],
            ])
        kappa = expm_cond(A)
        assert_array_less(1e36, kappa)
    def test_univariate(self):
        # For a 1x1 matrix [[x]], the expm condition number is |x|.
        np.random.seed(12345)
        for x in np.linspace(-5, 5, num=11):
            A = np.array([[x]])
            assert_allclose(expm_cond(A), abs(x))
        for x in np.logspace(-2, 2, num=11):
            A = np.array([[x]])
            assert_allclose(expm_cond(A), abs(x))
        for i in range(10):
            A = np.random.randn(1, 1)
            assert_allclose(expm_cond(A), np.absolute(A)[0, 0])
    @pytest.mark.slow
    def test_expm_cond_fuzz(self):
        np.random.seed(12345)
        eps = 1e-5
        nsamples = 10
        for i in range(nsamples):
            n = np.random.randint(2, 5)
            A = np.random.randn(n, n)
            A_norm = scipy.linalg.norm(A)
            X = expm(A)
            X_norm = scipy.linalg.norm(X)
            kappa = expm_cond(A)
            # Look for the small perturbation that gives the greatest
            # relative error.
            f = functools.partial(_help_expm_cond_search,
                                  A, A_norm, X, X_norm, eps)
            guess = np.ones(n*n)
            out = minimize(f, guess, method='L-BFGS-B')
            xopt = out.x
            yopt = f(xopt)
            p_best = eps * _normalized_like(np.reshape(xopt, A.shape), A)
            p_best_relerr = _relative_error(expm, A, p_best)
            assert_allclose(p_best_relerr, -yopt * eps)
            # Check that the identified perturbation indeed gives greater
            # relative error than random perturbations with similar norms.
            for j in range(5):
                p_rand = eps * _normalized_like(np.random.randn(*A.shape), A)
                assert_allclose(norm(p_best), norm(p_rand))
                p_rand_relerr = _relative_error(expm, A, p_rand)
                assert_array_less(p_rand_relerr, p_best_relerr)
            # The greatest relative error should not be much greater than
            # eps times the condition number kappa.
            # In the limit as eps approaches zero it should never be greater.
            assert_array_less(p_best_relerr, (1 + 2*eps) * eps * kappa)
class TestKhatriRao:
def test_basic(self):
a = khatri_rao(array([[1, 2], [3, 4]]),
array([[5, 6], [7, 8]]))
assert_array_equal(a, array([[5, 12],
[7, 16],
[15, 24],
[21, 32]]))
b = khatri_rao(np.empty([2, 2]), np.empty([2, 2]))
assert_array_equal(b.shape, (4, 2))
def test_number_of_columns_equality(self):
with pytest.raises(ValueError):
a = array([[1, 2, 3],
[4, 5, 6]])
b = array([[1, 2],
[3, 4]])
khatri_rao(a, b)
def test_to_assure_2d_array(self):
with pytest.raises(ValueError):
# both arrays are 1-D
a = array([1, 2, 3])
b = array([4, 5, 6])
khatri_rao(a, b)
with pytest.raises(ValueError):
# first array is 1-D
a = array([1, 2, 3])
b = array([
[1, 2, 3],
[4, 5, 6]
])
khatri_rao(a, b)
with pytest.raises(ValueError):
# second array is 1-D
a = array([
[1, 2, 3],
[7, 8, 9]
])
b = array([4, 5, 6])
khatri_rao(a, b)
def test_equality_of_two_equations(self):
a = array([[1, 2], [3, 4]])
b = array([[5, 6], [7, 8]])
res1 = khatri_rao(a, b)
res2 = np.vstack([np.kron(a[:, k], b[:, k])
for k in range(b.shape[1])]).T
assert_array_equal(res1, res2)
| 38,684
| 38.676923
| 109
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_decomp.py
|
import itertools
import platform
import sys
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_equal,
assert_, assert_allclose)
import pytest
from pytest import raises as assert_raises
from scipy.linalg import (eig, eigvals, lu, svd, svdvals, cholesky, qr,
schur, rsf2csf, lu_solve, lu_factor, solve, diagsvd,
hessenberg, rq, eig_banded, eigvals_banded, eigh,
eigvalsh, qr_multiply, qz, orth, ordqz,
subspace_angles, hadamard, eigvalsh_tridiagonal,
eigh_tridiagonal, null_space, cdf2rdf, LinAlgError)
from scipy.linalg.lapack import (dgbtrf, dgbtrs, zgbtrf, zgbtrs, dsbev,
dsbevd, dsbevx, zhbevd, zhbevx)
from scipy.linalg._misc import norm
from scipy.linalg._decomp_qz import _select_function
from scipy.stats import ortho_group
from numpy import (array, diag, full, linalg, argsort, zeros, arange,
float32, complex64, ravel, sqrt, iscomplex, shape, sort,
sign, asarray, isfinite, ndarray, eye,)
from scipy.linalg._testutils import assert_no_overwrite
from scipy.sparse._sputils import matrix
from scipy._lib._testutils import check_free_memory
from scipy.linalg.blas import HAS_ILP64
try:
from scipy.__config__ import CONFIG
except ImportError:
CONFIG = None
def _random_hermitian_matrix(n, posdef=False, dtype=float):
"Generate random sym/hermitian array of the given size n"
if dtype in COMPLEX_DTYPES:
A = np.random.rand(n, n) + np.random.rand(n, n)*1.0j
A = (A + A.conj().T)/2
else:
A = np.random.rand(n, n)
A = (A + A.T)/2
if posdef:
A += sqrt(2*n)*np.eye(n)
return A.astype(dtype)
# Real floating dtypes exercised by the tests in this file.
REAL_DTYPES = [np.float32, np.float64]
# Complex floating dtypes exercised by the tests in this file.
COMPLEX_DTYPES = [np.complex64, np.complex128]
# All floating dtypes (real followed by complex).
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
def clear_fuss(ar, fuss_binary_bits=7):
    """Round away the trailing `fuss_binary_bits` bits of the mantissa.

    Complex input is handled by clearing the real and imaginary parts
    independently.  Used to make nearly-equal floats compare equal after
    sorting.
    """
    values = np.asanyarray(ar)
    if np.iscomplexobj(values):
        return clear_fuss(values.real) + 1j * clear_fuss(values.imag)
    mant_bits = np.finfo(values.dtype).nmant
    mantissa, exponent = np.frexp(values)
    # Scale so the bits to keep sit left of the binary point, round to the
    # nearest integer, then undo the scaling.
    scale = 2.0 ** (mant_bits - fuss_binary_bits)
    mantissa = np.rint(mantissa * scale) / scale
    return np.ldexp(mantissa, exponent)
# XXX: This function should not be defined here, but somewhere in
# scipy.linalg namespace
def symrand(dim_or_eigv, rng):
    """Return a random symmetric (Hermitian) matrix.

    If `dim_or_eigv` is an integer N, return an NxN matrix with
    eigenvalues uniformly distributed on (-1, 1).  If it is a 1-D real
    array ``a``, return a matrix whose eigenvalues are exactly ``a``.
    """
    if isinstance(dim_or_eigv, int):
        size = dim_or_eigv
        eigenvalues = rng.random(size)*2 - 1
    elif isinstance(dim_or_eigv, ndarray) and dim_or_eigv.ndim == 1:
        size = dim_or_eigv.shape[0]
        eigenvalues = dim_or_eigv
    else:
        raise TypeError("input type not supported.")
    # Conjugate the diagonal eigenvalue matrix by a random orthogonal basis.
    basis = ortho_group.rvs(size)
    h = basis.T.conj() @ diag(eigenvalues) @ basis
    # to avoid roundoff errors, symmetrize the matrix (again)
    return 0.5*(h.T + h)
class TestEigVals:
    """Tests for scipy.linalg.eigvals on small dense matrices."""

    def test_simple(self):
        """Eigenvalues of a simple singular 3x3 matrix."""
        mat = [[1, 2, 3], [1, 2, 3], [2, 5, 6]]
        expected = [(9 + sqrt(93))/2, 0, (9 - sqrt(93))/2]
        assert_array_almost_equal(eigvals(mat), expected)
    def test_simple_tr(self):
        """Same matrix routed through a transposed copy (layout change)."""
        mat = array([[1, 2, 3], [1, 2, 3], [2, 5, 6]], 'd').T
        mat = mat.copy()
        mat = mat.T
        expected = [(9 + sqrt(93))/2, 0, (9 - sqrt(93))/2]
        assert_array_almost_equal(eigvals(mat), expected)
    def test_simple_complex(self):
        """A complex entry gives complex eigenvalues."""
        mat = [[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]]
        expected = [(9+1j + sqrt(92+6j))/2,
                    0,
                    (9+1j - sqrt(92+6j))/2]
        assert_array_almost_equal(eigvals(mat), expected)
    def test_finite(self):
        """check_finite=False skips validation but gives the same answer."""
        mat = [[1, 2, 3], [1, 2, 3], [2, 5, 6]]
        expected = [(9 + sqrt(93))/2, 0, (9 - sqrt(93))/2]
        assert_array_almost_equal(eigvals(mat, check_finite=False), expected)
class TestEig:
    """Tests for scipy.linalg.eig/eigvals: standard and generalized
    eigenproblems, including singular and NaN-producing pencils."""
    def test_simple(self):
        # Known eigenpairs of a singular 3x3 matrix; also checks left
        # eigenvectors via a.T @ v = w * v.
        a = array([[1, 2, 3], [1, 2, 3], [2, 5, 6]])
        w, v = eig(a)
        exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
        v0 = array([1, 1, (1+sqrt(93)/3)/2])
        v1 = array([3., 0, -1])
        v2 = array([1, 1, (1-sqrt(93)/3)/2])
        v0 = v0 / norm(v0)
        v1 = v1 / norm(v1)
        v2 = v2 / norm(v2)
        assert_array_almost_equal(w, exact_w)
        # Multiply by sign(v[0, i]) to remove the sign ambiguity of
        # eigenvectors before comparing.
        assert_array_almost_equal(v0, v[:, 0]*sign(v[0, 0]))
        assert_array_almost_equal(v1, v[:, 1]*sign(v[0, 1]))
        assert_array_almost_equal(v2, v[:, 2]*sign(v[0, 2]))
        for i in range(3):
            assert_array_almost_equal(a @ v[:, i], w[i]*v[:, i])
        w, v = eig(a, left=1, right=0)
        for i in range(3):
            assert_array_almost_equal(a.T @ v[:, i], w[i]*v[:, i])
    def test_simple_complex_eig(self):
        # Real matrix with a complex-conjugate eigenvalue pair; verifies
        # both right and left eigenvector relations.
        a = array([[1, 2], [-2, 1]])
        w, vl, vr = eig(a, left=1, right=1)
        assert_array_almost_equal(w, array([1+2j, 1-2j]))
        for i in range(2):
            assert_array_almost_equal(a @ vr[:, i], w[i]*vr[:, i])
        for i in range(2):
            assert_array_almost_equal(a.conj().T @ vl[:, i],
                                      w[i].conj()*vl[:, i])
    def test_simple_complex(self):
        # Complex input matrix; check the defining relations for right
        # and left eigenvectors.
        a = array([[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]])
        w, vl, vr = eig(a, left=1, right=1)
        for i in range(3):
            assert_array_almost_equal(a @ vr[:, i], w[i]*vr[:, i])
        for i in range(3):
            assert_array_almost_equal(a.conj().T @ vl[:, i],
                                      w[i].conj()*vl[:, i])
    def test_gh_3054(self):
        # Regression test: B = 0 gives beta = 0, i.e. an infinite
        # eigenvalue in standard coordinates.
        a = [[1]]
        b = [[0]]
        w, vr = eig(a, b, homogeneous_eigvals=True)
        assert_allclose(w[1, 0], 0)
        assert_(w[0, 0] != 0)
        assert_allclose(vr, 1)
        w, vr = eig(a, b)
        assert_equal(w, np.inf)
        assert_allclose(vr, 1)
    def _check_gen_eig(self, A, B):
        """Verify eig/eigvals on the pencil (A, B), in both homogeneous
        (alpha, beta) and standard (alpha/beta) coordinates.

        B may be None, in which case the standard problem is checked and
        the identity is used where an explicit B is needed.
        """
        if B is not None:
            A, B = asarray(A), asarray(B)
            B0 = B
        else:
            A = asarray(A)
            B0 = B
            B = np.eye(*A.shape)
        msg = f"\n{A!r}\n{B!r}"
        # Eigenvalues in homogeneous coordinates
        w, vr = eig(A, B0, homogeneous_eigvals=True)
        wt = eigvals(A, B0, homogeneous_eigvals=True)
        # Homogeneous relation: beta * A v == alpha * B v.
        val1 = A @ vr * w[1, :]
        val2 = B @ vr * w[0, :]
        for i in range(val1.shape[1]):
            assert_allclose(val1[:, i], val2[:, i],
                            rtol=1e-13, atol=1e-13, err_msg=msg)
        if B0 is None:
            # Standard problem: all beta should be exactly 1.
            assert_allclose(w[1, :], 1)
            assert_allclose(wt[1, :], 1)
        # eig and eigvals may order differently; compare after sorting.
        perm = np.lexsort(w)
        permt = np.lexsort(wt)
        assert_allclose(w[:, perm], wt[:, permt], atol=1e-7, rtol=1e-7,
                        err_msg=msg)
        # Right eigenvectors should come back normalized to unit length.
        length = np.empty(len(vr))
        for i in range(len(vr)):
            length[i] = norm(vr[:, i])
        assert_allclose(length, np.ones(length.size), err_msg=msg,
                        atol=1e-7, rtol=1e-7)
        # Convert homogeneous coordinates
        beta_nonzero = (w[1, :] != 0)
        wh = w[0, beta_nonzero] / w[1, beta_nonzero]
        # Eigenvalues in standard coordinates
        w, vr = eig(A, B0)
        wt = eigvals(A, B0)
        val1 = A @ vr
        val2 = B @ vr * w
        res = val1 - val2
        for i in range(res.shape[1]):
            # Skip columns with non-finite residual (infinite eigenvalues).
            if np.all(isfinite(res[:, i])):
                assert_allclose(res[:, i], 0,
                                rtol=1e-13, atol=1e-13, err_msg=msg)
        # clear_fuss rounds away low mantissa bits so nearly-equal values
        # sort identically in both results.
        w_fin = w[isfinite(w)]
        wt_fin = wt[isfinite(wt)]
        perm = argsort(clear_fuss(w_fin))
        permt = argsort(clear_fuss(wt_fin))
        assert_allclose(w[perm], wt[permt],
                        atol=1e-7, rtol=1e-7, err_msg=msg)
        length = np.empty(len(vr))
        for i in range(len(vr)):
            length[i] = norm(vr[:, i])
        assert_allclose(length, np.ones(length.size), err_msg=msg)
        # Compare homogeneous and nonhomogeneous versions
        assert_allclose(sort(wh), sort(w[np.isfinite(w)]))
    @pytest.mark.xfail(reason="See gh-2254")
    def test_singular(self):
        # Singular pencil (A, B): both matrices are rank deficient.
        # Example taken from
        # https://web.archive.org/web/20040903121217/http://www.cs.umu.se/research/nla/singular_pairs/guptri/matlab.html
        A = array([[22, 34, 31, 31, 17],
                   [45, 45, 42, 19, 29],
                   [39, 47, 49, 26, 34],
                   [27, 31, 26, 21, 15],
                   [38, 44, 44, 24, 30]])
        B = array([[13, 26, 25, 17, 24],
                   [31, 46, 40, 26, 37],
                   [26, 40, 19, 25, 25],
                   [16, 25, 27, 14, 23],
                   [24, 35, 18, 21, 22]])
        with np.errstate(all='ignore'):
            self._check_gen_eig(A, B)
    def test_falker(self):
        # Test matrices giving some Nan generalized eigenvalues.
        M = diag(array([1, 0, 3]))
        K = array(([2, -1, -1], [-1, 2, -1], [-1, -1, 2]))
        D = array(([1, -1, 0], [-1, 1, 0], [0, 0, 0]))
        Z = zeros((3, 3))
        I3 = eye(3)
        # Linearized quadratic eigenproblem in block form.
        A = np.block([[I3, Z], [Z, -K]])
        B = np.block([[Z, I3], [M, D]])
        with np.errstate(all='ignore'):
            self._check_gen_eig(A, B)
    def test_bad_geneig(self):
        # Ticket #709 (strange return values from DGGEV)
        def matrices(omega):
            c1 = -9 + omega**2
            c2 = 2*omega
            A = [[1, 0, 0, 0],
                 [0, 1, 0, 0],
                 [0, 0, c1, 0],
                 [0, 0, 0, c1]]
            B = [[0, 0, 1, 0],
                 [0, 0, 0, 1],
                 [1, 0, 0, -c2],
                 [0, 1, c2, 0]]
            return A, B
        # With a buggy LAPACK, this can fail for different omega on different
        # machines -- so we need to test several values
        with np.errstate(all='ignore'):
            for k in range(100):
                A, B = matrices(omega=k*5./100)
                self._check_gen_eig(A, B)
    def test_make_eigvals(self):
        # Step through all paths in _make_eigvals
        # Real eigenvalues
        rng = np.random.RandomState(1234)
        A = symrand(3, rng)
        self._check_gen_eig(A, None)
        B = symrand(3, rng)
        self._check_gen_eig(A, B)
        # Complex eigenvalues
        A = rng.random((3, 3)) + 1j*rng.random((3, 3))
        self._check_gen_eig(A, None)
        B = rng.random((3, 3)) + 1j*rng.random((3, 3))
        self._check_gen_eig(A, B)
    def test_check_finite(self):
        # Same fixture as test_simple, but with input validation disabled.
        a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]]
        w, v = eig(a, check_finite=False)
        exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
        v0 = array([1, 1, (1+sqrt(93)/3)/2])
        v1 = array([3., 0, -1])
        v2 = array([1, 1, (1-sqrt(93)/3)/2])
        v0 = v0 / norm(v0)
        v1 = v1 / norm(v1)
        v2 = v2 / norm(v2)
        assert_array_almost_equal(w, exact_w)
        assert_array_almost_equal(v0, v[:, 0]*sign(v[0, 0]))
        assert_array_almost_equal(v1, v[:, 1]*sign(v[0, 1]))
        assert_array_almost_equal(v2, v[:, 2]*sign(v[0, 2]))
        for i in range(3):
            assert_array_almost_equal(a @ v[:, i], w[i]*v[:, i])
    def test_not_square_error(self):
        """Check that passing a non-square array raises a ValueError."""
        A = np.arange(6).reshape(3, 2)
        assert_raises(ValueError, eig, A)
    def test_shape_mismatch(self):
        """Check that passing arrays of with different shapes
        raises a ValueError."""
        A = eye(2)
        B = np.arange(9.0).reshape(3, 3)
        assert_raises(ValueError, eig, A, B)
        assert_raises(ValueError, eig, B, A)
class TestEigBanded:
    """Tests for banded eigensolvers (eig_banded/eigvals_banded) and the
    low-level LAPACK band routines (dsbev*, zhbev*, dgbtrf/s, zgbtrf/s),
    all checked against dense linalg.eig / linalg.solve / lu results."""
    def setup_method(self):
        self.create_bandmat()
    def create_bandmat(self):
        """Create the full matrix `self.fullmat` and
        the corresponding band matrix `self.bandmat`."""
        N = 10
        self.KL = 2   # number of subdiagonals (below the diagonal)
        self.KU = 2   # number of superdiagonals (above the diagonal)
        # symmetric band matrix
        self.sym_mat = (diag(full(N, 1.0))
                        + diag(full(N-1, -1.0), -1) + diag(full(N-1, -1.0), 1)
                        + diag(full(N-2, -2.0), -2) + diag(full(N-2, -2.0), 2))
        # hermitian band matrix
        self.herm_mat = (diag(full(N, -1.0))
                         + 1j*diag(full(N-1, 1.0), -1)
                         - 1j*diag(full(N-1, 1.0), 1)
                         + diag(full(N-2, -2.0), -2)
                         + diag(full(N-2, -2.0), 2))
        # general real band matrix
        self.real_mat = (diag(full(N, 1.0))
                         + diag(full(N-1, -1.0), -1) + diag(full(N-1, -3.0), 1)
                         + diag(full(N-2, 2.0), -2) + diag(full(N-2, -2.0), 2))
        # general complex band matrix
        self.comp_mat = (1j*diag(full(N, 1.0))
                         + diag(full(N-1, -1.0), -1)
                         + 1j*diag(full(N-1, -3.0), 1)
                         + diag(full(N-2, 2.0), -2)
                         + diag(full(N-2, -2.0), 2))
        # Reference eigenvalues and -vectors from dense linalg.eig,
        # sorted ascending so band results can be compared directly.
        ew, ev = linalg.eig(self.sym_mat)
        ew = ew.real
        args = argsort(ew)
        self.w_sym_lin = ew[args]
        self.evec_sym_lin = ev[:, args]
        ew, ev = linalg.eig(self.herm_mat)
        ew = ew.real
        args = argsort(ew)
        self.w_herm_lin = ew[args]
        self.evec_herm_lin = ev[:, args]
        # Extract upper bands from symmetric and hermitian band matrices
        # (for use in dsbevd, dsbevx, zhbevd, zhbevx
        # and their single precision versions)
        LDAB = self.KU + 1
        self.bandmat_sym = zeros((LDAB, N), dtype=float)
        self.bandmat_herm = zeros((LDAB, N), dtype=complex)
        for i in range(LDAB):
            # LAPACK upper band storage: diagonal i goes in row LDAB-i-1.
            self.bandmat_sym[LDAB-i-1, i:N] = diag(self.sym_mat, i)
            self.bandmat_herm[LDAB-i-1, i:N] = diag(self.herm_mat, i)
        # Extract bands from general real and complex band matrix
        # (for use in dgbtrf, dgbtrs and their single precision versions)
        LDAB = 2*self.KL + self.KU + 1
        self.bandmat_real = zeros((LDAB, N), dtype=float)
        self.bandmat_real[2*self.KL, :] = diag(self.real_mat)  # diagonal
        for i in range(self.KL):
            # superdiagonals
            self.bandmat_real[2*self.KL-1-i, i+1:N] = diag(self.real_mat, i+1)
            # subdiagonals
            self.bandmat_real[2*self.KL+1+i, 0:N-1-i] = diag(self.real_mat,
                                                             -i-1)
        self.bandmat_comp = zeros((LDAB, N), dtype=complex)
        self.bandmat_comp[2*self.KL, :] = diag(self.comp_mat)  # diagonal
        for i in range(self.KL):
            # superdiagonals
            self.bandmat_comp[2*self.KL-1-i, i+1:N] = diag(self.comp_mat, i+1)
            # subdiagonals
            self.bandmat_comp[2*self.KL+1+i, 0:N-1-i] = diag(self.comp_mat,
                                                             -i-1)
        # absolute value for linear equation system A*x = b
        self.b = 1.0*arange(N)
        self.bc = self.b * (1 + 1j)
    #####################################################################
    def test_dsbev(self):
        """Compare dsbev eigenvalues and eigenvectors with
        the result of linalg.eig."""
        w, evec, info = dsbev(self.bandmat_sym, compute_v=1)
        evec_ = evec[:, argsort(w)]
        assert_array_almost_equal(sort(w), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))
    def test_dsbevd(self):
        """Compare dsbevd eigenvalues and eigenvectors with
        the result of linalg.eig."""
        w, evec, info = dsbevd(self.bandmat_sym, compute_v=1)
        evec_ = evec[:, argsort(w)]
        assert_array_almost_equal(sort(w), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))
    def test_dsbevx(self):
        """Compare dsbevx eigenvalues and eigenvectors
        with the result of linalg.eig."""
        N, N = shape(self.sym_mat)
        # Achtung: Argumente 0.0,0.0,range?
        w, evec, num, ifail, info = dsbevx(self.bandmat_sym, 0.0, 0.0, 1, N,
                                           compute_v=1, range=2)
        evec_ = evec[:, argsort(w)]
        assert_array_almost_equal(sort(w), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))
    def test_zhbevd(self):
        """Compare zhbevd eigenvalues and eigenvectors
        with the result of linalg.eig."""
        w, evec, info = zhbevd(self.bandmat_herm, compute_v=1)
        evec_ = evec[:, argsort(w)]
        assert_array_almost_equal(sort(w), self.w_herm_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))
    def test_zhbevx(self):
        """Compare zhbevx eigenvalues and eigenvectors
        with the result of linalg.eig."""
        N, N = shape(self.herm_mat)
        # Achtung: Argumente 0.0,0.0,range?
        w, evec, num, ifail, info = zhbevx(self.bandmat_herm, 0.0, 0.0, 1, N,
                                           compute_v=1, range=2)
        evec_ = evec[:, argsort(w)]
        assert_array_almost_equal(sort(w), self.w_herm_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))
    def test_eigvals_banded(self):
        """Compare eigenvalues of eigvals_banded with those of linalg.eig."""
        w_sym = eigvals_banded(self.bandmat_sym)
        w_sym = w_sym.real
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
        w_herm = eigvals_banded(self.bandmat_herm)
        w_herm = w_herm.real
        assert_array_almost_equal(sort(w_herm), self.w_herm_lin)
        # extracting eigenvalues with respect to an index range
        ind1 = 2
        ind2 = np.longlong(6)
        w_sym_ind = eigvals_banded(self.bandmat_sym,
                                   select='i', select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_sym_ind),
                                  self.w_sym_lin[ind1:ind2+1])
        w_herm_ind = eigvals_banded(self.bandmat_herm,
                                    select='i', select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_herm_ind),
                                  self.w_herm_lin[ind1:ind2+1])
        # extracting eigenvalues with respect to a value range
        v_lower = self.w_sym_lin[ind1] - 1.0e-5
        v_upper = self.w_sym_lin[ind2] + 1.0e-5
        w_sym_val = eigvals_banded(self.bandmat_sym,
                                   select='v', select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_sym_val),
                                  self.w_sym_lin[ind1:ind2+1])
        v_lower = self.w_herm_lin[ind1] - 1.0e-5
        v_upper = self.w_herm_lin[ind2] + 1.0e-5
        w_herm_val = eigvals_banded(self.bandmat_herm,
                                    select='v',
                                    select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_herm_val),
                                  self.w_herm_lin[ind1:ind2+1])
        w_sym = eigvals_banded(self.bandmat_sym, check_finite=False)
        w_sym = w_sym.real
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
    def test_eig_banded(self):
        """Compare eigenvalues and eigenvectors of eig_banded
        with those of linalg.eig. """
        w_sym, evec_sym = eig_banded(self.bandmat_sym)
        evec_sym_ = evec_sym[:, argsort(w_sym.real)]
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))
        w_herm, evec_herm = eig_banded(self.bandmat_herm)
        evec_herm_ = evec_herm[:, argsort(w_herm.real)]
        assert_array_almost_equal(sort(w_herm), self.w_herm_lin)
        assert_array_almost_equal(abs(evec_herm_), abs(self.evec_herm_lin))
        # extracting eigenvalues with respect to an index range
        ind1 = 2
        ind2 = 6
        w_sym_ind, evec_sym_ind = eig_banded(self.bandmat_sym,
                                             select='i',
                                             select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_sym_ind),
                                  self.w_sym_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_sym_ind),
                                  abs(self.evec_sym_lin[:, ind1:ind2+1]))
        w_herm_ind, evec_herm_ind = eig_banded(self.bandmat_herm,
                                               select='i',
                                               select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_herm_ind),
                                  self.w_herm_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_herm_ind),
                                  abs(self.evec_herm_lin[:, ind1:ind2+1]))
        # extracting eigenvalues with respect to a value range
        v_lower = self.w_sym_lin[ind1] - 1.0e-5
        v_upper = self.w_sym_lin[ind2] + 1.0e-5
        w_sym_val, evec_sym_val = eig_banded(self.bandmat_sym,
                                             select='v',
                                             select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_sym_val),
                                  self.w_sym_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_sym_val),
                                  abs(self.evec_sym_lin[:, ind1:ind2+1]))
        v_lower = self.w_herm_lin[ind1] - 1.0e-5
        v_upper = self.w_herm_lin[ind2] + 1.0e-5
        w_herm_val, evec_herm_val = eig_banded(self.bandmat_herm,
                                               select='v',
                                               select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_herm_val),
                                  self.w_herm_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_herm_val),
                                  abs(self.evec_herm_lin[:, ind1:ind2+1]))
        w_sym, evec_sym = eig_banded(self.bandmat_sym, check_finite=False)
        evec_sym_ = evec_sym[:, argsort(w_sym.real)]
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))
    def test_dgbtrf(self):
        """Compare dgbtrf LU factorisation with the LU factorisation result
        of linalg.lu."""
        M, N = shape(self.real_mat)
        lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)
        # extract matrix u from lu_symm_band
        u = diag(lu_symm_band[2*self.KL, :])
        for i in range(self.KL + self.KU):
            u += diag(lu_symm_band[2*self.KL-1-i, i+1:N], i+1)
        p_lin, l_lin, u_lin = lu(self.real_mat, permute_l=0)
        assert_array_almost_equal(u, u_lin)
    def test_zgbtrf(self):
        """Compare zgbtrf LU factorisation with the LU factorisation result
        of linalg.lu."""
        M, N = shape(self.comp_mat)
        lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)
        # extract matrix u from lu_symm_band
        u = diag(lu_symm_band[2*self.KL, :])
        for i in range(self.KL + self.KU):
            u += diag(lu_symm_band[2*self.KL-1-i, i+1:N], i+1)
        p_lin, l_lin, u_lin = lu(self.comp_mat, permute_l=0)
        assert_array_almost_equal(u, u_lin)
    def test_dgbtrs(self):
        """Compare dgbtrs solutions for linear equation system A*x = b
        with solutions of linalg.solve."""
        lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)
        y, info = dgbtrs(lu_symm_band, self.KL, self.KU, self.b, ipiv)
        y_lin = linalg.solve(self.real_mat, self.b)
        assert_array_almost_equal(y, y_lin)
    def test_zgbtrs(self):
        """Compare zgbtrs solutions for linear equation system A*x = b
        with solutions of linalg.solve."""
        lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)
        y, info = zgbtrs(lu_symm_band, self.KL, self.KU, self.bc, ipiv)
        y_lin = linalg.solve(self.comp_mat, self.bc)
        assert_array_almost_equal(y, y_lin)
class TestEigTridiagonal:
    """Tests for eigh_tridiagonal/eigvalsh_tridiagonal against dense eig,
    across all LAPACK drivers and the index/value subset selections."""
    def setup_method(self):
        self.create_trimat()
    def create_trimat(self):
        """Create the full matrix `self.fullmat`, `self.d`, and `self.e`."""
        N = 10
        # symmetric band matrix
        self.d = full(N, 1.0)
        self.e = full(N-1, -1.0)
        self.full_mat = (diag(self.d) + diag(self.e, -1) + diag(self.e, 1))
        # Reference spectrum from dense eig, sorted ascending.
        ew, ev = linalg.eig(self.full_mat)
        ew = ew.real
        args = argsort(ew)
        self.w = ew[args]
        self.evec = ev[:, args]
    def test_degenerate(self):
        """Test error conditions."""
        # Wrong sizes
        assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e[:-1])
        # Must be real
        assert_raises(TypeError, eigvalsh_tridiagonal, self.d, self.e * 1j)
        # Bad driver
        assert_raises(TypeError, eigvalsh_tridiagonal, self.d, self.e,
                      lapack_driver=1.)
        assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e,
                      lapack_driver='foo')
        # Bad bounds
        assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e,
                      select='i', select_range=(0, -1))
    def test_eigvalsh_tridiagonal(self):
        """Compare eigenvalues of eigvalsh_tridiagonal with those of eig."""
        # can't use ?STERF with subselection
        for driver in ('sterf', 'stev', 'stebz', 'stemr', 'auto'):
            w = eigvalsh_tridiagonal(self.d, self.e, lapack_driver=driver)
            assert_array_almost_equal(sort(w), self.w)
        for driver in ('sterf', 'stev'):
            assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e,
                          lapack_driver='stev', select='i',
                          select_range=(0, 1))
        for driver in ('stebz', 'stemr', 'auto'):
            # extracting eigenvalues with respect to the full index range
            w_ind = eigvalsh_tridiagonal(
                self.d, self.e, select='i', select_range=(0, len(self.d)-1),
                lapack_driver=driver)
            assert_array_almost_equal(sort(w_ind), self.w)
            # extracting eigenvalues with respect to an index range
            ind1 = 2
            ind2 = 6
            w_ind = eigvalsh_tridiagonal(
                self.d, self.e, select='i', select_range=(ind1, ind2),
                lapack_driver=driver)
            assert_array_almost_equal(sort(w_ind), self.w[ind1:ind2+1])
            # extracting eigenvalues with respect to a value range
            v_lower = self.w[ind1] - 1.0e-5
            v_upper = self.w[ind2] + 1.0e-5
            w_val = eigvalsh_tridiagonal(
                self.d, self.e, select='v', select_range=(v_lower, v_upper),
                lapack_driver=driver)
            assert_array_almost_equal(sort(w_val), self.w[ind1:ind2+1])
    def test_eigh_tridiagonal(self):
        """Compare eigenvalues and eigenvectors of eigh_tridiagonal
        with those of eig. """
        # can't use ?STERF when eigenvectors are requested
        assert_raises(ValueError, eigh_tridiagonal, self.d, self.e,
                      lapack_driver='sterf')
        for driver in ('stebz', 'stev', 'stemr', 'auto'):
            w, evec = eigh_tridiagonal(self.d, self.e, lapack_driver=driver)
            evec_ = evec[:, argsort(w)]
            assert_array_almost_equal(sort(w), self.w)
            assert_array_almost_equal(abs(evec_), abs(self.evec))
        assert_raises(ValueError, eigh_tridiagonal, self.d, self.e,
                      lapack_driver='stev', select='i', select_range=(0, 1))
        for driver in ('stebz', 'stemr', 'auto'):
            # extracting eigenvalues with respect to an index range
            ind1 = 0
            ind2 = len(self.d)-1
            w, evec = eigh_tridiagonal(
                self.d, self.e, select='i', select_range=(ind1, ind2),
                lapack_driver=driver)
            assert_array_almost_equal(sort(w), self.w)
            assert_array_almost_equal(abs(evec), abs(self.evec))
            ind1 = 2
            ind2 = 6
            w, evec = eigh_tridiagonal(
                self.d, self.e, select='i', select_range=(ind1, ind2),
                lapack_driver=driver)
            assert_array_almost_equal(sort(w), self.w[ind1:ind2+1])
            assert_array_almost_equal(abs(evec),
                                      abs(self.evec[:, ind1:ind2+1]))
            # extracting eigenvalues with respect to a value range
            v_lower = self.w[ind1] - 1.0e-5
            v_upper = self.w[ind2] + 1.0e-5
            w, evec = eigh_tridiagonal(
                self.d, self.e, select='v', select_range=(v_lower, v_upper),
                lapack_driver=driver)
            assert_array_almost_equal(sort(w), self.w[ind1:ind2+1])
            assert_array_almost_equal(abs(evec),
                                      abs(self.evec[:, ind1:ind2+1]))
class TestEigh:
    """Tests for eigh/eigvalsh: input validation, subset selection, all
    LAPACK drivers, and the deprecated turbo/eigvals keywords."""
    def setup_class(self):
        np.random.seed(1234)
    def test_wrong_inputs(self):
        # Each call below exercises one rejected argument combination.
        # Nonsquare a
        assert_raises(ValueError, eigh, np.ones([1, 2]))
        # Nonsquare b
        assert_raises(ValueError, eigh, np.ones([2, 2]), np.ones([2, 1]))
        # Incompatible a, b sizes
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([2, 2]))
        # Wrong type parameter for generalized problem
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                      type=4)
        # Both value and index subsets requested
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                      subset_by_value=[1, 2], subset_by_index=[2, 4])
        with np.testing.suppress_warnings() as sup:
            sup.filter(DeprecationWarning, "Keyword argument 'eigvals")
            assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                          subset_by_value=[1, 2], eigvals=[2, 4])
        # Invalid upper index spec
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                      subset_by_index=[0, 4])
        with np.testing.suppress_warnings() as sup:
            sup.filter(DeprecationWarning, "Keyword argument 'eigvals")
            assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                          eigvals=[0, 4])
        # Invalid lower index
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                      subset_by_index=[-2, 2])
        with np.testing.suppress_warnings() as sup:
            sup.filter(DeprecationWarning, "Keyword argument 'eigvals")
            assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                          eigvals=[-2, 2])
        # Invalid index spec #2
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                      subset_by_index=[2, 0])
        with np.testing.suppress_warnings() as sup:
            sup.filter(DeprecationWarning, "Keyword argument 'eigvals")
            assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                          subset_by_index=[2, 0])
        # Invalid value spec
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                      subset_by_value=[2, 0])
        # Invalid driver name
        assert_raises(ValueError, eigh, np.ones([2, 2]), driver='wrong')
        # Generalized driver selection without b
        assert_raises(ValueError, eigh, np.ones([3, 3]), None, driver='gvx')
        # Standard driver with b
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                      driver='evr')
        # Subset request from invalid driver
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                      driver='gvd', subset_by_index=[1, 2])
        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
                      driver='gvd', subset_by_index=[1, 2])
    def test_nonpositive_b(self):
        # The ones matrix is singular, hence not positive definite.
        assert_raises(LinAlgError, eigh, np.ones([3, 3]), np.ones([3, 3]))
    # index based subsets are done in the legacy test_eigh()
    def test_value_subsets(self):
        # All returned eigenvalues must lie inside the requested interval,
        # for both standard and generalized problems, over all dtypes.
        for ind, dt in enumerate(DTYPES):
            a = _random_hermitian_matrix(20, dtype=dt)
            w, v = eigh(a, subset_by_value=[-2, 2])
            assert_equal(v.shape[1], len(w))
            assert all((w > -2) & (w < 2))
            b = _random_hermitian_matrix(20, posdef=True, dtype=dt)
            w, v = eigh(a, b, subset_by_value=[-2, 2])
            assert_equal(v.shape[1], len(w))
            assert all((w > -2) & (w < 2))
    def test_eigh_integer(self):
        # Integer input must be accepted (smoke test; no value checks).
        a = array([[1, 2], [2, 7]])
        b = array([[3, 1], [1, 5]])
        w, z = eigh(a)
        w, z = eigh(a, b)
    def test_eigh_of_sparse(self):
        # This tests the rejection of inputs that eigh cannot currently handle.
        import scipy.sparse
        a = scipy.sparse.identity(2).tocsc()
        b = np.atleast_2d(a)
        assert_raises(ValueError, eigh, a)
        assert_raises(ValueError, eigh, b)
    @pytest.mark.parametrize('dtype_', DTYPES)
    @pytest.mark.parametrize('driver', ("ev", "evd", "evr", "evx"))
    def test_various_drivers_standard(self, driver, dtype_):
        # Residual a @ v - v * w must vanish for every standard driver.
        a = _random_hermitian_matrix(n=20, dtype=dtype_)
        w, v = eigh(a, driver=driver)
        assert_allclose(a @ v - (v * w), 0.,
                        atol=1000*np.finfo(dtype_).eps,
                        rtol=0.)
    @pytest.mark.parametrize('type', (1, 2, 3))
    @pytest.mark.parametrize('driver', ("gv", "gvd", "gvx"))
    def test_various_drivers_generalized(self, driver, type):
        # Each generalized problem type has its own defining residual.
        atol = np.spacing(5000.)
        a = _random_hermitian_matrix(20)
        b = _random_hermitian_matrix(20, posdef=True)
        w, v = eigh(a=a, b=b, driver=driver, type=type)
        if type == 1:
            assert_allclose(a @ v - w*(b @ v), 0., atol=atol, rtol=0.)
        elif type == 2:
            assert_allclose(a @ b @ v - v * w, 0., atol=atol, rtol=0.)
        else:
            assert_allclose(b @ a @ v - v * w, 0., atol=atol, rtol=0.)
    def test_eigvalsh_new_args(self):
        # subset_by_index / subset_by_value must select the right values.
        a = _random_hermitian_matrix(5)
        w = eigvalsh(a, subset_by_index=[1, 2])
        assert_equal(len(w), 2)
        w2 = eigvalsh(a, subset_by_index=[1, 2])
        assert_equal(len(w2), 2)
        assert_allclose(w, w2)
        b = np.diag([1, 1.2, 1.3, 1.5, 2])
        w3 = eigvalsh(b, subset_by_value=[1, 1.4])
        assert_equal(len(w3), 2)
        assert_allclose(w3, np.array([1.2, 1.3]))
    @pytest.mark.parametrize("method", [eigh, eigvalsh])
    def test_deprecation_warnings(self, method):
        # The legacy turbo/eigvals keywords must emit DeprecationWarning.
        with pytest.warns(DeprecationWarning,
                          match="Keyword argument 'turbo'"):
            method(np.zeros((2, 2)), turbo=True)
        with pytest.warns(DeprecationWarning,
                          match="Keyword argument 'eigvals'"):
            method(np.zeros((2, 2)), eigvals=[0, 1])
    def test_deprecation_results(self):
        # Deprecated keywords must still produce the same numbers as the
        # replacement arguments.
        a = _random_hermitian_matrix(3)
        b = _random_hermitian_matrix(3, posdef=True)
        # check turbo gives same result as driver='gvd'
        with np.testing.suppress_warnings() as sup:
            sup.filter(DeprecationWarning, "Keyword argument 'turbo'")
            w_dep, v_dep = eigh(a, b, turbo=True)
        w, v = eigh(a, b, driver='gvd')
        assert_allclose(w_dep, w)
        assert_allclose(v_dep, v)
        # check eigvals gives the same result as subset_by_index
        with np.testing.suppress_warnings() as sup:
            sup.filter(DeprecationWarning, "Keyword argument 'eigvals'")
            w_dep, v_dep = eigh(a, eigvals=[0, 1])
        w, v = eigh(a, subset_by_index=[0, 1])
        assert_allclose(w_dep, w)
        assert_allclose(v_dep, v)
class TestSVD_GESDD:
    """Tests for scipy.linalg.svd, parametrized by LAPACK driver through
    the ``lapack_driver`` class attribute (rerun with 'gesvd' by the
    TestSVD_GESVD subclass).
    """
    # LAPACK driver passed to every svd() call; overridden in the subclass.
    lapack_driver = 'gesdd'
    def test_degenerate(self):
        # Invalid lapack_driver values must raise the documented error types.
        assert_raises(TypeError, svd, [[1.]], lapack_driver=1.)
        assert_raises(ValueError, svd, [[1.]], lapack_driver='foo')
    def test_simple(self):
        # Square, full-rank: U and Vh are orthogonal; U @ Sigma @ Vh == a.
        a = [[1, 2, 3], [1, 20, 3], [2, 5, 6]]
        for full_matrices in (True, False):
            u, s, vh = svd(a, full_matrices=full_matrices,
                           lapack_driver=self.lapack_driver)
            assert_array_almost_equal(u.T @ u, eye(3))
            assert_array_almost_equal(vh.T @ vh, eye(3))
            sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char)
            for i in range(len(s)):
                sigma[i, i] = s[i]
            assert_array_almost_equal(u @ sigma @ vh, a)
    def test_simple_singular(self):
        # Rank-deficient square matrix (two identical rows).
        a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]]
        for full_matrices in (True, False):
            u, s, vh = svd(a, full_matrices=full_matrices,
                           lapack_driver=self.lapack_driver)
            assert_array_almost_equal(u.T @ u, eye(3))
            assert_array_almost_equal(vh.T @ vh, eye(3))
            sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char)
            for i in range(len(s)):
                sigma[i, i] = s[i]
            assert_array_almost_equal(u @ sigma @ vh, a)
    def test_simple_underdet(self):
        # Wide matrix (fewer rows than columns).
        a = [[1, 2, 3], [4, 5, 6]]
        for full_matrices in (True, False):
            u, s, vh = svd(a, full_matrices=full_matrices,
                           lapack_driver=self.lapack_driver)
            assert_array_almost_equal(u.T @ u, eye(u.shape[0]))
            sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char)
            for i in range(len(s)):
                sigma[i, i] = s[i]
            assert_array_almost_equal(u @ sigma @ vh, a)
    def test_simple_overdet(self):
        # Tall matrix (more rows than columns).
        a = [[1, 2], [4, 5], [3, 4]]
        for full_matrices in (True, False):
            u, s, vh = svd(a, full_matrices=full_matrices,
                           lapack_driver=self.lapack_driver)
            assert_array_almost_equal(u.T @ u, eye(u.shape[1]))
            assert_array_almost_equal(vh.T @ vh, eye(2))
            sigma = zeros((u.shape[1], vh.shape[0]), s.dtype.char)
            for i in range(len(s)):
                sigma[i, i] = s[i]
            assert_array_almost_equal(u @ sigma @ vh, a)
    def test_random(self):
        # Random tall and wide matrices; note the outer loop variable i is
        # shadowed by the inner sigma-filling loop (harmless here).
        rng = np.random.RandomState(1234)
        n = 20
        m = 15
        for i in range(3):
            for a in [rng.random([n, m]), rng.random([m, n])]:
                for full_matrices in (True, False):
                    u, s, vh = svd(a, full_matrices=full_matrices,
                                   lapack_driver=self.lapack_driver)
                    assert_array_almost_equal(u.T @ u, eye(u.shape[1]))
                    assert_array_almost_equal(vh @ vh.T, eye(vh.shape[0]))
                    sigma = zeros((u.shape[1], vh.shape[0]), s.dtype.char)
                    for i in range(len(s)):
                        sigma[i, i] = s[i]
                    assert_array_almost_equal(u @ sigma @ vh, a)
    def test_simple_complex(self):
        # Complex square matrix: unitarity uses the conjugate transpose.
        a = [[1, 2, 3], [1, 2j, 3], [2, 5, 6]]
        for full_matrices in (True, False):
            u, s, vh = svd(a, full_matrices=full_matrices,
                           lapack_driver=self.lapack_driver)
            assert_array_almost_equal(u.conj().T @ u, eye(u.shape[1]))
            assert_array_almost_equal(vh.conj().T @ vh, eye(vh.shape[0]))
            sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char)
            for i in range(len(s)):
                sigma[i, i] = s[i]
            assert_array_almost_equal(u @ sigma @ vh, a)
    def test_random_complex(self):
        rng = np.random.RandomState(1234)
        n = 20
        m = 15
        for i in range(3):
            for full_matrices in (True, False):
                for a in [rng.random([n, m]), rng.random([m, n])]:
                    a = a + 1j*rng.random(list(a.shape))
                    u, s, vh = svd(a, full_matrices=full_matrices,
                                   lapack_driver=self.lapack_driver)
                    assert_array_almost_equal(u.conj().T @ u,
                                              eye(u.shape[1]))
                    # This fails when [m,n]
                    # assert_array_almost_equal(vh.conj().T @ vh,
                    #                        eye(len(vh),dtype=vh.dtype.char))
                    sigma = zeros((u.shape[1], vh.shape[0]), s.dtype.char)
                    for i in range(len(s)):
                        sigma[i, i] = s[i]
                    assert_array_almost_equal(u @ sigma @ vh, a)
    def test_crash_1580(self):
        # Regression test: these sizes/dtypes used to crash (gh-1580).
        rng = np.random.RandomState(1234)
        sizes = [(13, 23), (30, 50), (60, 100)]
        for sz in sizes:
            for dt in [np.float32, np.float64, np.complex64, np.complex128]:
                a = rng.rand(*sz).astype(dt)
                # should not crash
                svd(a, lapack_driver=self.lapack_driver)
    def test_check_finite(self):
        # check_finite=False must still produce a correct decomposition.
        a = [[1, 2, 3], [1, 20, 3], [2, 5, 6]]
        u, s, vh = svd(a, check_finite=False, lapack_driver=self.lapack_driver)
        assert_array_almost_equal(u.T @ u, eye(3))
        assert_array_almost_equal(vh.T @ vh, eye(3))
        sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char)
        for i in range(len(s)):
            sigma[i, i] = s[i]
        assert_array_almost_equal(u @ sigma @ vh, a)
    def test_gh_5039(self):
        # This is a smoke test for https://github.com/scipy/scipy/issues/5039
        #
        # The following is reported to raise "ValueError: On entry to DGESDD
        # parameter number 12 had an illegal value".
        # `interp1d([1,2,3,4], [1,2,3,4], kind='cubic')`
        # This is reported to only show up on LAPACK 3.0.3.
        #
        # The matrix below is taken from the call to
        # `B = _fitpack._bsplmat(order, xk)` in interpolate._find_smoothest
        b = np.array(
            [[0.16666667, 0.66666667, 0.16666667, 0., 0., 0.],
             [0., 0.16666667, 0.66666667, 0.16666667, 0., 0.],
             [0., 0., 0.16666667, 0.66666667, 0.16666667, 0.],
             [0., 0., 0., 0.16666667, 0.66666667, 0.16666667]])
        svd(b, lapack_driver=self.lapack_driver)
    @pytest.mark.skipif(not HAS_ILP64, reason="64-bit LAPACK required")
    @pytest.mark.slow
    def test_large_matrix(self):
        # Matrix with 2**31 columns exercises 64-bit (ILP64) LAPACK indexing.
        check_free_memory(free_mb=17000)
        A = np.zeros([1, 2**31], dtype=np.float32)
        A[0, -1] = 1
        u, s, vh = svd(A, full_matrices=False)
        assert_allclose(s[0], 1.0)
        assert_allclose(u[0, 0] * vh[0, -1], 1.0)
class TestSVD_GESVD(TestSVD_GESDD):
    """Rerun the whole TestSVD_GESDD suite with the LAPACK 'gesvd' driver."""
    lapack_driver = 'gesvd'
class TestSVDVals:
    """Tests for scipy.linalg.svdvals over empty, square, rectangular,
    and complex inputs.
    """

    def _check_descending(self, s, expected_len):
        # svdvals returns exactly expected_len values, sorted from
        # largest to smallest.
        assert_(len(s) == expected_len)
        for larger, smaller in zip(s[:-1], s[1:]):
            assert_(larger >= smaller)

    def test_empty(self):
        # Any degenerate (zero-sized) input yields an empty result.
        for mat in ([[]], np.empty((2, 0)), np.ones((0, 3))):
            assert_equal(svdvals(mat), np.empty(0))

    def test_simple(self):
        self._check_descending(svdvals([[1, 2, 3], [1, 2, 3], [2, 5, 6]]), 3)

    def test_simple_underdet(self):
        self._check_descending(svdvals([[1, 2, 3], [4, 5, 6]]), 2)

    def test_simple_overdet(self):
        self._check_descending(svdvals([[1, 2], [4, 5], [3, 4]]), 2)

    def test_simple_complex(self):
        self._check_descending(svdvals([[1, 2, 3], [1, 20, 3j], [2, 5, 6]]), 3)

    def test_simple_underdet_complex(self):
        self._check_descending(svdvals([[1, 2, 3], [4, 5j, 6]]), 2)

    def test_simple_overdet_complex(self):
        self._check_descending(svdvals([[1, 2], [4, 5], [3j, 4]]), 2)

    def test_check_finite(self):
        s = svdvals([[1, 2, 3], [1, 2, 3], [2, 5, 6]], check_finite=False)
        self._check_descending(s, 3)

    @pytest.mark.slow
    def test_crash_2609(self):
        np.random.seed(1234)
        # Shouldn't crash:
        svdvals(np.random.rand(1500, 2800))
class TestDiagSVD:
    """Tests for scipy.linalg.diagsvd."""

    def test_simple(self):
        # Embedding the singular values [1, 0, 0] into a 3x3 matrix puts
        # them on the main diagonal with zeros everywhere else.
        expected = [[1, 0, 0], [0, 0, 0], [0, 0, 0]]
        result = diagsvd([1, 0, 0], 3, 3)
        assert_array_almost_equal(result, expected)
class TestQR:
def test_simple(self):
a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
q, r = qr(a)
assert_array_almost_equal(q.T @ q, eye(3))
assert_array_almost_equal(q @ r, a)
def test_simple_left(self):
a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
q, r = qr(a)
c = [1, 2, 3]
qc, r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(q @ c, qc)
assert_array_almost_equal(r, r2)
qc, r2 = qr_multiply(a, eye(3), "left")
assert_array_almost_equal(q, qc)
def test_simple_right(self):
a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
q, r = qr(a)
c = [1, 2, 3]
qc, r2 = qr_multiply(a, c)
assert_array_almost_equal(c @ q, qc)
assert_array_almost_equal(r, r2)
qc, r = qr_multiply(a, eye(3))
assert_array_almost_equal(q, qc)
def test_simple_pivoting(self):
a = np.asarray([[8, 2, 3], [2, 9, 3], [5, 3, 6]])
q, r, p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(3))
assert_array_almost_equal(q @ r, a[:, p])
q2, r2 = qr(a[:, p])
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_simple_left_pivoting(self):
a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
q, r, jpvt = qr(a, pivoting=True)
c = [1, 2, 3]
qc, r, jpvt = qr_multiply(a, c, "left", True)
assert_array_almost_equal(q @ c, qc)
def test_simple_right_pivoting(self):
a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
q, r, jpvt = qr(a, pivoting=True)
c = [1, 2, 3]
qc, r, jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(c @ q, qc)
def test_simple_trap(self):
a = [[8, 2, 3], [2, 9, 3]]
q, r = qr(a)
assert_array_almost_equal(q.T @ q, eye(2))
assert_array_almost_equal(q @ r, a)
def test_simple_trap_pivoting(self):
a = np.asarray([[8, 2, 3], [2, 9, 3]])
q, r, p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(2))
assert_array_almost_equal(q @ r, a[:, p])
q2, r2 = qr(a[:, p])
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_simple_tall(self):
# full version
a = [[8, 2], [2, 9], [5, 3]]
q, r = qr(a)
assert_array_almost_equal(q.T @ q, eye(3))
assert_array_almost_equal(q @ r, a)
def test_simple_tall_pivoting(self):
# full version pivoting
a = np.asarray([[8, 2], [2, 9], [5, 3]])
q, r, p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(3))
assert_array_almost_equal(q @ r, a[:, p])
q2, r2 = qr(a[:, p])
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_simple_tall_e(self):
# economy version
a = [[8, 2], [2, 9], [5, 3]]
q, r = qr(a, mode='economic')
assert_array_almost_equal(q.T @ q, eye(2))
assert_array_almost_equal(q @ r, a)
assert_equal(q.shape, (3, 2))
assert_equal(r.shape, (2, 2))
def test_simple_tall_e_pivoting(self):
# economy version pivoting
a = np.asarray([[8, 2], [2, 9], [5, 3]])
q, r, p = qr(a, pivoting=True, mode='economic')
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(2))
assert_array_almost_equal(q @ r, a[:, p])
q2, r2 = qr(a[:, p], mode='economic')
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_simple_tall_left(self):
a = [[8, 2], [2, 9], [5, 3]]
q, r = qr(a, mode="economic")
c = [1, 2]
qc, r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(q @ c, qc)
assert_array_almost_equal(r, r2)
c = array([1, 2, 0])
qc, r2 = qr_multiply(a, c, "left", overwrite_c=True)
assert_array_almost_equal(q @ c[:2], qc)
qc, r = qr_multiply(a, eye(2), "left")
assert_array_almost_equal(qc, q)
def test_simple_tall_left_pivoting(self):
a = [[8, 2], [2, 9], [5, 3]]
q, r, jpvt = qr(a, mode="economic", pivoting=True)
c = [1, 2]
qc, r, kpvt = qr_multiply(a, c, "left", True)
assert_array_equal(jpvt, kpvt)
assert_array_almost_equal(q @ c, qc)
qc, r, jpvt = qr_multiply(a, eye(2), "left", True)
assert_array_almost_equal(qc, q)
def test_simple_tall_right(self):
a = [[8, 2], [2, 9], [5, 3]]
q, r = qr(a, mode="economic")
c = [1, 2, 3]
cq, r2 = qr_multiply(a, c)
assert_array_almost_equal(c @ q, cq)
assert_array_almost_equal(r, r2)
cq, r = qr_multiply(a, eye(3))
assert_array_almost_equal(cq, q)
def test_simple_tall_right_pivoting(self):
a = [[8, 2], [2, 9], [5, 3]]
q, r, jpvt = qr(a, pivoting=True, mode="economic")
c = [1, 2, 3]
cq, r, jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(c @ q, cq)
cq, r, jpvt = qr_multiply(a, eye(3), pivoting=True)
assert_array_almost_equal(cq, q)
def test_simple_fat(self):
# full version
a = [[8, 2, 5], [2, 9, 3]]
q, r = qr(a)
assert_array_almost_equal(q.T @ q, eye(2))
assert_array_almost_equal(q @ r, a)
assert_equal(q.shape, (2, 2))
assert_equal(r.shape, (2, 3))
def test_simple_fat_pivoting(self):
# full version pivoting
a = np.asarray([[8, 2, 5], [2, 9, 3]])
q, r, p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(2))
assert_array_almost_equal(q @ r, a[:, p])
assert_equal(q.shape, (2, 2))
assert_equal(r.shape, (2, 3))
q2, r2 = qr(a[:, p])
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_simple_fat_e(self):
# economy version
a = [[8, 2, 3], [2, 9, 5]]
q, r = qr(a, mode='economic')
assert_array_almost_equal(q.T @ q, eye(2))
assert_array_almost_equal(q @ r, a)
assert_equal(q.shape, (2, 2))
assert_equal(r.shape, (2, 3))
def test_simple_fat_e_pivoting(self):
# economy version pivoting
a = np.asarray([[8, 2, 3], [2, 9, 5]])
q, r, p = qr(a, pivoting=True, mode='economic')
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(2))
assert_array_almost_equal(q @ r, a[:, p])
assert_equal(q.shape, (2, 2))
assert_equal(r.shape, (2, 3))
q2, r2 = qr(a[:, p], mode='economic')
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_simple_fat_left(self):
a = [[8, 2, 3], [2, 9, 5]]
q, r = qr(a, mode="economic")
c = [1, 2]
qc, r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(q @ c, qc)
assert_array_almost_equal(r, r2)
qc, r = qr_multiply(a, eye(2), "left")
assert_array_almost_equal(qc, q)
def test_simple_fat_left_pivoting(self):
a = [[8, 2, 3], [2, 9, 5]]
q, r, jpvt = qr(a, mode="economic", pivoting=True)
c = [1, 2]
qc, r, jpvt = qr_multiply(a, c, "left", True)
assert_array_almost_equal(q @ c, qc)
qc, r, jpvt = qr_multiply(a, eye(2), "left", True)
assert_array_almost_equal(qc, q)
def test_simple_fat_right(self):
a = [[8, 2, 3], [2, 9, 5]]
q, r = qr(a, mode="economic")
c = [1, 2]
cq, r2 = qr_multiply(a, c)
assert_array_almost_equal(c @ q, cq)
assert_array_almost_equal(r, r2)
cq, r = qr_multiply(a, eye(2))
assert_array_almost_equal(cq, q)
def test_simple_fat_right_pivoting(self):
a = [[8, 2, 3], [2, 9, 5]]
q, r, jpvt = qr(a, pivoting=True, mode="economic")
c = [1, 2]
cq, r, jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(c @ q, cq)
cq, r, jpvt = qr_multiply(a, eye(2), pivoting=True)
assert_array_almost_equal(cq, q)
def test_simple_complex(self):
a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
q, r = qr(a)
assert_array_almost_equal(q.conj().T @ q, eye(3))
assert_array_almost_equal(q @ r, a)
def test_simple_complex_left(self):
a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
q, r = qr(a)
c = [1, 2, 3+4j]
qc, r = qr_multiply(a, c, "left")
assert_array_almost_equal(q @ c, qc)
qc, r = qr_multiply(a, eye(3), "left")
assert_array_almost_equal(q, qc)
def test_simple_complex_right(self):
a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
q, r = qr(a)
c = [1, 2, 3+4j]
qc, r = qr_multiply(a, c)
assert_array_almost_equal(c @ q, qc)
qc, r = qr_multiply(a, eye(3))
assert_array_almost_equal(q, qc)
def test_simple_tall_complex_left(self):
a = [[8, 2+3j], [2, 9], [5+7j, 3]]
q, r = qr(a, mode="economic")
c = [1, 2+2j]
qc, r2 = qr_multiply(a, c, "left")
assert_array_almost_equal(q @ c, qc)
assert_array_almost_equal(r, r2)
c = array([1, 2, 0])
qc, r2 = qr_multiply(a, c, "left", overwrite_c=True)
assert_array_almost_equal(q @ c[:2], qc)
qc, r = qr_multiply(a, eye(2), "left")
assert_array_almost_equal(qc, q)
def test_simple_complex_left_conjugate(self):
a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
q, r = qr(a)
c = [1, 2, 3+4j]
qc, r = qr_multiply(a, c, "left", conjugate=True)
assert_array_almost_equal(q.conj() @ c, qc)
def test_simple_complex_tall_left_conjugate(self):
a = [[3, 3+4j], [5, 2+2j], [3, 2]]
q, r = qr(a, mode='economic')
c = [1, 3+4j]
qc, r = qr_multiply(a, c, "left", conjugate=True)
assert_array_almost_equal(q.conj() @ c, qc)
def test_simple_complex_right_conjugate(self):
a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
q, r = qr(a)
c = np.array([1, 2, 3+4j])
qc, r = qr_multiply(a, c, conjugate=True)
assert_array_almost_equal(c @ q.conj(), qc)
def test_simple_complex_pivoting(self):
a = array([[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]])
q, r, p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.conj().T @ q, eye(3))
assert_array_almost_equal(q @ r, a[:, p])
q2, r2 = qr(a[:, p])
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_simple_complex_left_pivoting(self):
a = array([[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]])
q, r, jpvt = qr(a, pivoting=True)
c = [1, 2, 3+4j]
qc, r, jpvt = qr_multiply(a, c, "left", True)
assert_array_almost_equal(q @ c, qc)
def test_simple_complex_right_pivoting(self):
a = array([[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]])
q, r, jpvt = qr(a, pivoting=True)
c = [1, 2, 3+4j]
qc, r, jpvt = qr_multiply(a, c, pivoting=True)
assert_array_almost_equal(c @ q, qc)
def test_random(self):
rng = np.random.RandomState(1234)
n = 20
for k in range(2):
a = rng.random([n, n])
q, r = qr(a)
assert_array_almost_equal(q.T @ q, eye(n))
assert_array_almost_equal(q @ r, a)
def test_random_left(self):
rng = np.random.RandomState(1234)
n = 20
for k in range(2):
a = rng.random([n, n])
q, r = qr(a)
c = rng.random([n])
qc, r = qr_multiply(a, c, "left")
assert_array_almost_equal(q @ c, qc)
qc, r = qr_multiply(a, eye(n), "left")
assert_array_almost_equal(q, qc)
def test_random_right(self):
rng = np.random.RandomState(1234)
n = 20
for k in range(2):
a = rng.random([n, n])
q, r = qr(a)
c = rng.random([n])
cq, r = qr_multiply(a, c)
assert_array_almost_equal(c @ q, cq)
cq, r = qr_multiply(a, eye(n))
assert_array_almost_equal(q, cq)
def test_random_pivoting(self):
rng = np.random.RandomState(1234)
n = 20
for k in range(2):
a = rng.random([n, n])
q, r, p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(n))
assert_array_almost_equal(q @ r, a[:, p])
q2, r2 = qr(a[:, p])
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_random_tall(self):
rng = np.random.RandomState(1234)
# full version
m = 200
n = 100
for k in range(2):
a = rng.random([m, n])
q, r = qr(a)
assert_array_almost_equal(q.T @ q, eye(m))
assert_array_almost_equal(q @ r, a)
def test_random_tall_left(self):
rng = np.random.RandomState(1234)
# full version
m = 200
n = 100
for k in range(2):
a = rng.random([m, n])
q, r = qr(a, mode="economic")
c = rng.random([n])
qc, r = qr_multiply(a, c, "left")
assert_array_almost_equal(q @ c, qc)
qc, r = qr_multiply(a, eye(n), "left")
assert_array_almost_equal(qc, q)
def test_random_tall_right(self):
rng = np.random.RandomState(1234)
# full version
m = 200
n = 100
for k in range(2):
a = rng.random([m, n])
q, r = qr(a, mode="economic")
c = rng.random([m])
cq, r = qr_multiply(a, c)
assert_array_almost_equal(c @ q, cq)
cq, r = qr_multiply(a, eye(m))
assert_array_almost_equal(cq, q)
def test_random_tall_pivoting(self):
rng = np.random.RandomState(1234)
# full version pivoting
m = 200
n = 100
for k in range(2):
a = rng.random([m, n])
q, r, p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(m))
assert_array_almost_equal(q @ r, a[:, p])
q2, r2 = qr(a[:, p])
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_random_tall_e(self):
rng = np.random.RandomState(1234)
# economy version
m = 200
n = 100
for k in range(2):
a = rng.random([m, n])
q, r = qr(a, mode='economic')
assert_array_almost_equal(q.T @ q, eye(n))
assert_array_almost_equal(q @ r, a)
assert_equal(q.shape, (m, n))
assert_equal(r.shape, (n, n))
def test_random_tall_e_pivoting(self):
rng = np.random.RandomState(1234)
# economy version pivoting
m = 200
n = 100
for k in range(2):
a = rng.random([m, n])
q, r, p = qr(a, pivoting=True, mode='economic')
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(n))
assert_array_almost_equal(q @ r, a[:, p])
assert_equal(q.shape, (m, n))
assert_equal(r.shape, (n, n))
q2, r2 = qr(a[:, p], mode='economic')
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_random_trap(self):
rng = np.random.RandomState(1234)
m = 100
n = 200
for k in range(2):
a = rng.random([m, n])
q, r = qr(a)
assert_array_almost_equal(q.T @ q, eye(m))
assert_array_almost_equal(q @ r, a)
def test_random_trap_pivoting(self):
rng = np.random.RandomState(1234)
m = 100
n = 200
for k in range(2):
a = rng.random([m, n])
q, r, p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.T @ q, eye(m))
assert_array_almost_equal(q @ r, a[:, p])
q2, r2 = qr(a[:, p])
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_random_complex(self):
rng = np.random.RandomState(1234)
n = 20
for k in range(2):
a = rng.random([n, n]) + 1j*rng.random([n, n])
q, r = qr(a)
assert_array_almost_equal(q.conj().T @ q, eye(n))
assert_array_almost_equal(q @ r, a)
def test_random_complex_left(self):
rng = np.random.RandomState(1234)
n = 20
for k in range(2):
a = rng.random([n, n]) + 1j*rng.random([n, n])
q, r = qr(a)
c = rng.random([n]) + 1j*rng.random([n])
qc, r = qr_multiply(a, c, "left")
assert_array_almost_equal(q @ c, qc)
qc, r = qr_multiply(a, eye(n), "left")
assert_array_almost_equal(q, qc)
def test_random_complex_right(self):
rng = np.random.RandomState(1234)
n = 20
for k in range(2):
a = rng.random([n, n]) + 1j*rng.random([n, n])
q, r = qr(a)
c = rng.random([n]) + 1j*rng.random([n])
cq, r = qr_multiply(a, c)
assert_array_almost_equal(c @ q, cq)
cq, r = qr_multiply(a, eye(n))
assert_array_almost_equal(q, cq)
def test_random_complex_pivoting(self):
rng = np.random.RandomState(1234)
n = 20
for k in range(2):
a = rng.random([n, n]) + 1j*rng.random([n, n])
q, r, p = qr(a, pivoting=True)
d = abs(diag(r))
assert_(np.all(d[1:] <= d[:-1]))
assert_array_almost_equal(q.conj().T @ q, eye(n))
assert_array_almost_equal(q @ r, a[:, p])
q2, r2 = qr(a[:, p])
assert_array_almost_equal(q, q2)
assert_array_almost_equal(r, r2)
def test_check_finite(self):
a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
q, r = qr(a, check_finite=False)
assert_array_almost_equal(q.T @ q, eye(3))
assert_array_almost_equal(q @ r, a)
def test_lwork(self):
a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
# Get comparison values
q, r = qr(a, lwork=None)
# Test against minimum valid lwork
q2, r2 = qr(a, lwork=3)
assert_array_almost_equal(q2, q)
assert_array_almost_equal(r2, r)
# Test against larger lwork
q3, r3 = qr(a, lwork=10)
assert_array_almost_equal(q3, q)
assert_array_almost_equal(r3, r)
# Test against explicit lwork=-1
q4, r4 = qr(a, lwork=-1)
assert_array_almost_equal(q4, q)
assert_array_almost_equal(r4, r)
# Test against invalid lwork
assert_raises(Exception, qr, (a,), {'lwork': 0})
assert_raises(Exception, qr, (a,), {'lwork': 2})
class TestRQ:
    """Tests for scipy.linalg.rq: R is upper triangular (trailing block),
    Q has orthonormal rows, and R @ Q reconstructs the input.
    """
    def test_simple(self):
        a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
        r, q = rq(a)
        assert_array_almost_equal(q @ q.T, eye(3))
        assert_array_almost_equal(r @ q, a)
    def test_r(self):
        # mode='r' returns only the R factor, identical to the full result.
        a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
        r, q = rq(a)
        r2 = rq(a, mode='r')
        assert_array_almost_equal(r, r2)
    def test_random(self):
        rng = np.random.RandomState(1234)
        n = 20
        for k in range(2):
            a = rng.random([n, n])
            r, q = rq(a)
            assert_array_almost_equal(q @ q.T, eye(n))
            assert_array_almost_equal(r @ q, a)
    def test_simple_trap(self):
        # Wide ("trapezoidal") input: Q is square of the larger dimension.
        a = [[8, 2, 3], [2, 9, 3]]
        r, q = rq(a)
        assert_array_almost_equal(q.T @ q, eye(3))
        assert_array_almost_equal(r @ q, a)
    def test_simple_tall(self):
        a = [[8, 2], [2, 9], [5, 3]]
        r, q = rq(a)
        assert_array_almost_equal(q.T @ q, eye(2))
        assert_array_almost_equal(r @ q, a)
    def test_simple_fat(self):
        a = [[8, 2, 5], [2, 9, 3]]
        r, q = rq(a)
        assert_array_almost_equal(q @ q.T, eye(3))
        assert_array_almost_equal(r @ q, a)
    def test_simple_complex(self):
        a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
        r, q = rq(a)
        assert_array_almost_equal(q @ q.conj().T, eye(3))
        assert_array_almost_equal(r @ q, a)
    def test_random_tall(self):
        rng = np.random.RandomState(1234)
        m = 200
        n = 100
        for k in range(2):
            a = rng.random([m, n])
            r, q = rq(a)
            assert_array_almost_equal(q @ q.T, eye(n))
            assert_array_almost_equal(r @ q, a)
    def test_random_trap(self):
        rng = np.random.RandomState(1234)
        m = 100
        n = 200
        for k in range(2):
            a = rng.random([m, n])
            r, q = rq(a)
            assert_array_almost_equal(q @ q.T, eye(n))
            assert_array_almost_equal(r @ q, a)
    def test_random_trap_economic(self):
        # Economic mode on wide input: q is (m, n), r is (m, m).
        rng = np.random.RandomState(1234)
        m = 100
        n = 200
        for k in range(2):
            a = rng.random([m, n])
            r, q = rq(a, mode='economic')
            assert_array_almost_equal(q @ q.T, eye(m))
            assert_array_almost_equal(r @ q, a)
            assert_equal(q.shape, (m, n))
            assert_equal(r.shape, (m, m))
    def test_random_complex(self):
        rng = np.random.RandomState(1234)
        n = 20
        for k in range(2):
            a = rng.random([n, n]) + 1j*rng.random([n, n])
            r, q = rq(a)
            assert_array_almost_equal(q @ q.conj().T, eye(n))
            assert_array_almost_equal(r @ q, a)
    def test_random_complex_economic(self):
        rng = np.random.RandomState(1234)
        m = 100
        n = 200
        for k in range(2):
            a = rng.random([m, n]) + 1j*rng.random([m, n])
            r, q = rq(a, mode='economic')
            assert_array_almost_equal(q @ q.conj().T, eye(m))
            assert_array_almost_equal(r @ q, a)
            assert_equal(q.shape, (m, n))
            assert_equal(r.shape, (m, m))
    def test_check_finite(self):
        a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
        r, q = rq(a, check_finite=False)
        assert_array_almost_equal(q @ q.T, eye(3))
        assert_array_almost_equal(r @ q, a)
class TestSchur:
    """Tests for scipy.linalg.schur, rsf2csf, and eigenvalue sorting."""
    def check_schur(self, a, t, u, rtol, atol):
        # Check that the Schur decomposition is correct.
        assert_allclose(u @ t @ u.conj().T, a, rtol=rtol, atol=atol,
                        err_msg="Schur decomposition does not match 'a'")
        # The expected value of u @ u.H - I is all zeros, so test
        # with absolute tolerance only.
        assert_allclose(u @ u.conj().T - np.eye(len(u)), 0, rtol=0, atol=atol,
                        err_msg="u is not unitary")
    def test_simple(self):
        # Real, complex, and rsf2csf-converted forms must all reconstruct a.
        a = [[8, 12, 3], [2, 9, 3], [10, 3, 6]]
        t, z = schur(a)
        self.check_schur(a, t, z, rtol=1e-14, atol=5e-15)
        tc, zc = schur(a, 'complex')
        assert_(np.any(ravel(iscomplex(zc))) and np.any(ravel(iscomplex(tc))))
        self.check_schur(a, tc, zc, rtol=1e-14, atol=5e-15)
        tc2, zc2 = rsf2csf(tc, zc)
        self.check_schur(a, tc2, zc2, rtol=1e-14, atol=5e-15)
    @pytest.mark.parametrize(
        'sort, expected_diag',
        [('lhp', [-np.sqrt(2), -0.5, np.sqrt(2), 0.5]),
         ('rhp', [np.sqrt(2), 0.5, -np.sqrt(2), -0.5]),
         ('iuc', [-0.5, 0.5, np.sqrt(2), -np.sqrt(2)]),
         ('ouc', [np.sqrt(2), -np.sqrt(2), -0.5, 0.5]),
         (lambda x: x >= 0.0, [np.sqrt(2), 0.5, -np.sqrt(2), -0.5])]
    )
    def test_sort(self, sort, expected_diag):
        # The exact eigenvalues of this matrix are
        # -sqrt(2), sqrt(2), -1/2, 1/2.
        a = [[4., 3., 1., -1.],
             [-4.5, -3.5, -1., 1.],
             [9., 6., -4., 4.5],
             [6., 4., -3., 3.5]]
        t, u, sdim = schur(a, sort=sort)
        self.check_schur(a, t, u, rtol=1e-14, atol=5e-15)
        assert_allclose(np.diag(t), expected_diag, rtol=1e-12)
        # Each predicate above selects exactly two of the four eigenvalues.
        assert_equal(2, sdim)
    def test_sort_errors(self):
        # Unknown sort strings and non-callable sort values must raise.
        a = [[4., 3., 1., -1.],
             [-4.5, -3.5, -1., 1.],
             [9., 6., -4., 4.5],
             [6., 4., -3., 3.5]]
        assert_raises(ValueError, schur, a, sort='unsupported')
        assert_raises(ValueError, schur, a, sort=1)
    def test_check_finite(self):
        a = [[8, 12, 3], [2, 9, 3], [10, 3, 6]]
        t, z = schur(a, check_finite=False)
        assert_array_almost_equal(z @ t @ z.conj().T, a)
class TestHessenberg:
    """Tests for scipy.linalg.hessenberg: q.T @ a @ q reproduces the
    Hessenberg form h, with reference values from LAPACK documentation.
    """
    def test_simple(self):
        a = [[-149, -50, -154],
             [537, 180, 546],
             [-27, -9, -25]]
        # Known Hessenberg form of `a` (signs may differ by column).
        h1 = [[-149.0000, 42.2037, -156.3165],
              [-537.6783, 152.5511, -554.9272],
              [0, 0.0728, 2.4489]]
        h, q = hessenberg(a, calc_q=1)
        assert_array_almost_equal(q.T @ a @ q, h)
        assert_array_almost_equal(h, h1, decimal=4)
    def test_simple_complex(self):
        a = [[-149, -50, -154],
             [537, 180j, 546],
             [-27j, -9, -25]]
        h, q = hessenberg(a, calc_q=1)
        assert_array_almost_equal(q.conj().T @ a @ q, h)
    def test_simple2(self):
        # Larger input that is already nearly upper Hessenberg.
        a = [[1, 2, 3, 4, 5, 6, 7],
             [0, 2, 3, 4, 6, 7, 2],
             [0, 2, 2, 3, 0, 3, 2],
             [0, 0, 2, 8, 0, 0, 2],
             [0, 3, 1, 2, 0, 1, 2],
             [0, 1, 2, 3, 0, 1, 0],
             [0, 0, 0, 0, 0, 1, 2]]
        h, q = hessenberg(a, calc_q=1)
        assert_array_almost_equal(q.T @ a @ q, h)
    def test_simple3(self):
        # Identity with one subdiagonal entry perturbed.
        a = np.eye(3)
        a[-1, 0] = 2
        h, q = hessenberg(a, calc_q=1)
        assert_array_almost_equal(q.T @ a @ q, h)
    def test_random(self):
        rng = np.random.RandomState(1234)
        n = 20
        for k in range(2):
            a = rng.random([n, n])
            h, q = hessenberg(a, calc_q=1)
            assert_array_almost_equal(q.T @ a @ q, h)
    def test_random_complex(self):
        rng = np.random.RandomState(1234)
        n = 20
        for k in range(2):
            a = rng.random([n, n]) + 1j*rng.random([n, n])
            h, q = hessenberg(a, calc_q=1)
            assert_array_almost_equal(q.conj().T @ a @ q, h)
    def test_check_finite(self):
        a = [[-149, -50, -154],
             [537, 180, 546],
             [-27, -9, -25]]
        h1 = [[-149.0000, 42.2037, -156.3165],
              [-537.6783, 152.5511, -554.9272],
              [0, 0.0728, 2.4489]]
        h, q = hessenberg(a, calc_q=1, check_finite=False)
        assert_array_almost_equal(q.T @ a @ q, h)
        assert_array_almost_equal(h, h1, decimal=4)
    def test_2x2(self):
        # 2x2 matrices are already Hessenberg: q must be the identity.
        a = [[2, 1], [7, 12]]
        h, q = hessenberg(a, calc_q=1)
        assert_array_almost_equal(q, np.eye(2))
        assert_array_almost_equal(h, a)
        b = [[2-7j, 1+2j], [7+3j, 12-2j]]
        h2, q2 = hessenberg(b, calc_q=1)
        assert_array_almost_equal(q2, np.eye(2))
        assert_array_almost_equal(h2, b)
# Record which BLAS library scipy was built against; used below to xfail a
# known OpenBLAS-on-macOS failure (gh-16949). CONFIG is None when the build
# information is unavailable, in which case both stay None.
blas_provider = blas_version = None
if CONFIG is not None:
    blas_provider = CONFIG['Build Dependencies']['blas']['name']
    blas_version = CONFIG['Build Dependencies']['blas']['version']
class TestQZ:
    """Tests for scipy.linalg.qz (generalized Schur decomposition):
    Q @ AA @ Z^H == A, Q @ BB @ Z^H == B, Q and Z unitary, diag(BB) >= 0.
    """
    @pytest.mark.xfail(
        sys.platform == 'darwin' and
        blas_provider == 'openblas' and
        blas_version < "0.3.21.dev",
        reason="gges[float32] broken for OpenBLAS on macOS, see gh-16949"
    )
    def test_qz_single(self):
        # Single-precision real input; looser decimal=5 tolerance.
        rng = np.random.RandomState(12345)
        n = 5
        A = rng.random([n, n]).astype(float32)
        B = rng.random([n, n]).astype(float32)
        AA, BB, Q, Z = qz(A, B)
        assert_array_almost_equal(Q @ AA @ Z.T, A, decimal=5)
        assert_array_almost_equal(Q @ BB @ Z.T, B, decimal=5)
        assert_array_almost_equal(Q @ Q.T, eye(n), decimal=5)
        assert_array_almost_equal(Z @ Z.T, eye(n), decimal=5)
        assert_(np.all(diag(BB) >= 0))
    def test_qz_double(self):
        rng = np.random.RandomState(12345)
        n = 5
        A = rng.random([n, n])
        B = rng.random([n, n])
        AA, BB, Q, Z = qz(A, B)
        assert_array_almost_equal(Q @ AA @ Z.T, A)
        assert_array_almost_equal(Q @ BB @ Z.T, B)
        assert_array_almost_equal(Q @ Q.T, eye(n))
        assert_array_almost_equal(Z @ Z.T, eye(n))
        assert_(np.all(diag(BB) >= 0))
    def test_qz_complex(self):
        # Complex input: BB's diagonal must additionally be real.
        rng = np.random.RandomState(12345)
        n = 5
        A = rng.random([n, n]) + 1j*rng.random([n, n])
        B = rng.random([n, n]) + 1j*rng.random([n, n])
        AA, BB, Q, Z = qz(A, B)
        assert_array_almost_equal(Q @ AA @ Z.conj().T, A)
        assert_array_almost_equal(Q @ BB @ Z.conj().T, B)
        assert_array_almost_equal(Q @ Q.conj().T, eye(n))
        assert_array_almost_equal(Z @ Z.conj().T, eye(n))
        assert_(np.all(diag(BB) >= 0))
        assert_(np.all(diag(BB).imag == 0))
    def test_qz_complex64(self):
        rng = np.random.RandomState(12345)
        n = 5
        A = (rng.random([n, n]) + 1j*rng.random([n, n])).astype(complex64)
        B = (rng.random([n, n]) + 1j*rng.random([n, n])).astype(complex64)
        AA, BB, Q, Z = qz(A, B)
        assert_array_almost_equal(Q @ AA @ Z.conj().T, A, decimal=5)
        assert_array_almost_equal(Q @ BB @ Z.conj().T, B, decimal=5)
        assert_array_almost_equal(Q @ Q.conj().T, eye(n), decimal=5)
        assert_array_almost_equal(Z @ Z.conj().T, eye(n), decimal=5)
        assert_(np.all(diag(BB) >= 0))
        assert_(np.all(diag(BB).imag == 0))
    def test_qz_double_complex(self):
        # Real input but output='complex': imaginary parts must vanish
        # in the reconstructions.
        rng = np.random.RandomState(12345)
        n = 5
        A = rng.random([n, n])
        B = rng.random([n, n])
        AA, BB, Q, Z = qz(A, B, output='complex')
        aa = Q @ AA @ Z.conj().T
        assert_array_almost_equal(aa.real, A)
        assert_array_almost_equal(aa.imag, 0)
        bb = Q @ BB @ Z.conj().T
        assert_array_almost_equal(bb.real, B)
        assert_array_almost_equal(bb.imag, 0)
        assert_array_almost_equal(Q @ Q.conj().T, eye(n))
        assert_array_almost_equal(Z @ Z.conj().T, eye(n))
        assert_(np.all(diag(BB) >= 0))
    def test_qz_double_sort(self):
        # Real-output qz does not support sort; only the ValueError is
        # checked. The `if False:` block below preserves the original
        # (disabled) sorted-QZ assertions for reference.
        # from https://www.nag.com/lapack-ex/node119.html
        # NOTE: These matrices may be ill-conditioned and lead to a
        # seg fault on certain python versions when compiled with
        # sse2 or sse3 older ATLAS/LAPACK binaries for windows
        # A = np.array([[3.9, 12.5, -34.5, -0.5],
        #              [ 4.3, 21.5, -47.5, 7.5],
        #              [ 4.3, 21.5, -43.5, 3.5],
        #              [ 4.4, 26.0, -46.0, 6.0 ]])
        # B = np.array([[ 1.0, 2.0, -3.0, 1.0],
        #              [1.0, 3.0, -5.0, 4.0],
        #              [1.0, 3.0, -4.0, 3.0],
        #              [1.0, 3.0, -4.0, 4.0]])
        A = np.array([[3.9, 12.5, -34.5, 2.5],
                      [4.3, 21.5, -47.5, 7.5],
                      [4.3, 1.5, -43.5, 3.5],
                      [4.4, 6.0, -46.0, 6.0]])
        B = np.array([[1.0, 1.0, -3.0, 1.0],
                      [1.0, 3.0, -5.0, 4.4],
                      [1.0, 2.0, -4.0, 1.0],
                      [1.2, 3.0, -4.0, 4.0]])
        assert_raises(ValueError, qz, A, B, sort=lambda ar, ai, beta: ai == 0)
        if False:
            AA, BB, Q, Z, sdim = qz(A, B, sort=lambda ar, ai, beta: ai == 0)
            # assert_(sdim == 2)
            assert_(sdim == 4)
            assert_array_almost_equal(Q @ AA @ Z.T, A)
            assert_array_almost_equal(Q @ BB @ Z.T, B)
            # test absolute values bc the sign is ambiguous and
            # might be platform dependent
            assert_array_almost_equal(np.abs(AA), np.abs(np.array(
                [[35.7864, -80.9061, -12.0629, -9.498],
                 [0., 2.7638, -2.3505, 7.3256],
                 [0., 0., 0.6258, -0.0398],
                 [0., 0., 0., -12.8217]])), 4)
            assert_array_almost_equal(np.abs(BB), np.abs(np.array(
                [[4.5324, -8.7878, 3.2357, -3.5526],
                 [0., 1.4314, -2.1894, 0.9709],
                 [0., 0., 1.3126, -0.3468],
                 [0., 0., 0., 0.559]])), 4)
            assert_array_almost_equal(np.abs(Q), np.abs(np.array(
                [[-0.4193, -0.605, -0.1894, -0.6498],
                 [-0.5495, 0.6987, 0.2654, -0.3734],
                 [-0.4973, -0.3682, 0.6194, 0.4832],
                 [-0.5243, 0.1008, -0.7142, 0.4526]])), 4)
            assert_array_almost_equal(np.abs(Z), np.abs(np.array(
                [[-0.9471, -0.2971, -0.1217, 0.0055],
                 [-0.0367, 0.1209, 0.0358, 0.9913],
                 [0.3171, -0.9041, -0.2547, 0.1312],
                 [0.0346, 0.2824, -0.9587, 0.0014]])), 4)
        # test absolute values bc the sign is ambiguous and might be platform
        # dependent
        # assert_array_almost_equal(abs(AA), abs(np.array([
        #                [3.8009, -69.4505, 50.3135, -43.2884],
        #                [0.0000, 9.2033, -0.2001, 5.9881],
        #                [0.0000, 0.0000, 1.4279, 4.4453],
        #                [0.0000, 0.0000, 0.9019, -1.1962]])), 4)
        # assert_array_almost_equal(abs(BB), abs(np.array([
        #                [1.9005, -10.2285, 0.8658, -5.2134],
        #                [0.0000, 2.3008, 0.7915, 0.4262],
        #                [0.0000, 0.0000, 0.8101, 0.0000],
        #                [0.0000, 0.0000, 0.0000, -0.2823]])), 4)
        # assert_array_almost_equal(abs(Q), abs(np.array([
        #                [0.4642, 0.7886, 0.2915, -0.2786],
        #                [0.5002, -0.5986, 0.5638, -0.2713],
        #                [0.5002, 0.0154, -0.0107, 0.8657],
        #                [0.5331, -0.1395, -0.7727, -0.3151]])), 4)
        # assert_array_almost_equal(dot(Q,Q.T), eye(4))
        # assert_array_almost_equal(abs(Z), abs(np.array([
        #                [0.9961, -0.0014, 0.0887, -0.0026],
        #                [0.0057, -0.0404, -0.0938, -0.9948],
        #                [0.0626, 0.7194, -0.6908, 0.0363],
        #                [0.0626, -0.6934, -0.7114, 0.0956]])), 4)
        # assert_array_almost_equal(dot(Z,Z.T), eye(4))
    # def test_qz_complex_sort(self):
    #    cA = np.array([
    #   [-21.10+22.50*1j, 53.50+-50.50*1j, -34.50+127.50*1j, 7.50+ 0.50*1j],
    #   [-0.46+ -7.78*1j, -3.50+-37.50*1j, -15.50+ 58.50*1j,-10.50+ -1.50*1j],
    #   [ 4.30+ -5.50*1j, 39.70+-17.10*1j, -68.50+ 12.50*1j, -7.50+ -3.50*1j],
    #   [ 5.50+ 4.40*1j, 14.40+ 43.30*1j, -32.50+-46.00*1j,-19.00+-32.50*1j]])
    #    cB = np.array([
    #   [1.00+ -5.00*1j, 1.60+ 1.20*1j,-3.00+ 0.00*1j, 0.00+ -1.00*1j],
    #   [0.80+ -0.60*1j, 3.00+ -5.00*1j,-4.00+ 3.00*1j,-2.40+ -3.20*1j],
    #   [1.00+ 0.00*1j, 2.40+ 1.80*1j,-4.00+ -5.00*1j, 0.00+ -3.00*1j],
    #   [0.00+ 1.00*1j,-1.80+ 2.40*1j, 0.00+ -4.00*1j, 4.00+ -5.00*1j]])
    #    AAS,BBS,QS,ZS,sdim = qz(cA,cB,sort='lhp')
    #    eigenvalues = diag(AAS)/diag(BBS)
    #    assert_(np.all(np.real(eigenvalues[:sdim] < 0)))
    #    assert_(np.all(np.real(eigenvalues[sdim:] > 0)))
    def test_check_finite(self):
        rng = np.random.RandomState(12345)
        n = 5
        A = rng.random([n, n])
        B = rng.random([n, n])
        AA, BB, Q, Z = qz(A, B, check_finite=False)
        assert_array_almost_equal(Q @ AA @ Z.T, A)
        assert_array_almost_equal(Q @ BB @ Z.T, B)
        assert_array_almost_equal(Q @ Q.T, eye(n))
        assert_array_almost_equal(Z @ Z.T, eye(n))
        assert_(np.all(diag(BB) >= 0))
class TestOrdQZ:
    """Tests for ordqz(): QZ decomposition with reordered eigenvalues."""

    @classmethod
    def setup_class(cls):
        # Complex pencil from a NAG LAPACK example:
        # https://www.nag.com/lapack-ex/node119.html
        A1 = np.array([[-21.10 - 22.50j, 53.5 - 50.5j, -34.5 + 127.5j,
                        7.5 + 0.5j],
                       [-0.46 - 7.78j, -3.5 - 37.5j, -15.5 + 58.5j,
                        -10.5 - 1.5j],
                       [4.30 - 5.50j, 39.7 - 17.1j, -68.5 + 12.5j,
                        -7.5 - 3.5j],
                       [5.50 + 4.40j, 14.4 + 43.3j, -32.5 - 46.0j,
                        -19.0 - 32.5j]])
        # NOTE(review): the `.0 - 5.0j` entry below looks like it may have
        # lost a leading digit (the NAG example uses 3.0 - 5.0j) — verify
        # against the upstream source.
        B1 = np.array([[1.0 - 5.0j, 1.6 + 1.2j, -3 + 0j, 0.0 - 1.0j],
                       [0.8 - 0.6j, .0 - 5.0j, -4 + 3j, -2.4 - 3.2j],
                       [1.0 + 0.0j, 2.4 + 1.8j, -4 - 5j, 0.0 - 3.0j],
                       [0.0 + 1.0j, -1.8 + 2.4j, 0 - 4j, 4.0 - 5.0j]])
        # Real pencil from a NAG example:
        # https://www.nag.com/numeric/fl/nagdoc_fl23/xhtml/F08/f08yuf.xml
        A2 = np.array([[3.9, 12.5, -34.5, -0.5],
                       [4.3, 21.5, -47.5, 7.5],
                       [4.3, 21.5, -43.5, 3.5],
                       [4.4, 26.0, -46.0, 6.0]])
        B2 = np.array([[1, 2, -3, 1],
                       [1, 3, -5, 4],
                       [1, 3, -4, 3],
                       [1, 3, -4, 4]])
        # example with the eigenvalues
        # -0.33891648, 1.61217396+0.74013521j, 1.61217396-0.74013521j,
        # 0.61244091
        # thus featuring:
        # * one complex conjugate eigenvalue pair,
        # * one eigenvalue in the lhp
        # * 2 eigenvalues in the unit circle
        # * 2 non-real eigenvalues
        A3 = np.array([[5., 1., 3., 3.],
                       [4., 4., 2., 7.],
                       [7., 4., 1., 3.],
                       [0., 4., 8., 7.]])
        B3 = np.array([[8., 10., 6., 10.],
                       [7., 7., 2., 9.],
                       [9., 1., 6., 6.],
                       [5., 1., 4., 7.]])
        # example with infinite eigenvalues
        A4 = np.eye(2)
        B4 = np.diag([0, 1])
        # example with (alpha, beta) = (0, 0)
        A5 = np.diag([1, 0])

        cls.A = [A1, A2, A3, A4, A5]
        # A5 is deliberately reused as its own B so the last pencil
        # contains an (alpha, beta) == (0, 0) pair.
        cls.B = [B1, B2, B3, B4, A5]

    def qz_decomp(self, sort):
        # Raise on any floating-point problem during the decompositions.
        with np.errstate(all='raise'):
            ret = [ordqz(Ai, Bi, sort=sort) for Ai, Bi in zip(self.A, self.B)]
        return tuple(ret)

    def check(self, A, B, sort, AA, BB, alpha, beta, Q, Z):
        """Validate one ordqz result against its defining properties:
        unitarity of Q/Z, the factorization itself, the (quasi-)triangular
        shape of AA/BB, eigenvalue consistency, and the sort order.
        """
        Id = np.eye(*A.shape)
        # make sure Q and Z are orthogonal
        assert_array_almost_equal(Q @ Q.T.conj(), Id)
        assert_array_almost_equal(Z @ Z.T.conj(), Id)
        # check factorization
        assert_array_almost_equal(Q @ AA, A @ Z)
        assert_array_almost_equal(Q @ BB, B @ Z)
        # check shape of AA and BB
        assert_array_equal(np.tril(AA, -2), np.zeros(AA.shape))
        assert_array_equal(np.tril(BB, -1), np.zeros(BB.shape))
        # check eigenvalues
        for i in range(A.shape[0]):
            # does the current diagonal element belong to a 2-by-2 block
            # that was already checked?
            # NOTE(review): this inspects the input A rather than the
            # quasi-triangular AA — confirm that is intended.
            if i > 0 and A[i, i - 1] != 0:
                continue
            # take care of 2-by-2 blocks
            if i < AA.shape[0] - 1 and AA[i + 1, i] != 0:
                evals, _ = eig(AA[i:i + 2, i:i + 2], BB[i:i + 2, i:i + 2])
                # make sure the pair of complex conjugate eigenvalues
                # is ordered consistently (positive imaginary part first)
                if evals[0].imag < 0:
                    evals = evals[[1, 0]]
                tmp = alpha[i:i + 2]/beta[i:i + 2]
                if tmp[0].imag < 0:
                    tmp = tmp[[1, 0]]
                assert_array_almost_equal(evals, tmp)
            else:
                if alpha[i] == 0 and beta[i] == 0:
                    # indeterminate pair: both diagonals must vanish
                    assert_equal(AA[i, i], 0)
                    assert_equal(BB[i, i], 0)
                elif beta[i] == 0:
                    # infinite eigenvalue
                    assert_equal(BB[i, i], 0)
                else:
                    assert_almost_equal(AA[i, i]/BB[i, i], alpha[i]/beta[i])
        sortfun = _select_function(sort)
        lastsort = True
        for i in range(A.shape[0]):
            cursort = sortfun(np.array([alpha[i]]), np.array([beta[i]]))
            # once the sorting criterion was not matched all subsequent
            # eigenvalues also shouldn't match
            if not lastsort:
                assert not cursort
            lastsort = cursort

    def check_all(self, sort):
        # Decompose every stored pencil with the given sort and check each.
        ret = self.qz_decomp(sort)
        for reti, Ai, Bi in zip(ret, self.A, self.B):
            self.check(Ai, Bi, sort, *reti)

    def test_lhp(self):
        self.check_all('lhp')

    def test_rhp(self):
        self.check_all('rhp')

    def test_iuc(self):
        self.check_all('iuc')

    def test_ouc(self):
        self.check_all('ouc')

    def test_ref(self):
        # real eigenvalues first (top-left corner)
        def sort(x, y):
            out = np.empty_like(x, dtype=bool)
            nonzero = (y != 0)
            out[~nonzero] = False
            out[nonzero] = (x[nonzero]/y[nonzero]).imag == 0
            return out

        self.check_all(sort)

    def test_cef(self):
        # complex eigenvalues first (top-left corner)
        def sort(x, y):
            out = np.empty_like(x, dtype=bool)
            nonzero = (y != 0)
            out[~nonzero] = False
            out[nonzero] = (x[nonzero]/y[nonzero]).imag != 0
            return out

        self.check_all(sort)

    def test_diff_input_types(self):
        # Mixing a complex A with a real B (and vice versa) must work.
        ret = ordqz(self.A[1], self.B[2], sort='lhp')
        self.check(self.A[1], self.B[2], 'lhp', *ret)

        ret = ordqz(self.B[2], self.A[1], sort='lhp')
        self.check(self.B[2], self.A[1], 'lhp', *ret)

    def test_sort_explicit(self):
        # Test order of the eigenvalues in the 2 x 2 case where we can
        # explicitly compute the solution
        A1 = np.eye(2)
        B1 = np.diag([-2, 0.5])
        expected1 = [('lhp', [-0.5, 2]),
                     ('rhp', [2, -0.5]),
                     ('iuc', [-0.5, 2]),
                     ('ouc', [2, -0.5])]
        A2 = np.eye(2)
        B2 = np.diag([-2 + 1j, 0.5 + 0.5j])
        expected2 = [('lhp', [1/(-2 + 1j), 1/(0.5 + 0.5j)]),
                     ('rhp', [1/(0.5 + 0.5j), 1/(-2 + 1j)]),
                     ('iuc', [1/(-2 + 1j), 1/(0.5 + 0.5j)]),
                     ('ouc', [1/(0.5 + 0.5j), 1/(-2 + 1j)])]
        # 'lhp' is ambiguous so don't test it
        A3 = np.eye(2)
        B3 = np.diag([2, 0])
        expected3 = [('rhp', [0.5, np.inf]),
                     ('iuc', [0.5, np.inf]),
                     ('ouc', [np.inf, 0.5])]
        # 'rhp' is ambiguous so don't test it
        A4 = np.eye(2)
        B4 = np.diag([-2, 0])
        expected4 = [('lhp', [-0.5, np.inf]),
                     ('iuc', [-0.5, np.inf]),
                     ('ouc', [np.inf, -0.5])]
        A5 = np.diag([0, 1])
        B5 = np.diag([0, 0.5])
        # 'lhp' and 'iuc' are ambiguous so don't test them
        expected5 = [('rhp', [2, np.nan]),
                     ('ouc', [2, np.nan])]
        A = [A1, A2, A3, A4, A5]
        B = [B1, B2, B3, B4, B5]
        expected = [expected1, expected2, expected3, expected4, expected5]
        for Ai, Bi, expectedi in zip(A, B, expected):
            for sortstr, expected_eigvals in expectedi:
                _, _, alpha, beta, _, _ = ordqz(Ai, Bi, sort=sortstr)
                # Recover generalized eigenvalues, mapping (0, 0) pairs
                # to nan and finite/0 to inf.
                azero = (alpha == 0)
                bzero = (beta == 0)
                x = np.empty_like(alpha)
                x[azero & bzero] = np.nan
                x[~azero & bzero] = np.inf
                x[~bzero] = alpha[~bzero]/beta[~bzero]
                assert_allclose(expected_eigvals, x)
class TestOrdQZWorkspaceSize:
    """Regression tests for ordqz workspace sizing on large inputs."""

    def test_decompose(self):
        rng = np.random.RandomState(12345)
        N = 202
        # raises error if lwork parameter to dtrsen is too small
        for ddtype in [np.float32, np.float64]:
            A = rng.random((N, N)).astype(ddtype)
            B = rng.random((N, N)).astype(ddtype)
            # sort = lambda ar, ai, b: ar**2 + ai**2 < b**2
            _ = ordqz(A, B, sort=lambda alpha, beta: alpha < beta,
                      output='real')

        for ddtype in [np.complex128, np.complex64]:
            A = rng.random((N, N)).astype(ddtype)
            B = rng.random((N, N)).astype(ddtype)
            _ = ordqz(A, B, sort=lambda alpha, beta: alpha < beta,
                      output='complex')

    @pytest.mark.slow
    def test_decompose_ouc(self):
        rng = np.random.RandomState(12345)
        N = 202
        # segfaults if lwork parameter to dtrsen is too small
        for ddtype in [np.float32, np.float64, np.complex128, np.complex64]:
            A = rng.random((N, N)).astype(ddtype)
            B = rng.random((N, N)).astype(ddtype)
            S, T, alpha, beta, U, V = ordqz(A, B, sort='ouc')
class TestDatacopied:
    """Tests for the internal _datacopied() helper, which reports whether
    asarray() had to copy the caller's data."""

    def test_datacopied(self):
        from scipy.linalg._decomp import _datacopied

        M = matrix([[0, 1], [2, 3]])
        A = asarray(M)
        L = M.tolist()
        M2 = M.copy()

        # Objects exposing array protocols: asarray() does not need to
        # copy for either of these.
        class Fake1:
            def __array__(self):
                return A

        class Fake2:
            __array_interface__ = A.__array_interface__

        F1 = Fake1()
        F2 = Fake2()

        # Only the plain Python list forces a copy.
        for item, status in [(M, False), (A, False), (L, True),
                             (M2, False), (F1, False), (F2, False)]:
            arr = asarray(item)
            assert_equal(_datacopied(arr, item), status,
                         err_msg=repr(item))
def test_aligned_mem_float():
    """Check linalg works with non-aligned memory (float32)"""
    # Allocate 402 bytes of memory (allocated on boundary)
    a = arange(402, dtype=np.uint8)

    # View the buffer as float32 at a 2-byte offset, so the data pointer
    # is deliberately misaligned relative to the 4-byte itemsize.
    z = np.frombuffer(a.data, offset=2, count=100, dtype=float32)
    z.shape = 10, 10

    # Must not crash despite the misalignment.
    eig(z, overwrite_a=True)
    eig(z.T, overwrite_a=True)
@pytest.mark.skipif(platform.machine() == 'ppc64le',
                    reason="crashes on ppc64le")
def test_aligned_mem():
    """Check linalg works with non-aligned memory (float64)"""
    # Allocate 804 bytes of memory (allocated on boundary)
    a = arange(804, dtype=np.uint8)

    # View the buffer as float64 at a 4-byte offset, misaligned relative
    # to the 8-byte itemsize.
    z = np.frombuffer(a.data, offset=4, count=100, dtype=float)
    z.shape = 10, 10

    eig(z, overwrite_a=True)
    eig(z.T, overwrite_a=True)
def test_aligned_mem_complex():
    """Check that complex objects don't need to be completely aligned"""
    # Allocate 1608 bytes of memory (allocated on boundary)
    a = zeros(1608, dtype=np.uint8)

    # View the buffer as complex at an 8-byte offset (itemsize is 16).
    z = np.frombuffer(a.data, offset=8, count=100, dtype=complex)
    z.shape = 10, 10

    eig(z, overwrite_a=True)
    # This does not need special handling
    eig(z.T, overwrite_a=True)
def check_lapack_misaligned(func, args, kwargs):
    """Call ``func`` with each ndarray argument replaced, in turn, by a
    deliberately misaligned copy (data pointer 4 bytes past the natural
    boundary), and again with that copy transposed when it is >= 2-D.
    """
    arg_list = list(args)
    for idx, orig in enumerate(arg_list):
        if not isinstance(orig, np.ndarray):
            continue
        # Build a buffer 8 bytes longer than needed and view it at a
        # 4-byte offset so the resulting array is misaligned.
        raw = np.zeros(orig.size * orig.dtype.itemsize + 8, dtype=np.uint8)
        shifted = np.frombuffer(raw.data, offset=4, count=orig.size,
                                dtype=orig.dtype)
        shifted.shape = orig.shape
        shifted[...] = orig
        current = arg_list[:]
        current[idx] = shifted
        func(*current, **kwargs)
        if shifted.ndim > 1:
            current[idx] = shifted.T
            func(*current, **kwargs)
@pytest.mark.xfail(run=False,
                   reason="Ticket #1152, triggers a segfault in rare cases.")
def test_lapack_misaligned():
    """Feed misaligned arrays to a collection of LAPACK-backed routines.

    Several entries historically segfaulted (see the per-entry comments);
    the whole test is therefore xfail'ed and never run.
    """
    M = np.eye(10, dtype=float)
    R = np.arange(100)
    R.shape = 10, 10
    # S is a misaligned float array (4-byte offset into a byte buffer).
    S = np.arange(20000, dtype=np.uint8)
    S = np.frombuffer(S.data, offset=4, count=100, dtype=float)
    S.shape = 10, 10
    b = np.ones(10)
    LU, piv = lu_factor(S)
    for (func, args, kwargs) in [
            (eig, (S,), dict(overwrite_a=True)),  # crash
            (eigvals, (S,), dict(overwrite_a=True)),  # no crash
            (lu, (S,), dict(overwrite_a=True)),  # no crash
            (lu_factor, (S,), dict(overwrite_a=True)),  # no crash
            (lu_solve, ((LU, piv), b), dict(overwrite_b=True)),
            (solve, (S, b), dict(overwrite_a=True, overwrite_b=True)),
            (svd, (M,), dict(overwrite_a=True)),  # no crash
            (svd, (R,), dict(overwrite_a=True)),  # no crash
            (svd, (S,), dict(overwrite_a=True)),  # crash
            (svdvals, (S,), dict()),  # no crash
            (svdvals, (S,), dict(overwrite_a=True)),  # crash
            (cholesky, (M,), dict(overwrite_a=True)),  # no crash
            (qr, (S,), dict(overwrite_a=True)),  # crash
            (rq, (S,), dict(overwrite_a=True)),  # crash
            (hessenberg, (S,), dict(overwrite_a=True)),  # crash
            (schur, (S,), dict(overwrite_a=True)),  # crash
            ]:
        check_lapack_misaligned(func, args, kwargs)
# not properly tested
# cholesky, rsf2csf, lu_solve, solve, eig_banded, eigvals_banded, eigh, diagsvd
class TestOverwrite:
    """Check that decomposition routines do not clobber their inputs by
    default (presumably via the assert_no_overwrite helper defined
    elsewhere in this file — see its definition for the exact contract).
    """

    def test_eig(self):
        assert_no_overwrite(eig, [(3, 3)])
        assert_no_overwrite(eig, [(3, 3), (3, 3)])

    def test_eigh(self):
        assert_no_overwrite(eigh, [(3, 3)])
        assert_no_overwrite(eigh, [(3, 3), (3, 3)])

    def test_eig_banded(self):
        assert_no_overwrite(eig_banded, [(3, 2)])

    def test_eigvals(self):
        assert_no_overwrite(eigvals, [(3, 3)])

    def test_eigvalsh(self):
        assert_no_overwrite(eigvalsh, [(3, 3)])

    def test_eigvals_banded(self):
        assert_no_overwrite(eigvals_banded, [(3, 2)])

    def test_hessenberg(self):
        assert_no_overwrite(hessenberg, [(3, 3)])

    def test_lu_factor(self):
        assert_no_overwrite(lu_factor, [(3, 3)])

    def test_lu_solve(self):
        x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 8]])
        xlu = lu_factor(x)
        assert_no_overwrite(lambda b: lu_solve(xlu, b), [(3,)])

    def test_lu(self):
        assert_no_overwrite(lu, [(3, 3)])

    def test_qr(self):
        assert_no_overwrite(qr, [(3, 3)])

    def test_rq(self):
        assert_no_overwrite(rq, [(3, 3)])

    def test_schur(self):
        assert_no_overwrite(schur, [(3, 3)])

    def test_schur_complex(self):
        # complex output from real input, so restrict dtypes to real
        assert_no_overwrite(lambda a: schur(a, 'complex'), [(3, 3)],
                            dtypes=[np.float32, np.float64])

    def test_svd(self):
        assert_no_overwrite(svd, [(3, 3)])
        assert_no_overwrite(lambda a: svd(a, lapack_driver='gesvd'), [(3, 3)])

    def test_svdvals(self):
        assert_no_overwrite(svdvals, [(3, 3)])
def _check_orth(n, dtype, skip_big=False):
X = np.ones((n, 2), dtype=float).astype(dtype)
eps = np.finfo(dtype).eps
tol = 1000 * eps
Y = orth(X)
assert_equal(Y.shape, (n, 1))
assert_allclose(Y, Y.mean(), atol=tol)
Y = orth(X.T)
assert_equal(Y.shape, (2, 1))
assert_allclose(Y, Y.mean(), atol=tol)
if n > 5 and not skip_big:
np.random.seed(1)
X = np.random.rand(n, 5) @ np.random.rand(5, n)
X = X + 1e-4 * np.random.rand(n, 1) @ np.random.rand(1, n)
X = X.astype(dtype)
Y = orth(X, rcond=1e-3)
assert_equal(Y.shape, (n, 5))
Y = orth(X, rcond=1e-6)
assert_equal(Y.shape, (n, 5 + 1))
@pytest.mark.slow
@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8,
                    reason="test only on 64-bit, else too slow")
def test_orth_memory_efficiency():
    """Regression test: orth() on a huge rank-1 matrix must not allocate
    an n-by-n intermediate."""
    # Pick n so that 16*n bytes is reasonable but 8*n*n bytes is unreasonable.
    # Keep in mind that @pytest.mark.slow tests are likely to be running
    # under configurations that support 4Gb+ memory for tests related to
    # 32 bit overflow.
    n = 10*1000*1000
    try:
        _check_orth(n, np.float64, skip_big=True)
    except MemoryError as e:
        raise AssertionError(
            'memory error perhaps caused by orth regression'
        ) from e
def test_orth():
    """Run the orth() checks over all float/complex dtypes and a range
    of matrix sizes."""
    for dtype in (np.float32, np.float64, np.complex64, np.complex128):
        for size in (1, 2, 3, 10, 100):
            _check_orth(size, dtype)
def test_null_space():
    """Exercise null_space() on rank-1, random rectangular, and
    nearly-rank-5 matrices over all float/complex dtypes."""
    np.random.seed(1)
    dtypes = [np.float32, np.float64, np.complex64, np.complex128]
    sizes = [1, 2, 3, 10, 100]

    for dt, n in itertools.product(dtypes, sizes):
        # Rank-1 matrix of ones: null space has dimension n - 1.
        X = np.ones((2, n), dtype=dt)
        eps = np.finfo(dt).eps
        tol = 1000 * eps

        Y = null_space(X)
        assert_equal(Y.shape, (n, n-1))
        assert_allclose(X @ Y, 0, atol=tol)

        Y = null_space(X.T)
        assert_equal(Y.shape, (2, 1))
        assert_allclose(X.T @ Y, 0, atol=tol)

        # Random full-row-rank matrix with more columns than rows.
        X = np.random.randn(1 + n//2, n)
        Y = null_space(X)
        assert_equal(Y.shape, (n, n - 1 - n//2))
        assert_allclose(X @ Y, 0, atol=tol)

        if n > 5:
            # Rank-5 matrix plus a tiny rank-1 perturbation: the detected
            # null-space dimension depends on the rcond cutoff.
            np.random.seed(1)
            X = np.random.rand(n, 5) @ np.random.rand(5, n)
            X = X + 1e-4 * np.random.rand(n, 1) @ np.random.rand(1, n)
            X = X.astype(dt)

            Y = null_space(X, rcond=1e-3)
            assert_equal(Y.shape, (n, n - 5))

            Y = null_space(X, rcond=1e-6)
            assert_equal(Y.shape, (n, n - 6))
def test_subspace_angles():
    """Test subspace_angles() against orthogonal Hadamard subspaces,
    MATLAB reference values, degenerate inputs, and complex inputs."""
    # Complementary Hadamard columns are mutually orthogonal: all angles
    # are pi/2; a subspace against itself gives all-zero angles.
    H = hadamard(8, float)
    A = H[:, :3]
    B = H[:, 3:]
    assert_allclose(subspace_angles(A, B), [np.pi / 2.] * 3, atol=1e-14)
    assert_allclose(subspace_angles(B, A), [np.pi / 2.] * 3, atol=1e-14)
    for x in (A, B):
        assert_allclose(subspace_angles(x, x), np.zeros(x.shape[1]),
                        atol=1e-14)
    # From MATLAB function "subspace", which effectively only returns the
    # last value that we calculate
    x = np.array(
        [[0.537667139546100, 0.318765239858981, 3.578396939725760, 0.725404224946106],  # noqa: E501
         [1.833885014595086, -1.307688296305273, 2.769437029884877, -0.063054873189656],  # noqa: E501
         [-2.258846861003648, -0.433592022305684, -1.349886940156521, 0.714742903826096],  # noqa: E501
         [0.862173320368121, 0.342624466538650, 3.034923466331855, -0.204966058299775]])  # noqa: E501
    expected = 1.481454682101605
    assert_allclose(subspace_angles(x[:, :2], x[:, 2:])[0], expected,
                    rtol=1e-12)
    assert_allclose(subspace_angles(x[:, 2:], x[:, :2])[0], expected,
                    rtol=1e-12)
    expected = 0.746361174247302
    assert_allclose(subspace_angles(x[:, :2], x[:, [2]]), expected, rtol=1e-12)
    assert_allclose(subspace_angles(x[:, [2]], x[:, :2]), expected, rtol=1e-12)
    expected = 0.487163718534313
    assert_allclose(subspace_angles(x[:, :3], x[:, [3]]), expected, rtol=1e-12)
    assert_allclose(subspace_angles(x[:, [3]], x[:, :3]), expected, rtol=1e-12)
    expected = 0.328950515907756
    assert_allclose(subspace_angles(x[:, :2], x[:, 1:]), [expected, 0],
                    atol=1e-12)
    # Degenerate conditions
    assert_raises(ValueError, subspace_angles, x[0], x)
    assert_raises(ValueError, subspace_angles, x, x[0])
    assert_raises(ValueError, subspace_angles, x[:-1], x)

    # Test branch if mask.any is True:
    A = np.array([[1, 0, 0],
                  [0, 1, 0],
                  [0, 0, 1],
                  [0, 0, 0],
                  [0, 0, 0]])
    B = np.array([[1, 0, 0],
                  [0, 1, 0],
                  [0, 0, 0],
                  [0, 0, 0],
                  [0, 0, 1]])
    expected = np.array([np.pi/2, 0, 0])
    assert_allclose(subspace_angles(A, B), expected, rtol=1e-12)

    # Complex
    # second column in "b" does not affect result, just there so that
    # b can have more cols than a, and vice-versa (both conditional code paths)
    a = [[1 + 1j], [0]]
    b = [[1 - 1j, 0], [0, 1]]
    assert_allclose(subspace_angles(a, b), 0., atol=1e-14)
    assert_allclose(subspace_angles(b, a), 0., atol=1e-14)
class TestCDF2RDF:
    """Tests for cdf2rdf(): conversion of a complex-diagonal eigensystem
    into its real block-diagonal form."""

    def matmul(self, a, b):
        # Batched matrix multiply used so stacked (…, M, M) inputs work.
        return np.einsum('...ij,...jk->...ik', a, b)

    def assert_eig_valid(self, w, v, x):
        # A valid (possibly real-block) eigensystem satisfies v @ w == x @ v.
        assert_array_almost_equal(
            self.matmul(v, w),
            self.matmul(x, v)
        )

    def test_single_array0x0real(self):
        # eig doesn't support 0x0 in old versions of numpy
        X = np.empty((0, 0))
        w, v = np.empty(0), np.empty((0, 0))
        wr, vr = cdf2rdf(w, v)
        self.assert_eig_valid(wr, vr, X)

    def test_single_array2x2_real(self):
        X = np.array([[1, 2], [3, -1]])
        w, v = np.linalg.eig(X)
        wr, vr = cdf2rdf(w, v)
        self.assert_eig_valid(wr, vr, X)

    def test_single_array2x2_complex(self):
        X = np.array([[1, 2], [-2, 1]])
        w, v = np.linalg.eig(X)
        wr, vr = cdf2rdf(w, v)
        self.assert_eig_valid(wr, vr, X)

    def test_single_array3x3_real(self):
        X = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6]])
        w, v = np.linalg.eig(X)
        wr, vr = cdf2rdf(w, v)
        self.assert_eig_valid(wr, vr, X)

    def test_single_array3x3_complex(self):
        X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]])
        w, v = np.linalg.eig(X)
        wr, vr = cdf2rdf(w, v)
        self.assert_eig_valid(wr, vr, X)

    def test_random_1d_stacked_arrays(self):
        # cannot test M == 0 due to bug in old numpy
        for M in range(1, 7):
            np.random.seed(999999999)
            X = np.random.rand(100, M, M)
            w, v = np.linalg.eig(X)
            wr, vr = cdf2rdf(w, v)
            self.assert_eig_valid(wr, vr, X)

    def test_random_2d_stacked_arrays(self):
        # cannot test M == 0 due to bug in old numpy
        for M in range(1, 7):
            X = np.random.rand(10, 10, M, M)
            w, v = np.linalg.eig(X)
            wr, vr = cdf2rdf(w, v)
            self.assert_eig_valid(wr, vr, X)

    def test_low_dimensionality_error(self):
        w, v = np.empty(()), np.array((2,))
        assert_raises(ValueError, cdf2rdf, w, v)

    def test_not_square_error(self):
        # Check that passing a non-square array raises a ValueError.
        w, v = np.arange(3), np.arange(6).reshape(3, 2)
        assert_raises(ValueError, cdf2rdf, w, v)

    def test_swapped_v_w_error(self):
        # Check that exchanging places of w and v raises ValueError.
        X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]])
        w, v = np.linalg.eig(X)
        assert_raises(ValueError, cdf2rdf, v, w)

    def test_non_associated_error(self):
        # Check that passing non-associated eigenvectors raises a ValueError.
        w, v = np.arange(3), np.arange(16).reshape(4, 4)
        assert_raises(ValueError, cdf2rdf, w, v)

    def test_not_conjugate_pairs(self):
        # Check that passing non-conjugate pairs raises a ValueError.
        X = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]])
        w, v = np.linalg.eig(X)
        assert_raises(ValueError, cdf2rdf, w, v)

        # different arrays in the stack, so not conjugate
        X = np.array([
            [[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]],
            [[1, 2, 3], [1, 2, 3], [2, 5, 6-1j]],
        ])
        w, v = np.linalg.eig(X)
        assert_raises(ValueError, cdf2rdf, w, v)
| 104,327
| 36.951255
| 120
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_solvers.py
|
import os
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_allclose
import pytest
from pytest import raises as assert_raises
from scipy.linalg import solve_sylvester
from scipy.linalg import solve_continuous_lyapunov, solve_discrete_lyapunov
from scipy.linalg import solve_continuous_are, solve_discrete_are
from scipy.linalg import block_diag, solve, LinAlgError
from scipy.sparse._sputils import matrix
def _load_data(name):
    """
    Load npz data file under data/

    Returns a copy of the data, rather than keeping the npz file open.
    """
    here = os.path.abspath(os.path.dirname(__file__))
    filename = os.path.join(here, 'data', name)
    # Copy everything out so the npz file handle can be closed immediately.
    with np.load(filename) as f:
        return dict(f.items())
class TestSolveLyapunov:
    """Tests for solve_continuous_lyapunov and solve_discrete_lyapunov.

    ``cases`` is a list of (a, q) pairs covering real/complex mixes, a
    literature example, a large companion matrix, and two gh-4176
    regressions using np.matrix inputs.
    """

    cases = [
        (np.array([[1, 2], [3, 4]]),
         np.array([[9, 10], [11, 12]])),
        # a, q all complex.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
        # a real; q complex.
        (np.array([[1.0, 2.0], [3.0, 5.0]]),
         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
        # a complex; q real.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[2.0, 2.0], [-1.0, 2.0]])),
        # An example from Kitagawa, 1977
        (np.array([[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3],
                   [1, 5, 2, 0, 7], [5, 3, 3, 1, 5]]),
         np.array([[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3],
                   [0, 2, 0, 1, 0], [1, 0, 3, 0, 4]])),
        # Companion matrix example. a complex; q real; a.shape[0] = 11
        (np.array([[0.100+0.j, 0.091+0.j, 0.082+0.j, 0.073+0.j, 0.064+0.j,
                    0.055+0.j, 0.046+0.j, 0.037+0.j, 0.028+0.j, 0.019+0.j,
                    0.010+0.j],
                   [1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
                    0.000+0.j],
                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
                    0.000+0.j]]),
         np.eye(11)),
        # https://github.com/scipy/scipy/issues/4176
        (matrix([[0, 1], [-1/2, -1]]),
         (matrix([0, 3]).T @ matrix([0, 3]).T.T)),
        # https://github.com/scipy/scipy/issues/4176
        (matrix([[0, 1], [-1/2, -1]]),
         (np.array(matrix([0, 3]).T @ matrix([0, 3]).T.T))),
    ]

    def test_continuous_squareness_and_shape(self):
        # Non-square or mismatched-shape inputs must raise.
        nsq = np.ones((3, 2))
        sq = np.eye(3)
        assert_raises(ValueError, solve_continuous_lyapunov, nsq, sq)
        assert_raises(ValueError, solve_continuous_lyapunov, sq, nsq)
        assert_raises(ValueError, solve_continuous_lyapunov, sq, np.eye(2))

    def check_continuous_case(self, a, q):
        # Verify the defining equation a x + x a^H = q.
        x = solve_continuous_lyapunov(a, q)
        assert_array_almost_equal(
            np.dot(a, x) + np.dot(x, a.conj().transpose()), q)

    def check_discrete_case(self, a, q, method=None):
        # Verify the defining equation a x a^H - x = -q.
        x = solve_discrete_lyapunov(a, q, method=method)
        assert_array_almost_equal(
            np.dot(np.dot(a, x), a.conj().transpose()) - x, -1.0*q)

    def test_cases(self):
        for case in self.cases:
            self.check_continuous_case(case[0], case[1])
            self.check_discrete_case(case[0], case[1])
            self.check_discrete_case(case[0], case[1], method='direct')
            self.check_discrete_case(case[0], case[1], method='bilinear')
def test_solve_continuous_are():
    """Test solve_continuous_are against the CAREX benchmark collection."""
    mat6 = _load_data('carex_6_data.npz')
    mat15 = _load_data('carex_15_data.npz')
    mat18 = _load_data('carex_18_data.npz')
    mat19 = _load_data('carex_19_data.npz')
    mat20 = _load_data('carex_20_data.npz')
    cases = [
        # Carex examples taken from (with default parameters):
        # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark
        #     Examples for the Numerical Solution of Algebraic Riccati
        #     Equations II: Continuous-Time Case', Tech. Report SPC 95_23,
        #     Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
        #
        # The format of the data is (a, b, q, r, knownfailure), where
        # knownfailure is None if the test passes or a string
        # indicating the reason for failure.
        #
        # Test Case 0: carex #1
        (np.diag([1.], 1),
         np.array([[0], [1]]),
         block_diag(1., 2.),
         1,
         None),
        # Test Case 1: carex #2
        (np.array([[4, 3], [-4.5, -3.5]]),
         np.array([[1], [-1]]),
         np.array([[9, 6], [6, 4.]]),
         1,
         None),
        # Test Case 2: carex #3
        (np.array([[0, 1, 0, 0],
                   [0, -1.89, 0.39, -5.53],
                   [0, -0.034, -2.98, 2.43],
                   [0.034, -0.0011, -0.99, -0.21]]),
         np.array([[0, 0], [0.36, -1.6], [-0.95, -0.032], [0.03, 0]]),
         np.array([[2.313, 2.727, 0.688, 0.023],
                   [2.727, 4.271, 1.148, 0.323],
                   [0.688, 1.148, 0.313, 0.102],
                   [0.023, 0.323, 0.102, 0.083]]),
         np.eye(2),
         None),
        # Test Case 3: carex #4
        (np.array([[-0.991, 0.529, 0, 0, 0, 0, 0, 0],
                   [0.522, -1.051, 0.596, 0, 0, 0, 0, 0],
                   [0, 0.522, -1.118, 0.596, 0, 0, 0, 0],
                   [0, 0, 0.522, -1.548, 0.718, 0, 0, 0],
                   [0, 0, 0, 0.922, -1.64, 0.799, 0, 0],
                   [0, 0, 0, 0, 0.922, -1.721, 0.901, 0],
                   [0, 0, 0, 0, 0, 0.922, -1.823, 1.021],
                   [0, 0, 0, 0, 0, 0, 0.922, -1.943]]),
         np.array([[3.84, 4.00, 37.60, 3.08, 2.36, 2.88, 3.08, 3.00],
                   [-2.88, -3.04, -2.80, -2.32, -3.32, -3.82, -4.12, -3.96]]
                  ).T * 0.001,
         np.array([[1.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.1],
                   [0.0, 1.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
                   [0.0, 0.0, 1.0, 0.0, 0.0, 0.5, 0.0, 0.0],
                   [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                   [0.5, 0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
                   [0.0, 0.0, 0.5, 0.0, 0.0, 0.1, 0.0, 0.0],
                   [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0],
                   [0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1]]),
         np.eye(2),
         None),
        # Test Case 4: carex #5
        (np.array(
            [[-4.019, 5.120, 0., 0., -2.082, 0., 0., 0., 0.870],
             [-0.346, 0.986, 0., 0., -2.340, 0., 0., 0., 0.970],
             [-7.909, 15.407, -4.069, 0., -6.450, 0., 0., 0., 2.680],
             [-21.816, 35.606, -0.339, -3.870, -17.800, 0., 0., 0., 7.390],
             [-60.196, 98.188, -7.907, 0.340, -53.008, 0., 0., 0., 20.400],
             [0, 0, 0, 0, 94.000, -147.200, 0., 53.200, 0.],
             [0, 0, 0, 0, 0, 94.000, -147.200, 0, 0],
             [0, 0, 0, 0, 0, 12.800, 0.000, -31.600, 0],
             [0, 0, 0, 0, 12.800, 0.000, 0.000, 18.800, -31.600]]),
         np.array([[0.010, -0.011, -0.151],
                   [0.003, -0.021, 0.000],
                   [0.009, -0.059, 0.000],
                   [0.024, -0.162, 0.000],
                   [0.068, -0.445, 0.000],
                   [0.000, 0.000, 0.000],
                   [0.000, 0.000, 0.000],
                   [0.000, 0.000, 0.000],
                   [0.000, 0.000, 0.000]]),
         np.eye(9),
         np.eye(3),
         None),
        # Test Case 5: carex #6
        (mat6['A'], mat6['B'], mat6['Q'], mat6['R'], None),
        # Test Case 6: carex #7
        (np.array([[1, 0], [0, -2.]]),
         np.array([[1e-6], [0]]),
         np.ones((2, 2)),
         1.,
         'Bad residual accuracy'),
        # Test Case 7: carex #8
        (block_diag(-0.1, -0.02),
         np.array([[0.100, 0.000], [0.001, 0.010]]),
         np.array([[100, 1000], [1000, 10000]]),
         np.ones((2, 2)) + block_diag(1e-6, 0),
         None),
        # Test Case 8: carex #9
        (np.array([[0, 1e6], [0, 0]]),
         np.array([[0], [1.]]),
         np.eye(2),
         1.,
         None),
        # Test Case 9: carex #10
        (np.array([[1.0000001, 1], [1., 1.0000001]]),
         np.eye(2),
         np.eye(2),
         np.eye(2),
         None),
        # Test Case 10: carex #11
        (np.array([[3, 1.], [4, 2]]),
         np.array([[1], [1]]),
         np.array([[-11, -5], [-5, -2.]]),
         1.,
         None),
        # Test Case 11: carex #12
        (np.array([[7000000., 2000000., -0.],
                   [2000000., 6000000., -2000000.],
                   [0., -2000000., 5000000.]]) / 3,
         np.eye(3),
         np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]]).dot(
            np.diag([1e-6, 1, 1e6])).dot(
            np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]])) / 9,
         np.eye(3) * 1e6,
         'Bad Residual Accuracy'),
        # Test Case 12: carex #13
        (np.array([[0, 0.4, 0, 0],
                   [0, 0, 0.345, 0],
                   [0, -0.524e6, -0.465e6, 0.262e6],
                   [0, 0, 0, -1e6]]),
         np.array([[0, 0, 0, 1e6]]).T,
         np.diag([1, 0, 1, 0]),
         1.,
         None),
        # Test Case 13: carex #14
        (np.array([[-1e-6, 1, 0, 0],
                   [-1, -1e-6, 0, 0],
                   [0, 0, 1e-6, 1],
                   [0, 0, -1, 1e-6]]),
         np.ones((4, 1)),
         np.ones((4, 4)),
         1.,
         None),
        # Test Case 14: carex #15
        (mat15['A'], mat15['B'], mat15['Q'], mat15['R'], None),
        # Test Case 15: carex #16
        (np.eye(64, 64, k=-1) + np.eye(64, 64)*(-2.) + np.rot90(
            block_diag(1, np.zeros((62, 62)), 1)) + np.eye(64, 64, k=1),
         np.eye(64),
         np.eye(64),
         np.eye(64),
         None),
        # Test Case 16: carex #17
        (np.diag(np.ones((20, )), 1),
         np.flipud(np.eye(21, 1)),
         np.eye(21, 1) * np.eye(21, 1).T,
         1,
         'Bad Residual Accuracy'),
        # Test Case 17: carex #18
        (mat18['A'], mat18['B'], mat18['Q'], mat18['R'], None),
        # Test Case 18: carex #19
        (mat19['A'], mat19['B'], mat19['Q'], mat19['R'],
         'Bad Residual Accuracy'),
        # Test Case 19: carex #20
        (mat20['A'], mat20['B'], mat20['Q'], mat20['R'],
         'Bad Residual Accuracy')
    ]
    # Makes the minimum precision requirements customized to the test.
    # Here numbers represent the number of decimals that agrees with zero
    # matrix when the solution x is plugged in to the equation.
    #
    # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
    #
    # If the test is failing use "None" for that entry.
    #
    min_decimal = (14, 12, 13, 14, 11, 6, None, 5, 7, 14, 14,
                   None, 9, 14, 13, 14, None, 12, None, None)

    def _test_factory(case, dec):
        """Checks if 0 = XA + A'X - XB(R)^{-1} B'X + Q is true"""
        a, b, q, r, knownfailure = case
        if knownfailure:
            pytest.xfail(reason=knownfailure)

        x = solve_continuous_are(a, b, q, r)
        # Residual of the continuous-time Riccati equation.
        res = x.dot(a) + a.conj().T.dot(x) + q
        out_fact = x.dot(b)
        res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T))
        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)

    for ind, case in enumerate(cases):
        _test_factory(case, min_decimal[ind])
def test_solve_discrete_are():
    """Test solve_discrete_are against the DAREX benchmark collection,
    plus an infeasible continuous-time example that must raise."""

    cases = [
        # Darex examples taken from (with default parameters):
        # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark
        #     Examples for the Numerical Solution of Algebraic Riccati
        #     Equations II: Discrete-Time Case', Tech. Report SPC 95_23,
        #     Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
        # [2] T. GUDMUNDSSON, C. KENNEY, A.J. LAUB: 'Scaling of the
        #     Discrete-Time Algebraic Riccati Equation to Enhance Stability
        #     of the Schur Solution Method', IEEE Trans.Aut.Cont., vol.37(4)
        #
        # The format of the data is (a, b, q, r, knownfailure), where
        # knownfailure is None if the test passes or a string
        # indicating the reason for failure.
        #
        # TEST CASE 0 : Complex a; real b, q, r
        (np.array([[2, 1-2j], [0, -3j]]),
         np.array([[0], [1]]),
         np.array([[1, 0], [0, 2]]),
         np.array([[1]]),
         None),
        # TEST CASE 1 :Real a, q, r; complex b
        (np.array([[2, 1], [0, -1]]),
         np.array([[-2j], [1j]]),
         np.array([[1, 0], [0, 2]]),
         np.array([[1]]),
         None),
        # TEST CASE 2 : Real a, b; complex q, r
        (np.array([[3, 1], [0, -1]]),
         np.array([[1, 2], [1, 3]]),
         np.array([[1, 1+1j], [1-1j, 2]]),
         np.array([[2, -2j], [2j, 3]]),
         None),
        # TEST CASE 3 : User-reported gh-2251 (Trac #1732)
        (np.array([[0.63399379, 0.54906824, 0.76253406],
                   [0.5404729, 0.53745766, 0.08731853],
                   [0.27524045, 0.84922129, 0.4681622]]),
         np.array([[0.96861695], [0.05532739], [0.78934047]]),
         np.eye(3),
         np.eye(1),
         None),
        # TEST CASE 4 : darex #1
        (np.array([[4, 3], [-4.5, -3.5]]),
         np.array([[1], [-1]]),
         np.array([[9, 6], [6, 4]]),
         np.array([[1]]),
         None),
        # TEST CASE 5 : darex #2
        (np.array([[0.9512, 0], [0, 0.9048]]),
         np.array([[4.877, 4.877], [-1.1895, 3.569]]),
         np.array([[0.005, 0], [0, 0.02]]),
         np.array([[1/3, 0], [0, 3]]),
         None),
        # TEST CASE 6 : darex #3
        (np.array([[2, -1], [1, 0]]),
         np.array([[1], [0]]),
         np.array([[0, 0], [0, 1]]),
         np.array([[0]]),
         None),
        # TEST CASE 7 : darex #4 (skipped the gen. Ric. term S)
        (np.array([[0, 1], [0, -1]]),
         np.array([[1, 0], [2, 1]]),
         np.array([[-4, -4], [-4, 7]]) * (1/11),
         np.array([[9, 3], [3, 1]]),
         None),
        # TEST CASE 8 : darex #5
        (np.array([[0, 1], [0, 0]]),
         np.array([[0], [1]]),
         np.array([[1, 2], [2, 4]]),
         np.array([[1]]),
         None),
        # TEST CASE 9 : darex #6
        (np.array([[0.998, 0.067, 0, 0],
                   [-.067, 0.998, 0, 0],
                   [0, 0, 0.998, 0.153],
                   [0, 0, -.153, 0.998]]),
         np.array([[0.0033, 0.0200],
                   [0.1000, -.0007],
                   [0.0400, 0.0073],
                   [-.0028, 0.1000]]),
         np.array([[1.87, 0, 0, -0.244],
                   [0, 0.744, 0.205, 0],
                   [0, 0.205, 0.589, 0],
                   [-0.244, 0, 0, 1.048]]),
         np.eye(2),
         None),
        # TEST CASE 10 : darex #7
        (np.array([[0.984750, -.079903, 0.0009054, -.0010765],
                   [0.041588, 0.998990, -.0358550, 0.0126840],
                   [-.546620, 0.044916, -.3299100, 0.1931800],
                   [2.662400, -.100450, -.9245500, -.2632500]]),
         np.array([[0.0037112, 0.0007361],
                   [-.0870510, 9.3411e-6],
                   [-1.198440, -4.1378e-4],
                   [-3.192700, 9.2535e-4]]),
         np.eye(4)*1e-2,
         np.eye(2),
         None),
        # TEST CASE 11 : darex #8
        (np.array([[-0.6000000, -2.2000000, -3.6000000, -5.4000180],
                   [1.0000000, 0.6000000, 0.8000000, 3.3999820],
                   [0.0000000, 1.0000000, 1.8000000, 3.7999820],
                   [0.0000000, 0.0000000, 0.0000000, -0.9999820]]),
         np.array([[1.0, -1.0, -1.0, -1.0],
                   [0.0, 1.0, -1.0, -1.0],
                   [0.0, 0.0, 1.0, -1.0],
                   [0.0, 0.0, 0.0, 1.0]]),
         np.array([[2, 1, 3, 6],
                   [1, 2, 2, 5],
                   [3, 2, 6, 11],
                   [6, 5, 11, 22]]),
         np.eye(4),
         None),
        # TEST CASE 12 : darex #9
        (np.array([[95.4070, 1.9643, 0.3597, 0.0673, 0.0190],
                   [40.8490, 41.3170, 16.0840, 4.4679, 1.1971],
                   [12.2170, 26.3260, 36.1490, 15.9300, 12.3830],
                   [4.1118, 12.8580, 27.2090, 21.4420, 40.9760],
                   [0.1305, 0.5808, 1.8750, 3.6162, 94.2800]]) * 0.01,
         np.array([[0.0434, -0.0122],
                   [2.6606, -1.0453],
                   [3.7530, -5.5100],
                   [3.6076, -6.6000],
                   [0.4617, -0.9148]]) * 0.01,
         np.eye(5),
         np.eye(2),
         None),
        # TEST CASE 13 : darex #10
        (np.kron(np.eye(2), np.diag([1, 1], k=1)),
         np.kron(np.eye(2), np.array([[0], [0], [1]])),
         np.array([[1, 1, 0, 0, 0, 0],
                   [1, 1, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 1, -1, 0],
                   [0, 0, 0, -1, 1, 0],
                   [0, 0, 0, 0, 0, 0]]),
         np.array([[3, 0], [0, 1]]),
         None),
        # TEST CASE 14 : darex #11
        (0.001 * np.array(
            [[870.1, 135.0, 11.59, .5014, -37.22, .3484, 0, 4.242, 7.249],
             [76.55, 897.4, 12.72, 0.5504, -40.16, .3743, 0, 4.53, 7.499],
             [-127.2, 357.5, 817, 1.455, -102.8, .987, 0, 11.85, 18.72],
             [-363.5, 633.9, 74.91, 796.6, -273.5, 2.653, 0, 31.72, 48.82],
             [-960, 1645.9, -128.9, -5.597, 71.42, 7.108, 0, 84.52, 125.9],
             [-664.4, 112.96, -88.89, -3.854, 84.47, 13.6, 0, 144.3, 101.6],
             [-410.2, 693, -54.71, -2.371, 66.49, 12.49, .1063, 99.97, 69.67],
             [-179.9, 301.7, -23.93, -1.035, 60.59, 22.16, 0, 213.9, 35.54],
             [-345.1, 580.4, -45.96, -1.989, 105.6, 19.86, 0, 219.1, 215.2]]),
         np.array([[4.7600, -0.5701, -83.6800],
                   [0.8790, -4.7730, -2.7300],
                   [1.4820, -13.1200, 8.8760],
                   [3.8920, -35.1300, 24.8000],
                   [10.3400, -92.7500, 66.8000],
                   [7.2030, -61.5900, 38.3400],
                   [4.4540, -36.8300, 20.2900],
                   [1.9710, -15.5400, 6.9370],
                   [3.7730, -30.2800, 14.6900]]) * 0.001,
         np.diag([50, 0, 0, 0, 50, 0, 0, 0, 0]),
         np.eye(3),
         None),
        # TEST CASE 15 : darex #12 - numerically least accurate example
        (np.array([[0, 1e6], [0, 0]]),
         np.array([[0], [1]]),
         np.eye(2),
         np.array([[1]]),
         "Presumed issue with OpenBLAS, see gh-16926"),
        # TEST CASE 16 : darex #13
        (np.array([[16, 10, -2],
                   [10, 13, -8],
                   [-2, -8, 7]]) * (1/9),
         np.eye(3),
         1e6 * np.eye(3),
         1e6 * np.eye(3),
         "Issue with OpenBLAS, see gh-16926"),
        # TEST CASE 17 : darex #14
        (np.array([[1 - 1/1e8, 0, 0, 0],
                   [1, 0, 0, 0],
                   [0, 1, 0, 0],
                   [0, 0, 1, 0]]),
         np.array([[1e-08], [0], [0], [0]]),
         np.diag([0, 0, 0, 1]),
         np.array([[0.25]]),
         None),
        # TEST CASE 18 : darex #15
        (np.eye(100, k=1),
         np.flipud(np.eye(100, 1)),
         np.eye(100),
         np.array([[1]]),
         None)
    ]
    # Makes the minimum precision requirements customized to the test.
    # Here numbers represent the number of decimals that agrees with zero
    # matrix when the solution x is plugged in to the equation.
    #
    # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
    #
    # If the test is failing use "None" for that entry.
    #
    min_decimal = (12, 14, 13, 14, 13, 16, 18, 14, 14, 13,
                   14, 13, 13, 14, 12, 2, 5, 6, 10)

    max_tol = [1.5 * 10**-ind for ind in min_decimal]
    # relaxed tolerance in gh-18012 after bump to OpenBLAS
    max_tol[11] = 2.5e-13

    def _test_factory(case, atol):
        """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
        a, b, q, r, knownfailure = case
        if knownfailure:
            pytest.xfail(reason=knownfailure)

        x = solve_discrete_are(a, b, q, r)
        # Residual of the discrete-time Riccati equation.
        res = a.conj().T.dot(x.dot(a)) - x + q
        res -= a.conj().T.dot(x.dot(b)).dot(
            solve(r+b.conj().T.dot(x.dot(b)), b.conj().T).dot(x.dot(a))
        )
        # changed from
        #   assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
        # in gh-18012 as it's easier to relax a tolerance and allclose is
        # preferred
        assert_allclose(res, np.zeros_like(res), atol=atol)

    for ind, case in enumerate(cases):
        _test_factory(case, max_tol[ind])

    # An infeasible example taken from https://arxiv.org/abs/1505.04861v1
    A = np.triu(np.ones((3, 3)))
    A[0, 1] = -1
    B = np.array([[1, 1, 0], [0, 0, 1]]).T
    Q = np.full_like(A, -2) + np.diag([8, -1, -1.9])
    R = np.diag([-10, 0.1])
    assert_raises(LinAlgError, solve_continuous_are, A, B, Q, R)
def test_solve_generalized_continuous_are():
    """Check solve_continuous_are with nontrivial E and S matrices.

    Two random examples differing only in the S term are solved, and the
    residual of the generalized CARE,
    A'XE + E'XA + Q - (E'XB + S) R^-1 (E'XB + S)' = 0, is verified.
    """
    a_mat = np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
                      [4.617139e-02, 6.948286e-01, 3.444608e-02],
                      [9.713178e-02, 3.170995e-01, 4.387444e-01]])
    b_mat = np.array([[3.815585e-01, 1.868726e-01],
                      [7.655168e-01, 4.897644e-01],
                      [7.951999e-01, 4.455862e-01]])
    e_mat = np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
                      [7.093648e-01, 6.797027e-01, 1.189977e-01],
                      [7.546867e-01, 6.550980e-01, 4.983641e-01]])
    # The two cases share (a, b, q, r, e) and differ only in s.
    cases = [
        (a_mat, b_mat, np.eye(3), np.eye(2), e_mat, np.zeros((3, 2)), None),
        (a_mat, b_mat, np.eye(3), np.eye(2), e_mat, np.ones((3, 2)), None),
    ]
    min_decimal = (10, 10)

    def _check_case(case, dec):
        """Solve one case and verify the CARE residual vanishes."""
        a, b, q, r, e, s, knownfailure = case
        if knownfailure:
            pytest.xfail(reason=knownfailure)
        x = solve_continuous_are(a, b, q, r, e, s)
        res = a.conj().T.dot(x.dot(e)) + e.conj().T.dot(x.dot(a)) + q
        out_fact = e.conj().T.dot(x).dot(b) + s
        res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T))
        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)

    for case, dec in zip(cases, min_decimal):
        _check_case(case, dec)
def test_solve_generalized_discrete_are():
    """Check solve_discrete_are with nontrivial E and S matrices.

    The residual of the generalized DARE,
    A'XA - E'XE + Q - (A'XB + S)(R + B'XB)^-1 (A'XB + S)' = 0,
    is verified for two random examples and one user-reported case
    (PR-6616) where E is None but S is provided.
    """
    mat20170120 = _load_data('gendare_20170120_data.npz')
    a_mat = np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
                      [4.617139e-02, 6.948286e-01, 3.444608e-02],
                      [9.713178e-02, 3.170995e-01, 4.387444e-01]])
    b_mat = np.array([[3.815585e-01, 1.868726e-01],
                      [7.655168e-01, 4.897644e-01],
                      [7.951999e-01, 4.455862e-01]])
    e_mat = np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
                      [7.093648e-01, 6.797027e-01, 1.189977e-01],
                      [7.546867e-01, 6.550980e-01, 4.983641e-01]])
    cases = [
        # Two random examples differing only in the s term.
        (a_mat, b_mat, np.eye(3), np.eye(2), e_mat, np.zeros((3, 2)), None),
        (a_mat, b_mat, np.eye(3), np.eye(2), e_mat, np.ones((3, 2)), None),
        # user-reported (under PR-6616) 20-Jan-2017
        # tests against the case where E is None but S is provided
        (mat20170120['A'], mat20170120['B'], mat20170120['Q'],
         mat20170120['R'], None, mat20170120['S'], None),
    ]
    max_atol = (1.5e-11, 1.5e-11, 3.5e-16)

    def _check_case(case, atol):
        """Solve one case and verify the DARE residual vanishes."""
        a, b, q, r, e, s, knownfailure = case
        if knownfailure:
            pytest.xfail(reason=knownfailure)
        x = solve_discrete_are(a, b, q, r, e, s)
        # Fill in the defaults the solver itself assumes.
        e_eff = np.eye(a.shape[0]) if e is None else e
        s_eff = np.zeros_like(b) if s is None else s
        res = a.conj().T.dot(x.dot(a)) - e_eff.conj().T.dot(x.dot(e_eff)) + q
        res -= (a.conj().T.dot(x.dot(b)) + s_eff).dot(
            solve(r + b.conj().T.dot(x.dot(b)),
                  b.conj().T.dot(x.dot(a)) + s_eff.conj().T)
        )
        # atol-based comparison (gh-17950): the decimal-based check was
        # too strict on 32-bit Linux.
        assert_allclose(res, np.zeros_like(res), atol=atol)

    for case, atol in zip(cases, max_atol):
        _check_case(case, atol)
def test_are_validate_args():
    """Argument validation of solve_continuous_are / solve_discrete_are.

    Bug fix: the original version only *defined* nested ``test_*``
    functions and never called them — pytest does not collect nested
    functions, so none of these assertions ever ran.  The checks are now
    invoked explicitly at the end of the test.
    """
    def check_square_shape():
        # a, q, r (and e, if given) must be square.
        nsq = np.ones((3, 2))
        sq = np.eye(3)
        for x in (solve_continuous_are, solve_discrete_are):
            assert_raises(ValueError, x, nsq, 1, 1, 1)
            assert_raises(ValueError, x, sq, sq, nsq, 1)
            assert_raises(ValueError, x, sq, sq, sq, nsq)
            assert_raises(ValueError, x, sq, sq, sq, sq, nsq)

    def check_compatible_sizes():
        # b, e, q, r must be size-compatible with a.
        nsq = np.ones((3, 2))
        sq = np.eye(4)
        for x in (solve_continuous_are, solve_discrete_are):
            assert_raises(ValueError, x, sq, nsq, 1, 1)
            assert_raises(ValueError, x, sq, sq, sq, sq, sq, nsq)
            assert_raises(ValueError, x, sq, sq, np.eye(3), sq)
            assert_raises(ValueError, x, sq, sq, sq, np.eye(3))
            assert_raises(ValueError, x, sq, sq, sq, sq, np.eye(3))

    def check_symmetry():
        # q and r must be symmetric/Hermitian.
        nsym = np.arange(9).reshape(3, 3)
        sym = np.eye(3)
        for x in (solve_continuous_are, solve_discrete_are):
            assert_raises(ValueError, x, sym, sym, nsym, sym)
            assert_raises(ValueError, x, sym, sym, sym, nsym)

    def check_singularity():
        # Singular e (both solvers) and singular r (continuous) are invalid.
        sing = np.full((3, 3), 1e12)
        sing[2, 2] -= 1
        sq = np.eye(3)
        for x in (solve_continuous_are, solve_discrete_are):
            assert_raises(ValueError, x, sq, sq, sq, sq, sing)
        assert_raises(ValueError, solve_continuous_are, sq, sq, sq, sing)

    def check_finiteness():
        # A non-finite entry in any argument is invalid.
        nm = np.full((2, 2), np.nan)
        sq = np.eye(2)
        for x in (solve_continuous_are, solve_discrete_are):
            assert_raises(ValueError, x, nm, sq, sq, sq)
            assert_raises(ValueError, x, sq, nm, sq, sq)
            assert_raises(ValueError, x, sq, sq, nm, sq)
            assert_raises(ValueError, x, sq, sq, sq, nm)
            assert_raises(ValueError, x, sq, sq, sq, sq, nm)
            assert_raises(ValueError, x, sq, sq, sq, sq, sq, nm)

    # Actually run every check; previously these were dead code.
    check_square_shape()
    check_compatible_sizes()
    check_symmetry()
    check_singularity()
    check_finiteness()
class TestSolveSylvester:
    """Tests for solve_sylvester, which solves A X + X B = C."""

    cases = [
        # a, b, c all real.
        (np.array([[1, 2], [0, 4]]),
         np.array([[5, 6], [0, 8]]),
         np.array([[9, 10], [11, 12]])),
        # a, b, c all real, 4x4. a and b have non-trival 2x2 blocks in their
        # quasi-triangular form.
        (np.array([[1.0, 0, 0, 0],
                   [0, 1.0, 2.0, 0.0],
                   [0, 0, 3.0, -4],
                   [0, 0, 2, 5]]),
         np.array([[2.0, 0, 0, 1.0],
                   [0, 1.0, 0.0, 0.0],
                   [0, 0, 1.0, -1],
                   [0, 0, 1, 1]]),
         np.array([[1.0, 0, 0, 0],
                   [0, 1.0, 0, 0],
                   [0, 0, 1.0, 0],
                   [0, 0, 0, 1.0]])),
        # a, b, c all complex.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[-1.0, 2j], [3.0, 4.0]]),
         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
        # a and b real; c complex.
        (np.array([[1.0, 2.0], [3.0, 5.0]]),
         np.array([[-1.0, 0], [3.0, 4.0]]),
         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
        # a and c complex; b real.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[-1.0, 0], [3.0, 4.0]]),
         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
        # a complex; b and c real.
        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
         np.array([[-1.0, 0], [3.0, 4.0]]),
         np.array([[2.0, 2.0], [-1.0, 2.0]])),
        # not square matrices, real
        (np.array([[8, 1, 6], [3, 5, 7], [4, 9, 2]]),
         np.array([[2, 3], [4, 5]]),
         np.array([[1, 2], [3, 4], [5, 6]])),
        # not square matrices, complex
        (np.array([[8, 1j, 6+2j], [3, 5, 7], [4, 9, 2]]),
         np.array([[2, 3], [4, 5-1j]]),
         np.array([[1, 2j], [3, 4j], [5j, 6+7j]])),
    ]

    def check_case(self, a, b, c):
        """Solve the equation and verify the residual A X + X B - C ~ 0."""
        x = solve_sylvester(a, b, c)
        lhs = np.dot(a, x) + np.dot(x, b)
        assert_array_almost_equal(lhs, c)

    def test_cases(self):
        for a, b, c in self.cases:
            self.check_case(a, b, c)

    def test_trivial(self):
        # With A = I (2x2) and B = [[1]], the solution is simply C / 2.
        a = np.eye(2)
        b = np.array([[1.0]])
        c = np.full((2, 1), 2.0)
        x = solve_sylvester(a, b, c)
        assert_array_almost_equal(x, np.full((2, 1), 1.0))
| 31,556
| 39.6139
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_procrustes.py
|
from itertools import product, permutations
import numpy as np
from numpy.testing import assert_array_less, assert_allclose
from pytest import raises as assert_raises
from scipy.linalg import inv, eigh, norm
from scipy.linalg import orthogonal_procrustes
from scipy.sparse._sputils import matrix
def test_orthogonal_procrustes_ndim_too_large():
    """Inputs with more than two dimensions must raise ValueError."""
    np.random.seed(1234)
    A, B = (np.random.randn(3, 4, 5) for _ in range(2))
    assert_raises(ValueError, orthogonal_procrustes, A, B)
def test_orthogonal_procrustes_ndim_too_small():
    """One-dimensional inputs must raise ValueError."""
    np.random.seed(1234)
    A, B = (np.random.randn(3) for _ in range(2))
    assert_raises(ValueError, orthogonal_procrustes, A, B)
def test_orthogonal_procrustes_shape_mismatch():
    """Every ordered pair of differing 2-D shapes must raise ValueError."""
    np.random.seed(1234)
    shapes = ((3, 3), (3, 4), (4, 3), (4, 4))
    for shape_a, shape_b in permutations(shapes, 2):
        A = np.random.randn(*shape_a)
        B = np.random.randn(*shape_b)
        assert_raises(ValueError, orthogonal_procrustes, A, B)
def test_orthogonal_procrustes_checkfinite_exception():
    """A NaN or inf entry in either input must raise ValueError."""
    np.random.seed(1234)
    m, n = 2, 3
    A_good = np.random.randn(m, n)
    B_good = np.random.randn(m, n)
    for bad_value in (np.inf, -np.inf, np.nan):
        A_bad = A_good.copy()
        A_bad[1, 2] = bad_value
        B_bad = B_good.copy()
        B_bad[1, 2] = bad_value
        # Every combination containing at least one bad matrix must fail.
        for A, B in ((A_good, B_bad), (A_bad, B_good), (A_bad, B_bad)):
            assert_raises(ValueError, orthogonal_procrustes, A, B)
def test_orthogonal_procrustes_scale_invariance():
    """The fitted rotation must not depend on the overall scale of A or B."""
    np.random.seed(1234)
    m, n = 4, 3
    for _ in range(3):
        A_orig = np.random.randn(m, n)
        B_orig = np.random.randn(m, n)
        R_orig, _ = orthogonal_procrustes(A_orig, B_orig)
        for A_scale in np.square(np.random.randn(3)):
            for B_scale in np.square(np.random.randn(3)):
                R, _ = orthogonal_procrustes(A_orig * A_scale,
                                             B_orig * B_scale)
                assert_allclose(R, R_orig)
def test_orthogonal_procrustes_array_conversion():
    """Lists and matrix inputs must give the same result as ndarrays."""
    np.random.seed(1234)
    for m, n in ((6, 4), (4, 4), (4, 6)):
        A_arr = np.random.randn(m, n)
        B_arr = np.random.randn(m, n)
        R_arr, _ = orthogonal_procrustes(A_arr, B_arr)
        AR_arr = A_arr.dot(R_arr)
        As = (A_arr, A_arr.tolist(), matrix(A_arr))
        Bs = (B_arr, B_arr.tolist(), matrix(B_arr))
        for A, B in product(As, Bs):
            R, _ = orthogonal_procrustes(A, B)
            assert_allclose(A_arr.dot(R), AR_arr)
def test_orthogonal_procrustes():
    """Recover a known orthogonal map, then beat it on perturbed data."""
    np.random.seed(1234)
    for m, n in ((6, 4), (4, 4), (4, 6)):
        # Random target B, and a random orthogonal V obtained from the
        # eigendecomposition of a sampled symmetric matrix.
        B = np.random.randn(m, n)
        X = np.random.randn(n, n)
        w, V = eigh(X.T + X)
        assert_allclose(inv(V), V.T)
        # A is B rotated by the known orthogonal transformation V.T,
        # so procrustes must recover an exact orthogonal map back to B.
        A = np.dot(B, V.T)
        R, s = orthogonal_procrustes(A, B)
        assert_allclose(inv(R), R.T)
        assert_allclose(A.dot(R), B)
        # Perturb A.  The refit rotation must be orthogonal and must
        # approximate B better (in Frobenius norm) than the old rotation
        # computed from the unperturbed input.
        A_perturbed = A + 1e-2 * np.random.randn(m, n)
        R_prime, s = orthogonal_procrustes(A_perturbed, B)
        assert_allclose(inv(R_prime), R_prime.T)
        naive_error = norm(A_perturbed.dot(R) - B, ord='fro')
        optim_error = norm(A_perturbed.dot(R_prime) - B, ord='fro')
        assert_array_less(optim_error, naive_error)
def _centered(A):
mu = A.mean(axis=0)
return A - mu, mu
def test_orthogonal_procrustes_exact_example():
    """A small exactly-solvable fit.

    The square a-b-c-d in the left half-plane maps exactly onto the
    diamond w-x-y-z on the right via translation, scaling, reflection,
    and rotation, so the reconstruction must match to tight tolerance.
    """
    src = np.array([[-3, 3], [-2, 3], [-2, 2], [-3, 2]], dtype=float)
    dst = np.array([[3, 2], [1, 0], [3, -2], [5, 0]], dtype=float)
    src_c, _ = _centered(src)
    dst_c, dst_mu = _centered(dst)
    R, s = orthogonal_procrustes(src_c, dst_c)
    scale = s / np.square(norm(src_c))
    reconstructed = scale * src_c.dot(R) + dst_mu
    assert_allclose(reconstructed, dst, atol=1e-8)
def test_orthogonal_procrustes_stretched_example():
    """Inexact fit against a target stretched along the y axis.

    Also verifies that the normalized disparity is symmetric in the two
    point sets.
    """
    src = np.array([[-3, 3], [-2, 3], [-2, 2], [-3, 2]], dtype=float)
    dst = np.array([[3, 40], [1, 0], [3, -40], [5, 0]], dtype=float)
    src_c, src_mu = _centered(src)
    dst_c, dst_mu = _centered(dst)
    R, s = orthogonal_procrustes(src_c, dst_c)
    scale = s / np.square(norm(src_c))
    dst_approx = scale * src_c.dot(R) + dst_mu
    expected = np.array([[3, 21], [-18, 0], [3, -21], [24, 0]], dtype=float)
    assert_allclose(dst_approx, expected, atol=1e-8)
    # Disparity must be identical when the roles of the sets are swapped.
    expected_disparity = 0.4501246882793018
    fwd_disparity = np.square(norm(dst_approx - dst) / norm(dst_c))
    assert_allclose(fwd_disparity, expected_disparity)
    R, s = orthogonal_procrustes(dst_c, src_c)
    scale = s / np.square(norm(dst_c))
    src_approx = scale * dst_c.dot(R) + src_mu
    rev_disparity = np.square(norm(src_approx - src) / norm(src_c))
    assert_allclose(rev_disparity, expected_disparity)
def test_orthogonal_procrustes_skbio_example():
    """Exact fit borrowed from scikit-bio: translation, scaling, reflection.

    The hook a-b-c-d in the upper half-plane maps exactly onto the hook
    w-x-y-z below the axis.
    """
    src = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], dtype=float)
    dst = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], dtype=float)
    dst_standardized = np.array([
        [-0.13363062, 0.6681531],
        [-0.13363062, 0.13363062],
        [-0.13363062, -0.40089186],
        [0.40089186, -0.40089186]])
    src_c, _ = _centered(src)
    dst_c, dst_mu = _centered(dst)
    R, s = orthogonal_procrustes(src_c, dst_c)
    scale = s / np.square(norm(src_c))
    reconstructed = scale * src_c.dot(R) + dst_mu
    assert_allclose(reconstructed, dst)
    # Centered-and-normalized target matches the published values.
    assert_allclose(dst_c / norm(dst_c), dst_standardized)
| 6,758
| 34.203125
| 80
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_special_matrices.py
|
import pytest
import numpy as np
from numpy import arange, add, array, eye, copy, sqrt
from numpy.testing import (assert_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose)
from pytest import raises as assert_raises
from scipy.fft import fft
from scipy.special import comb
from scipy.linalg import (toeplitz, hankel, circulant, hadamard, leslie, dft,
companion, tri, triu, tril, kron, block_diag,
helmert, hilbert, invhilbert, pascal, invpascal,
fiedler, fiedler_companion, eigvals,
convolution_matrix)
from numpy.linalg import cond
def get_mat(n):
    """Return the n x n integer matrix with entries m[i, j] = i + j."""
    idx = arange(n)
    return add.outer(idx, idx)
dep_filter = np.testing.suppress_warnings()
dep_filter.filter(DeprecationWarning, "'tri'/'tril/'triu'")
@dep_filter
class TestTri:
    """Tests for the deprecated ``tri`` lower-triangle constructor."""

    @staticmethod
    def _expected(m, n, k=0):
        """Reference m x n matrix: ones on and below the k-th diagonal."""
        return array([[1 if j - i <= k else 0 for j in range(n)]
                      for i in range(m)])

    def test_basic(self):
        assert_equal(tri(4), self._expected(4, 4))
        assert_equal(tri(4, dtype='f'), self._expected(4, 4).astype('f'))

    def test_diag(self):
        assert_equal(tri(4, k=1), self._expected(4, 4, k=1))
        assert_equal(tri(4, k=-1), self._expected(4, 4, k=-1))

    def test_2d(self):
        assert_equal(tri(4, 3), self._expected(4, 3))
        assert_equal(tri(3, 4), self._expected(3, 4))

    def test_diag2d(self):
        assert_equal(tri(3, 4, k=2), self._expected(3, 4, k=2))
        assert_equal(tri(4, 3, k=-2), self._expected(4, 3, k=-2))
@dep_filter
class TestTril:
    """Tests for the deprecated ``tril`` against a hand-built reference."""

    def test_basic(self):
        a = (100 * get_mat(5)).astype('l')
        expected = a.copy()
        # Zero everything strictly above the main diagonal.
        for i in range(5):
            expected[i, i + 1:] = 0
        assert_equal(tril(a), expected)

    def test_diag(self):
        a = (100 * get_mat(5)).astype('f')
        # k=2 keeps two extra superdiagonals.
        expected = a.copy()
        for i in range(5):
            expected[i, i + 3:] = 0
        assert_equal(tril(a, k=2), expected)
        # k=-2 drops the diagonal and the first subdiagonal too.
        expected = a.copy()
        for i in range(5):
            expected[i, max(i - 1, 0):] = 0
        assert_equal(tril(a, k=-2), expected)
@dep_filter
class TestTriu:
    """Tests for the deprecated ``triu`` against a hand-built reference."""

    def test_basic(self):
        a = (100 * get_mat(5)).astype('l')
        expected = a.copy()
        # Zero everything strictly below the main diagonal.
        for j in range(5):
            expected[j + 1:, j] = 0
        assert_equal(triu(a), expected)

    def test_diag(self):
        a = (100 * get_mat(5)).astype('f')
        # k=2 drops the diagonal and the first superdiagonal too.
        expected = a.copy()
        for j in range(5):
            expected[max(j - 1, 0):, j] = 0
        assert_equal(triu(a, k=2), expected)
        # k=-2 keeps two extra subdiagonals.
        expected = a.copy()
        for j in range(5):
            expected[j + 3:, j] = 0
        assert_equal(triu(a, k=-2), expected)
@pytest.mark.parametrize("func", [tri, tril, triu])
def test_special_matrices_deprecation(func):
    """Each deprecated triangle constructor must emit a DeprecationWarning."""
    one_by_one = np.array([[1]])
    with pytest.warns(DeprecationWarning, match="'tri'/'tril/'triu'"):
        func(one_by_one)
class TestToeplitz:
    """Behavioral tests for scipy.linalg.toeplitz."""

    def test_basic(self):
        assert_array_equal(toeplitz([1, 2, 3]),
                           [[1, 2, 3], [2, 1, 2], [3, 2, 1]])
        assert_array_equal(toeplitz([1, 2, 3], [1, 4, 5]),
                           [[1, 4, 5], [2, 1, 4], [3, 2, 1]])

    def test_complex_01(self):
        data = (1.0 + arange(3.0)) * (1.0 + 1.0j)
        x = copy(data)
        t = toeplitz(x)
        # toeplitz must not modify its argument.
        assert_array_equal(x, data)
        # First column is x; first row past the corner is conj(x).
        assert_array_equal(t[:, 0], data)
        assert_array_equal(t[0, 1:], data[1:].conj())

    def test_scalar_00(self):
        """Scalar arguments still produce a 2D array."""
        assert_array_equal(toeplitz(10), [[10]])
        assert_array_equal(toeplitz(10, 20), [[10]])

    def test_scalar_01(self):
        assert_array_equal(toeplitz(array([1, 2, 3]), 1), [[1], [2], [3]])

    def test_scalar_02(self):
        assert_array_equal(toeplitz(array([1, 2, 3]), array(1)),
                           [[1], [2], [3]])

    def test_scalar_03(self):
        assert_array_equal(toeplitz(array([1, 2, 3]), array([1])),
                           [[1], [2], [3]])

    def test_scalar_04(self):
        assert_array_equal(toeplitz(1, array([10, 2, 3])), [[1, 2, 3]])
class TestHankel:
    """Behavioral tests for scipy.linalg.hankel."""

    def test_basic(self):
        # One argument: anti-diagonals filled from c, zero past the end.
        assert_array_equal(hankel([1, 2, 3]),
                           [[1, 2, 3], [2, 3, 0], [3, 0, 0]])
        # Two arguments: last row taken from r.
        assert_array_equal(hankel([1, 2, 3], [3, 4, 5]),
                           [[1, 2, 3], [2, 3, 4], [3, 4, 5]])
class TestCirculant:
    """Behavioral tests for scipy.linalg.circulant."""

    def test_basic(self):
        # Each column is the previous one rotated down by one position.
        assert_array_equal(circulant([1, 2, 3]),
                           [[1, 3, 2], [2, 1, 3], [3, 2, 1]])
class TestHadamard:
    """Behavioral tests for scipy.linalg.hadamard."""

    def test_basic(self):
        assert_array_equal(hadamard(1), [[1]])
        assert_array_equal(hadamard(2, dtype=float),
                           [[1.0, 1.0], [1.0, -1.0]])
        assert_array_equal(hadamard(4), [[1, 1, 1, 1],
                                         [1, -1, 1, -1],
                                         [1, 1, -1, -1],
                                         [1, -1, -1, 1]])
        # Order must be a positive power of two.
        assert_raises(ValueError, hadamard, 0)
        assert_raises(ValueError, hadamard, 5)
class TestLeslie:
    """Behavioral tests for scipy.linalg.leslie."""

    def test_bad_shapes(self):
        # Inputs must be 1-D with len(s) == len(f) - 1 and len(s) >= 1.
        bad_pairs = (([[1, 1], [2, 2]], [3, 4, 5]),
                     ([3, 4, 5], [[1, 1], [2, 2]]),
                     ([1, 2], [1, 2]),
                     ([1], []))
        for f, s in bad_pairs:
            assert_raises(ValueError, leslie, f, s)

    def test_basic(self):
        actual = leslie([1, 2, 3], [0.25, 0.5])
        expected = array([[1.0, 2.0, 3.0],
                          [0.25, 0.0, 0.0],
                          [0.0, 0.5, 0.0]])
        assert_array_equal(actual, expected)
class TestCompanion:
    """Behavioral tests for scipy.linalg.companion."""

    def test_bad_shapes(self):
        # Input must be 1-D, length >= 2, with nonzero leading coefficient.
        for bad in ([[1, 1], [2, 2]], [0, 4, 5], [1], []):
            assert_raises(ValueError, companion, bad)

    def test_basic(self):
        # First row is -a[1:]/a[0]; subdiagonal is ones.
        assert_array_equal(companion([1, 2, 3]),
                           array([[-2.0, -3.0],
                                  [1.0, 0.0]]))
        assert_array_equal(companion([2.0, 5.0, -10.0]),
                           array([[-2.5, 5.0],
                                  [1.0, 0.0]]))
class TestBlockDiag:
    """Behavioral tests for scipy.linalg.block_diag."""

    def test_basic(self):
        result = block_diag(eye(2), [[1, 2], [3, 4], [5, 6]], [[1, 2, 3]])
        expected = [[1, 0, 0, 0, 0, 0, 0],
                    [0, 1, 0, 0, 0, 0, 0],
                    [0, 0, 1, 2, 0, 0, 0],
                    [0, 0, 3, 4, 0, 0, 0],
                    [0, 0, 5, 6, 0, 0, 0],
                    [0, 0, 0, 0, 1, 2, 3]]
        assert_array_equal(result, expected)

    def test_dtype(self):
        assert_equal(block_diag([[1.5]]).dtype, float)
        assert_equal(block_diag([[True]]).dtype, bool)

    def test_mixed_dtypes(self):
        # int and complex blocks promote to a common dtype.
        assert_array_equal(block_diag([[1]], [[1j]]),
                           np.array([[1, 0], [0, 1j]]))

    def test_scalar_and_1d_args(self):
        a = block_diag(1)
        assert_equal(a.shape, (1, 1))
        assert_array_equal(a, [[1]])
        assert_array_equal(block_diag([2, 3], 4), [[2, 3, 0], [0, 0, 4]])

    def test_bad_arg(self):
        # Blocks with more than two dimensions are rejected.
        assert_raises(ValueError, block_diag, [[[1]]])

    def test_no_args(self):
        empty = block_diag()
        assert_equal(empty.ndim, 2)
        assert_equal(empty.nbytes, 0)

    def test_empty_matrix_arg(self):
        # regression test for gh-4596 / gh-4908: [] is viewed as a (1, 0)
        # block, contributing one all-zero row and no columns.
        a = block_diag([[1, 0], [0, 1]],
                       [],
                       [[2, 3], [4, 5], [6, 7]])
        assert_array_equal(a, [[1, 0, 0, 0],
                               [0, 1, 0, 0],
                               [0, 0, 0, 0],
                               [0, 0, 2, 3],
                               [0, 0, 4, 5],
                               [0, 0, 6, 7]])

    def test_zerosized_matrix_arg(self):
        # gh-4908: blocks of shape (0, n) or (n, 0) contribute only their
        # nonzero dimension; note that [[]] takes shape (1, 0).
        a = block_diag([[1, 0], [0, 1]],
                       [[]],
                       [[2, 3], [4, 5], [6, 7]],
                       np.zeros([0, 2], dtype='int32'))
        assert_array_equal(a, [[1, 0, 0, 0, 0, 0],
                               [0, 1, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0],
                               [0, 0, 2, 3, 0, 0],
                               [0, 0, 4, 5, 0, 0],
                               [0, 0, 6, 7, 0, 0]])
class TestKron:
    """Behavioral tests for the Kronecker product helper."""

    def test_basic(self):
        left = array([[1, 2], [3, 4]])
        # Kron with a row vector repeats each entry along the row.
        assert_array_equal(kron(left, array([[1, 1, 1]])),
                           array([[1, 1, 1, 2, 2, 2],
                                  [3, 3, 3, 4, 4, 4]]))
        # Kron with a column vector scales it by each entry.
        expected = array([[10, 20],
                          [11, 22],
                          [30, 40],
                          [33, 44]])
        assert_array_equal(kron(left, array([[10], [11]])), expected)
class TestHelmert:
    """Tests for the Helmert matrix constructor."""

    def test_orthogonality(self):
        # The full Helmert matrix must be orthogonal for every order.
        for n in range(1, 7):
            H = helmert(n, full=True)
            eye_n = np.eye(n)
            assert_allclose(H.dot(H.T), eye_n, atol=1e-12)
            assert_allclose(H.T.dot(H), eye_n, atol=1e-12)

    def test_subspace(self):
        # Rows 1..n-1 of the full matrix (equivalently, the partial
        # matrix) span the mean-centering subspace: U U' is the centering
        # projector and U'U is the (n-1)-identity.
        for n in range(2, 7):
            H_full = helmert(n, full=True)
            H_partial = helmert(n)
            centerer = np.eye(n) - np.full((n, n), 1 / n)
            for U in (H_full[1:, :].T, H_partial.T):
                assert_allclose(U.dot(U.T), centerer)
                assert_allclose(U.T.dot(U), np.eye(n - 1), atol=1e-12)
class TestHilbert:
    """Tests for hilbert matrix values and edge cases."""

    def test_basic(self):
        # Entries are 1 / (i + j + 1).
        expected_h3 = array([[1.0, 1/2., 1/3.],
                             [1/2., 1/3., 1/4.],
                             [1/3., 1/4., 1/5.]])
        assert_array_almost_equal(hilbert(3), expected_h3)
        assert_array_equal(hilbert(1), [[1.0]])
        # Degenerate order-zero case yields an empty 2-D array.
        assert_equal(hilbert(0).shape, (0, 0))
class TestInvHilbert:
    """Tests for invhilbert: exact integer inverses of Hilbert matrices."""

    def test_basic(self):
        # Compare against hard-coded exact inverses for small orders;
        # exact=True returns exact integers, exact=False floats.
        invh1 = array([[1]])
        assert_array_equal(invhilbert(1, exact=True), invh1)
        assert_array_equal(invhilbert(1), invh1)

        invh2 = array([[4, -6],
                       [-6, 12]])
        assert_array_equal(invhilbert(2, exact=True), invh2)
        assert_array_almost_equal(invhilbert(2), invh2)

        invh3 = array([[9, -36, 30],
                       [-36, 192, -180],
                       [30, -180, 180]])
        assert_array_equal(invhilbert(3, exact=True), invh3)
        assert_array_almost_equal(invhilbert(3), invh3)

        invh4 = array([[16, -120, 240, -140],
                       [-120, 1200, -2700, 1680],
                       [240, -2700, 6480, -4200],
                       [-140, 1680, -4200, 2800]])
        assert_array_equal(invhilbert(4, exact=True), invh4)
        assert_array_almost_equal(invhilbert(4), invh4)

        invh5 = array([[25, -300, 1050, -1400, 630],
                       [-300, 4800, -18900, 26880, -12600],
                       [1050, -18900, 79380, -117600, 56700],
                       [-1400, 26880, -117600, 179200, -88200],
                       [630, -12600, 56700, -88200, 44100]])
        assert_array_equal(invhilbert(5, exact=True), invh5)
        assert_array_almost_equal(invhilbert(5), invh5)

        # n = 17 exercises the arbitrary-precision (Python int) path:
        # these entries overflow any fixed-width integer dtype.
        invh17 = array([
            [289, -41616, 1976760, -46124400, 629598060, -5540462928,
             33374693352, -143034400080, 446982500250, -1033026222800,
             1774926873720, -2258997839280, 2099709530100, -1384423866000,
             613101997800, -163493866080, 19835652870],
            [-41616, 7990272, -426980160, 10627061760, -151103534400,
             1367702848512, -8410422724704, 36616806420480, -115857864064800,
             270465047424000, -468580694662080, 600545887119360,
             -561522320049600, 372133135180800, -165537539406000,
             44316454993920, -5395297580640],
            [1976760, -426980160, 24337869120, -630981792000, 9228108708000,
             -85267724461920, 532660105897920, -2348052711713280,
             7504429831470000, -17664748409880000, 30818191841236800,
             -39732544853164800, 37341234283298400, -24857330514030000,
             11100752642520000, -2982128117299200, 364182586693200],
            [-46124400, 10627061760, -630981792000, 16826181120000,
             -251209625940000, 2358021022156800, -14914482965141760,
             66409571644416000, -214015221119700000, 507295338950400000,
             -890303319857952000, 1153715376477081600, -1089119333262870000,
             727848632044800000, -326170262829600000, 87894302404608000,
             -10763618673376800],
            [629598060, -151103534400, 9228108708000,
             -251209625940000, 3810012660090000, -36210360321495360,
             231343968720664800, -1038687206500944000, 3370739732635275000,
             -8037460526495400000, 14178080368737885600, -18454939322943942000,
             17489975175339030000, -11728977435138600000, 5272370630081100000,
             -1424711708039692800, 174908803442373000],
            [-5540462928, 1367702848512, -85267724461920, 2358021022156800,
             -36210360321495360, 347619459086355456, -2239409617216035264,
             10124803292907663360, -33052510749726468000,
             79217210949138662400, -140362995650505067440,
             183420385176741672960, -174433352415381259200,
             117339159519533952000, -52892422160973595200,
             14328529177999196160, -1763080738699119840],
            [33374693352, -8410422724704, 532660105897920,
             -14914482965141760, 231343968720664800, -2239409617216035264,
             14527452132196331328, -66072377044391477760,
             216799987176909536400, -521925895055522958000,
             928414062734059661760, -1217424500995626443520,
             1161358898976091015200, -783401860847777371200,
             354015418167362952000, -96120549902411274240,
             11851820521255194480],
            [-143034400080, 36616806420480, -2348052711713280,
             66409571644416000, -1038687206500944000, 10124803292907663360,
             -66072377044391477760, 302045152202932469760,
             -995510145200094810000, 2405996923185123840000,
             -4294704507885446054400, 5649058909023744614400,
             -5403874060541811254400, 3654352703663101440000,
             -1655137020003255360000, 450325202737117593600,
             -55630994283442749600],
            [446982500250, -115857864064800, 7504429831470000,
             -214015221119700000, 3370739732635275000, -33052510749726468000,
             216799987176909536400, -995510145200094810000,
             3293967392206196062500, -7988661659013106500000,
             14303908928401362270000, -18866974090684772052000,
             18093328327706957325000, -12263364009096700500000,
             5565847995255512250000, -1517208935002984080000,
             187754605706619279900],
            [-1033026222800, 270465047424000, -17664748409880000,
             507295338950400000, -8037460526495400000, 79217210949138662400,
             -521925895055522958000, 2405996923185123840000,
             -7988661659013106500000, 19434404971634224000000,
             -34894474126569249192000, 46141453390504792320000,
             -44349976506971935800000, 30121928988527376000000,
             -13697025107665828500000, 3740200989399948902400,
             -463591619028689580000],
            [1774926873720, -468580694662080,
             30818191841236800, -890303319857952000, 14178080368737885600,
             -140362995650505067440, 928414062734059661760,
             -4294704507885446054400, 14303908928401362270000,
             -34894474126569249192000, 62810053427824648545600,
             -83243376594051600326400, 80177044485212743068000,
             -54558343880470209780000, 24851882355348879230400,
             -6797096028813368678400, 843736746632215035600],
            [-2258997839280, 600545887119360, -39732544853164800,
             1153715376477081600, -18454939322943942000, 183420385176741672960,
             -1217424500995626443520, 5649058909023744614400,
             -18866974090684772052000, 46141453390504792320000,
             -83243376594051600326400, 110552468520163390156800,
             -106681852579497947388000, 72720410752415168870400,
             -33177973900974346080000, 9087761081682520473600,
             -1129631016152221783200],
            [2099709530100, -561522320049600, 37341234283298400,
             -1089119333262870000, 17489975175339030000,
             -174433352415381259200, 1161358898976091015200,
             -5403874060541811254400, 18093328327706957325000,
             -44349976506971935800000, 80177044485212743068000,
             -106681852579497947388000, 103125790826848015808400,
             -70409051543137015800000, 32171029219823375700000,
             -8824053728865840192000, 1098252376814660067000],
            [-1384423866000, 372133135180800,
             -24857330514030000, 727848632044800000, -11728977435138600000,
             117339159519533952000, -783401860847777371200,
             3654352703663101440000, -12263364009096700500000,
             30121928988527376000000, -54558343880470209780000,
             72720410752415168870400, -70409051543137015800000,
             48142941226076592000000, -22027500987368499000000,
             6049545098753157120000, -753830033789944188000],
            [613101997800, -165537539406000,
             11100752642520000, -326170262829600000, 5272370630081100000,
             -52892422160973595200, 354015418167362952000,
             -1655137020003255360000, 5565847995255512250000,
             -13697025107665828500000, 24851882355348879230400,
             -33177973900974346080000, 32171029219823375700000,
             -22027500987368499000000, 10091416708498869000000,
             -2774765838662800128000, 346146444087219270000],
            [-163493866080, 44316454993920, -2982128117299200,
             87894302404608000, -1424711708039692800,
             14328529177999196160, -96120549902411274240,
             450325202737117593600, -1517208935002984080000,
             3740200989399948902400, -6797096028813368678400,
             9087761081682520473600, -8824053728865840192000,
             6049545098753157120000, -2774765838662800128000,
             763806510427609497600, -95382575704033754400],
            [19835652870, -5395297580640, 364182586693200, -10763618673376800,
             174908803442373000, -1763080738699119840, 11851820521255194480,
             -55630994283442749600, 187754605706619279900,
             -463591619028689580000, 843736746632215035600,
             -1129631016152221783200, 1098252376814660067000,
             -753830033789944188000, 346146444087219270000,
             -95382575704033754400, 11922821963004219300]
        ])
        assert_array_equal(invhilbert(17, exact=True), invh17)
        assert_allclose(invhilbert(17), invh17.astype(float), rtol=1e-12)

    def test_inverse(self):
        # hilbert(n) @ invhilbert(n) must approximate the identity, with
        # a tolerance scaled by the (rapidly growing) condition number.
        for n in range(1, 10):
            a = hilbert(n)
            b = invhilbert(n)
            # The Hilbert matrix is increasingly badly conditioned,
            # so take that into account in the test
            c = cond(a)
            assert_allclose(a.dot(b), eye(n), atol=1e-15*c, rtol=1e-15*c)
class TestPascal:
    """Tests for pascal matrices of all three kinds."""

    cases = [
        (1, array([[1]]), array([[1]])),
        (2, array([[1, 1],
                   [1, 2]]),
            array([[1, 0],
                   [1, 1]])),
        (3, array([[1, 1, 1],
                   [1, 2, 3],
                   [1, 3, 6]]),
            array([[1, 0, 0],
                   [1, 1, 0],
                   [1, 2, 1]])),
        (4, array([[1, 1, 1, 1],
                   [1, 2, 3, 4],
                   [1, 3, 6, 10],
                   [1, 4, 10, 20]]),
            array([[1, 0, 0, 0],
                   [1, 1, 0, 0],
                   [1, 2, 1, 0],
                   [1, 3, 3, 1]])),
    ]

    def check_case(self, n, sym, low):
        """Verify all three kinds, both exact and floating, for order n."""
        assert_array_equal(pascal(n), sym)
        assert_array_equal(pascal(n, kind='lower'), low)
        assert_array_equal(pascal(n, kind='upper'), low.T)
        for kind, expected in (('symmetric', sym),
                               ('lower', low),
                               ('upper', low.T)):
            assert_array_almost_equal(pascal(n, exact=False, kind=kind),
                                      expected)

    def test_cases(self):
        for n, sym, low in self.cases:
            self.check_case(n, sym, low)

    def test_big(self):
        # The lower-right corner holds the central binomial coefficient.
        p = pascal(50)
        assert p[-1, -1] == comb(98, 49, exact=True)

    def test_threshold(self):
        # Regression test. An early version of `pascal` returned an
        # array of type np.uint64 for n=35, but that data type is too small
        # to hold p[-1, -1]; the corner entry overflowed.
        p = pascal(34)
        assert_equal(2*p.item(-1, -2), p.item(-1, -1), err_msg="n = 34")
        p = pascal(35)
        assert_equal(2.*p.item(-1, -2), 1.*p.item(-1, -1), err_msg="n = 35")
def test_invpascal():
    """invpascal must be the exact inverse of pascal for each kind."""

    def check(n, kind, exact):
        ip = invpascal(n, kind=kind, exact=exact)
        p = pascal(n, kind=kind, exact=exact)
        # Multiply as object arrays: for exact results the uint64/int64
        # product would be cast to float64 and lose precision for n > 18.
        e = ip.astype(object).dot(p.astype(object))
        assert_array_equal(e, eye(n), err_msg="n=%d kind=%r exact=%r" %
                           (n, kind, exact))

    kinds = ['symmetric', 'lower', 'upper']
    for n in [1, 2, 5, 18]:
        for kind in kinds:
            for exact in [True, False]:
                check(n, kind, exact)
    # Larger orders only with exact arithmetic; floats would lose
    # precision or overflow.
    for n in [19, 34, 35, 50]:
        for kind in kinds:
            check(n, kind, True)
def test_dft():
    """dft matrix values, scaling options, and agreement with fft."""
    unscaled = array([[1.0, 1.0], [1.0, -1.0]])
    assert_array_almost_equal(dft(2), unscaled)
    assert_array_almost_equal(dft(2, scale='n'), unscaled / 2.0)
    assert_array_almost_equal(dft(2, scale='sqrtn'), unscaled / sqrt(2.0))
    # Multiplying by the DFT matrix must reproduce scipy.fft.fft.
    x = array([0, 1, 2, 3, 4, 5, 0, 1])
    assert_array_almost_equal(dft(8).dot(x), fft(x))
def test_fiedler():
    """fiedler builds the symmetric |a_i - a_j| distance matrix."""
    assert_equal(fiedler([]).size, 0)
    assert_array_equal(fiedler([123.]), np.array([[0.]]))
    a = np.arange(1, 7)
    # For equally spaced entries the result is the |i - j| matrix.
    expected = np.abs(a[:, None] - a[None, :])
    assert_array_equal(fiedler(a), expected)
def test_fiedler_companion():
    """Basic checks for the Fiedler companion matrix of a polynomial."""
    # Degenerate polynomials (no roots) give empty matrices.
    assert_equal(fiedler_companion([]).size, 0)
    assert_equal(fiedler_companion([1.]).size, 0)
    # Linear polynomial: the single "root entry" is -a1/a0.
    assert_array_equal(fiedler_companion([1., 2.]), np.array([[-2.]]))
    # A tiny but nonzero leading coefficient matches the classic companion.
    assert_array_almost_equal(fiedler_companion([1e-12, 2., 3.]),
                              companion([1e-12, 2., 3.]))
    # A zero leading coefficient is rejected.
    with assert_raises(ValueError):
        fiedler_companion([0, 1, 2])
    # Eigenvalues are the polynomial roots: here (x-1)(x-3)(x-5)(x-7).
    fc = fiedler_companion([1., -16., 86., -176., 105.])
    assert_array_almost_equal(eigvals(fc), np.array([7., 5., 3., 1.]))
class TestConvolutionMatrix:
    """
    Test convolution_matrix vs. numpy.convolve for various parameters.
    """

    def create_vector(self, n, cpx):
        """Make a complex or real test vector of length n."""
        x = np.linspace(-2.5, 2.2, n)
        if cpx:
            x = x + 1j*np.linspace(-1.5, 3.1, n)
        return x

    def test_bad_n(self):
        # n must be a positive integer
        with pytest.raises(ValueError, match='n must be a positive integer'):
            convolution_matrix([1, 2, 3], 0)

    def test_bad_first_arg(self):
        # first arg must be a 1d array, otherwise ValueError
        with pytest.raises(ValueError, match='one-dimensional'):
            convolution_matrix(1, 4)

    def test_empty_first_arg(self):
        # first arg must have at least one value
        with pytest.raises(ValueError, match=r'len\(a\)'):
            convolution_matrix([], 4)

    def test_bad_mode(self):
        # mode must be in ('full', 'valid', 'same')
        with pytest.raises(ValueError, match='mode.*must be one of'):
            convolution_matrix((1, 1), 4, mode='invalid argument')

    @pytest.mark.parametrize('cpx', [False, True])
    @pytest.mark.parametrize('na', [1, 2, 9])
    @pytest.mark.parametrize('nv', [1, 2, 9])
    @pytest.mark.parametrize('mode', [None, 'full', 'valid', 'same'])
    def test_against_numpy_convolve(self, cpx, na, nv, mode):
        # The matrix-vector product A @ v must equal np.convolve(v, a)
        # for every mode; mode=None exercises the default.
        a = self.create_vector(na, cpx)
        v = self.create_vector(nv, cpx)
        if mode is None:
            y1 = np.convolve(v, a)
            A = convolution_matrix(a, nv)
        else:
            y1 = np.convolve(v, a, mode)
            A = convolution_matrix(a, nv, mode)
        y2 = A @ v
        assert_array_almost_equal(y1, y2)
| 27,049
| 37.587732
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_decomp_cossin.py
|
import pytest
import numpy as np
from numpy.random import seed
from numpy.testing import assert_allclose
from scipy.linalg.lapack import _compute_lwork
from scipy.stats import ortho_group, unitary_group
from scipy.linalg import cossin, get_lapack_funcs
# Dtypes exercised by the parametrized cossin tests below.
REAL_DTYPES = (np.float32, np.float64)
COMPLEX_DTYPES = (np.complex64, np.complex128)
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
@pytest.mark.parametrize('dtype_', DTYPES)
@pytest.mark.parametrize('m, p, q',
                         [
                             (2, 1, 1),
                             (3, 2, 1),
                             (3, 1, 2),
                             (4, 2, 2),
                             (4, 1, 2),
                             (40, 12, 20),
                             (40, 30, 1),
                             (40, 1, 30),
                             (100, 50, 1),
                             (100, 50, 50),
                         ])
@pytest.mark.parametrize('swap_sign', [True, False])
def test_cossin(dtype_, m, p, q, swap_sign):
    # Round-trip (x == u @ cs @ vh) and dtype checks for the cosine-sine
    # decomposition of an orthogonal/unitary matrix, for several (m, p, q)
    # partitionings and both swap_sign settings.
    seed(1234)
    if dtype_ in COMPLEX_DTYPES:
        x = np.array(unitary_group.rvs(m), dtype=dtype_)
    else:
        x = np.array(ortho_group.rvs(m), dtype=dtype_)

    u, cs, vh = cossin(x, p, q,
                       swap_sign=swap_sign)
    assert_allclose(x, u @ cs @ vh, rtol=0., atol=m*1e3*np.finfo(dtype_).eps)
    assert u.dtype == dtype_
    # Test for float32 or float 64
    assert cs.dtype == np.real(u).dtype
    assert vh.dtype == dtype_

    # Passing the four explicit subblocks must give the same factorization.
    u, cs, vh = cossin([x[:p, :q], x[:p, q:], x[p:, :q], x[p:, q:]],
                       swap_sign=swap_sign)
    assert_allclose(x, u @ cs @ vh, rtol=0., atol=m*1e3*np.finfo(dtype_).eps)
    assert u.dtype == dtype_
    assert cs.dtype == np.real(u).dtype
    assert vh.dtype == dtype_

    # Disabling compute_u / compute_vh must not change the kept factors.
    _, cs2, vh2 = cossin(x, p, q,
                         compute_u=False,
                         swap_sign=swap_sign)
    assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
    assert_allclose(vh, vh2, rtol=0., atol=10*np.finfo(dtype_).eps)

    u2, cs2, _ = cossin(x, p, q,
                        compute_vh=False,
                        swap_sign=swap_sign)
    assert_allclose(u, u2, rtol=0., atol=10*np.finfo(dtype_).eps)
    assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)

    _, cs2, _ = cossin(x, p, q,
                       compute_u=False,
                       compute_vh=False,
                       swap_sign=swap_sign)
    assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
def test_cossin_mixed_types():
    """Real and complex subblocks may be mixed; results are complex."""
    seed(1234)
    x = np.array(ortho_group.rvs(4), dtype=np.float64)
    # One subblock is complex, the rest real: everything upcasts.
    blocks = [x[:2, :2],
              np.array(x[:2, 2:], dtype=np.complex128),
              x[2:, :2],
              x[2:, 2:]]
    u, cs, vh = cossin(blocks)
    assert u.dtype == np.complex128
    # The cosine-sine factor itself is always real.
    assert cs.dtype == np.float64
    assert vh.dtype == np.complex128
    # The factorization must still reproduce x.
    assert_allclose(x, u @ cs @ vh, rtol=0.,
                    atol=1e4 * np.finfo(np.complex128).eps)
def test_cossin_error_incorrect_subblocks():
    """Subblocks with inconsistent shapes raise an informative ValueError."""
    bad_blocks = ([1, 2], [3, 4, 5], [6, 7], [8, 9, 10])
    with pytest.raises(ValueError, match="be due to missing p, q arguments."):
        cossin(bad_blocks)
def test_cossin_error_empty_subblocks():
    """Each of the four subblocks must be nonempty."""
    cases = [("x11", ([], [], [], [])),
             ("x12", ([1, 2], [], [6, 7], [8, 9, 10])),
             ("x21", ([1, 2], [3, 4, 5], [], [8, 9, 10])),
             ("x22", ([1, 2], [3, 4, 5], [2], []))]
    # The error message must name the offending (empty) subblock.
    for name, blocks in cases:
        with pytest.raises(ValueError, match=name + ".*empty"):
            cossin(blocks)
def test_cossin_error_missing_partitioning():
    """A single matrix without p, q cannot be partitioned automatically."""
    for m, msg in [(2, ".*exactly four arrays.* got 2"),
                   (4, ".*might be due to missing p, q")]:
        with pytest.raises(ValueError, match=msg):
            cossin(unitary_group.rvs(m))
def test_cossin_error_non_iterable():
    """A scalar argument (not a matrix or iterable of blocks) is rejected."""
    with pytest.raises(ValueError, match="containing the subblocks of X"):
        cossin(12j)
def test_cossin_error_non_square():
    """Only square input matrices are accepted."""
    rect = np.array([[1, 2]])
    with pytest.raises(ValueError, match="only supports square"):
        cossin(rect, 1, 1)
def test_cossin_error_partitioning():
    """p and q must satisfy 0 < p < m and 0 < q < m."""
    x = np.array(ortho_group.rvs(4), dtype=np.float64)
    bad_args = [((0, 1), "invalid p=0.*0<p<4.*"),
                ((4, 1), "invalid p=4.*0<p<4.*"),
                ((1, -2), "invalid q=-2.*0<q<4.*"),
                ((1, 5), "invalid q=5.*0<q<4.*")]
    for (p, q), msg in bad_args:
        with pytest.raises(ValueError, match=msg):
            cossin(x, p, q)
@pytest.mark.parametrize("dtype_", DTYPES)
def test_cossin_separate(dtype_):
    # separate=True must return the raw (u1, u2), theta, (v1t, v2t) blocks
    # exactly as produced by the underlying LAPACK ?orcsd/?uncsd driver.
    seed(1234)
    m, p, q = 250, 80, 170

    # 'or' prefix -> real orthogonal driver, 'un' -> complex unitary driver.
    pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
    X = ortho_group.rvs(m) if pfx == 'or' else unitary_group.rvs(m)
    X = np.array(X, dtype=dtype_)

    drv, dlw = get_lapack_funcs((pfx + 'csd', pfx + 'csd_lwork'), [X])
    lwval = _compute_lwork(dlw, m, p, q)
    # The real driver takes only lwork; the complex one also needs lrwork.
    lwvals = {'lwork': lwval} if pfx == 'or' else dict(zip(['lwork',
                                                            'lrwork'],
                                                           lwval))

    *_, theta, u1, u2, v1t, v2t, _ = \
        drv(X[:p, :q], X[:p, q:], X[p:, :q], X[p:, q:], **lwvals)

    (u1_2, u2_2), theta2, (v1t_2, v2t_2) = cossin(X, p, q, separate=True)

    assert_allclose(u1_2, u1, rtol=0., atol=10*np.finfo(dtype_).eps)
    assert_allclose(u2_2, u2, rtol=0., atol=10*np.finfo(dtype_).eps)
    assert_allclose(v1t_2, v1t, rtol=0., atol=10*np.finfo(dtype_).eps)
    assert_allclose(v2t_2, v2t, rtol=0., atol=10*np.finfo(dtype_).eps)
    assert_allclose(theta2, theta, rtol=0., atol=10*np.finfo(dtype_).eps)
| 5,772
| 36.00641
| 78
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_decomp_lu.py
|
import pytest
from pytest import raises as assert_raises
import numpy as np
from scipy.linalg import lu, lu_factor, lu_solve, get_lapack_funcs, solve
from numpy.testing import assert_allclose, assert_array_equal
class TestLU:
    """Tests for scipy.linalg.lu (P L U decomposition)."""

    def setup_method(self):
        # Fixed-seed generator so every test method is reproducible.
        self.rng = np.random.default_rng(1682281250228846)

    def test_old_lu_smoke_tests(self):
        "Tests from old fortran based lu test suite"
        a = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6]])
        p, l, u = lu(a)
        # result_lu packs L (strict lower) and U (upper) in one array.
        result_lu = np.array([[2., 5., 6.], [0.5, -0.5, 0.], [0.5, 1., 0.]])
        assert_allclose(p, np.rot90(np.eye(3)))
        assert_allclose(l, np.tril(result_lu, k=-1)+np.eye(3))
        assert_allclose(u, np.triu(result_lu))

        a = np.array([[1, 2, 3], [1, 2, 3], [2, 5j, 6]])
        p, l, u = lu(a)
        result_lu = np.array([[2., 5.j, 6.], [0.5, 2-2.5j, 0.], [0.5, 1., 0.]])
        assert_allclose(p, np.rot90(np.eye(3)))
        assert_allclose(l, np.tril(result_lu, k=-1)+np.eye(3))
        assert_allclose(u, np.triu(result_lu))

        b = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        p, l, u = lu(b)
        assert_allclose(p, np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]))
        assert_allclose(l, np.array([[1, 0, 0], [1/7, 1, 0], [4/7, 0.5, 1]]))
        assert_allclose(u, np.array([[7, 8, 9], [0, 6/7, 12/7], [0, 0, 0]]),
                        rtol=0., atol=1e-14)

        cb = np.array([[1.j, 2.j, 3.j], [4j, 5j, 6j], [7j, 8j, 9j]])
        p, l, u = lu(cb)
        assert_allclose(p, np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]))
        assert_allclose(l, np.array([[1, 0, 0], [1/7, 1, 0], [4/7, 0.5, 1]]))
        assert_allclose(u, np.array([[7, 8, 9], [0, 6/7, 12/7], [0, 0, 0]])*1j,
                        rtol=0., atol=1e-14)

        # Rectangular matrices
        hrect = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])
        p, l, u = lu(hrect)
        assert_allclose(p, np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]))
        assert_allclose(l, np.array([[1, 0, 0], [1/9, 1, 0], [5/9, 0.5, 1]]))
        assert_allclose(u, np.array([[9, 10, 12, 12], [0, 8/9, 15/9, 24/9],
                                     [0, 0, -0.5, 0]]), rtol=0., atol=1e-14)

        chrect = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])*1.j
        p, l, u = lu(chrect)
        assert_allclose(p, np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]))
        assert_allclose(l, np.array([[1, 0, 0], [1/9, 1, 0], [5/9, 0.5, 1]]))
        assert_allclose(u, np.array([[9, 10, 12, 12], [0, 8/9, 15/9, 24/9],
                                     [0, 0, -0.5, 0]])*1j, rtol=0., atol=1e-14)

        vrect = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])
        p, l, u = lu(vrect)
        assert_allclose(p, np.eye(4)[[1, 3, 2, 0], :])
        assert_allclose(l, np.array([[1., 0, 0], [0.1, 1, 0], [0.7, -0.5, 1],
                                     [0.4, 0.25, 0.5]]))
        assert_allclose(u, np.array([[10, 12, 12],
                                     [0, 0.8, 1.8],
                                     [0, 0, 1.5]]))

        cvrect = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])*1j
        p, l, u = lu(cvrect)
        assert_allclose(p, np.eye(4)[[1, 3, 2, 0], :])
        assert_allclose(l, np.array([[1., 0, 0],
                                     [0.1, 1, 0],
                                     [0.7, -0.5, 1],
                                     [0.4, 0.25, 0.5]]))
        assert_allclose(u, np.array([[10, 12, 12],
                                     [0, 0.8, 1.8],
                                     [0, 0, 1.5]])*1j)

    @pytest.mark.parametrize('shape', [[2, 2], [2, 4], [4, 2], [20, 20],
                                       [20, 4], [4, 20], [3, 2, 9, 9],
                                       [2, 2, 17, 5], [2, 2, 11, 7]])
    def test_simple_lu_shapes_real_complex(self, shape):
        # Round-trip a == p @ l @ u (and a == pl @ u with permute_l=True)
        # for 2D and stacked (batched) inputs, real and complex.
        a = self.rng.uniform(-10., 10., size=shape)
        p, l, u = lu(a)
        assert_allclose(a, p @ l @ u)

        pl, u = lu(a, permute_l=True)
        assert_allclose(a, pl @ u)

        b = self.rng.uniform(-10., 10., size=shape)*1j
        b += self.rng.uniform(-10, 10, size=shape)
        pl, u = lu(b, permute_l=True)
        assert_allclose(b, pl @ u)

    @pytest.mark.parametrize('shape', [[2, 2], [2, 4], [4, 2], [20, 20],
                                       [20, 4], [4, 20]])
    def test_simple_lu_shapes_real_complex_2d_indices(self, shape):
        # p_indices=True returns the row permutation as an index array.
        a = self.rng.uniform(-10., 10., size=shape)
        p, l, u = lu(a, p_indices=True)
        assert_allclose(a, l[p, :] @ u)

    def test_1by1_input_output(self):
        # Batched 1x1 matrices: trivial permutation, unit L factor, U == a.
        a = self.rng.random([4, 5, 1, 1], dtype=np.float32)
        p, l, u = lu(a, p_indices=True)
        assert_allclose(p, np.zeros(shape=(4, 5, 1), dtype=int))
        assert_allclose(l, np.ones(shape=(4, 5, 1, 1), dtype=np.float32))
        assert_allclose(u, a)

        a = self.rng.random([4, 5, 1, 1], dtype=np.float32)
        p, l, u = lu(a)
        assert_allclose(p, np.ones(shape=(4, 5, 1, 1), dtype=np.float32))
        assert_allclose(l, np.ones(shape=(4, 5, 1, 1), dtype=np.float32))
        assert_allclose(u, a)

        pl, u = lu(a, permute_l=True)
        assert_allclose(pl, np.ones(shape=(4, 5, 1, 1), dtype=np.float32))
        assert_allclose(u, a)

        a = self.rng.random([4, 5, 1, 1], dtype=np.float32)*np.complex64(1.j)
        p, l, u = lu(a)
        assert_allclose(p, np.ones(shape=(4, 5, 1, 1), dtype=np.complex64))
        assert_allclose(l, np.ones(shape=(4, 5, 1, 1), dtype=np.complex64))
        assert_allclose(u, a)

    def test_empty_edge_cases(self):
        # Degenerate sizes: empty dimensions must yield consistently shaped
        # and typed empty factors rather than raising.
        a = np.empty([0, 0])
        p, l, u = lu(a)
        assert_allclose(p, np.empty(shape=(0, 0), dtype=np.float64))
        assert_allclose(l, np.empty(shape=(0, 0), dtype=np.float64))
        assert_allclose(u, np.empty(shape=(0, 0), dtype=np.float64))

        a = np.empty([0, 3], dtype=np.float16)
        p, l, u = lu(a)
        assert_allclose(p, np.empty(shape=(0, 0), dtype=np.float32))
        assert_allclose(l, np.empty(shape=(0, 0), dtype=np.float32))
        assert_allclose(u, np.empty(shape=(0, 3), dtype=np.float32))

        a = np.empty([3, 0], dtype=np.complex64)
        p, l, u = lu(a)
        assert_allclose(p, np.empty(shape=(0, 0), dtype=np.float32))
        assert_allclose(l, np.empty(shape=(3, 0), dtype=np.complex64))
        assert_allclose(u, np.empty(shape=(0, 0), dtype=np.complex64))

        p, l, u = lu(a, p_indices=True)
        assert_allclose(p, np.empty(shape=(0,), dtype=int))
        assert_allclose(l, np.empty(shape=(3, 0), dtype=np.complex64))
        assert_allclose(u, np.empty(shape=(0, 0), dtype=np.complex64))

        pl, u = lu(a, permute_l=True)
        assert_allclose(pl, np.empty(shape=(3, 0), dtype=np.complex64))
        assert_allclose(u, np.empty(shape=(0, 0), dtype=np.complex64))

        a = np.empty([3, 0, 0], dtype=np.complex64)
        p, l, u = lu(a)
        assert_allclose(p, np.empty(shape=(3, 0, 0), dtype=np.float32))
        assert_allclose(l, np.empty(shape=(3, 0, 0), dtype=np.complex64))
        assert_allclose(u, np.empty(shape=(3, 0, 0), dtype=np.complex64))

        a = np.empty([0, 0, 3])
        p, l, u = lu(a)
        assert_allclose(p, np.empty(shape=(0, 0, 0)))
        assert_allclose(l, np.empty(shape=(0, 0, 0)))
        assert_allclose(u, np.empty(shape=(0, 0, 3)))

        # 1D input is rejected outright.
        with assert_raises(ValueError, match='at least two-dimensional'):
            lu(np.array([]))

        a = np.array([[]])
        p, l, u = lu(a)
        assert_allclose(p, np.empty(shape=(0, 0)))
        assert_allclose(l, np.empty(shape=(1, 0)))
        assert_allclose(u, np.empty(shape=(0, 0)))

        a = np.array([[[]]])
        p, l, u = lu(a)
        assert_allclose(p, np.empty(shape=(1, 0, 0)))
        assert_allclose(l, np.empty(shape=(1, 1, 0)))
        assert_allclose(u, np.empty(shape=(1, 0, 0)))
class TestLUFactor:
    """lu_factor must agree with the raw LAPACK getrf output."""

    def setup_method(self):
        self.rng = np.random.default_rng(1682281250228846)

        self.a = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6]])
        self.ca = np.array([[1, 2, 3], [1, 2, 3], [2, 5j, 6]])
        # Those matrices are more robust to detect problems in permutation
        # matrices than the ones above
        self.b = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        self.cb = np.array([[1j, 2j, 3j], [4j, 5j, 6j], [7j, 8j, 9j]])
        # Rectangular matrices
        self.hrect = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])
        self.chrect = np.array([[1, 2, 3, 4], [5, 6, 7, 8],
                                [9, 10, 12, 12]]) * 1.j
        self.vrect = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])
        self.cvrect = 1.j * np.array([[1, 2, 3],
                                      [4, 5, 6],
                                      [7, 8, 9],
                                      [10, 12, 12]])
        # Medium sizes matrices
        self.med = self.rng.random((30, 40))
        self.cmed = self.rng.random((30, 40)) + 1.j*self.rng.random((30, 40))

    def _test_common_lu_factor(self, data):
        # Compare lu_factor's packed factors and pivots against a direct
        # getrf call on the same data.
        l_and_u1, piv1 = lu_factor(data)
        (getrf,) = get_lapack_funcs(("getrf",), (data,))
        l_and_u2, piv2, _ = getrf(data, overwrite_a=False)
        assert_allclose(l_and_u1, l_and_u2)
        assert_allclose(piv1, piv2)

    # Simple tests.
    # For lu_factor gives a LinAlgWarning because these matrices are singular
    def test_hrectangular(self):
        self._test_common_lu_factor(self.hrect)

    def test_vrectangular(self):
        self._test_common_lu_factor(self.vrect)

    def test_hrectangular_complex(self):
        self._test_common_lu_factor(self.chrect)

    def test_vrectangular_complex(self):
        self._test_common_lu_factor(self.cvrect)

    # Bigger matrices
    def test_medium1(self):
        """Check lu decomposition on medium size, rectangular matrix."""
        self._test_common_lu_factor(self.med)

    def test_medium1_complex(self):
        """Check lu decomposition on medium size, rectangular matrix."""
        self._test_common_lu_factor(self.cmed)

    def test_check_finite(self):
        # check_finite=False must still produce a valid factorization.
        p, l, u = lu(self.a, check_finite=False)
        assert_allclose(p @ l @ u, self.a)

    def test_simple_known(self):
        # Ticket #1458
        for order in ['C', 'F']:
            A = np.array([[2, 1], [0, 1.]], order=order)
            LU, P = lu_factor(A)
            assert_allclose(LU, np.array([[2, 1], [0, 1]]))
            assert_array_equal(P, np.array([0, 1]))
class TestLUSolve:
    """lu_solve(lu_factor(a), b) must agree with solve(a, b)."""

    def setup_method(self):
        # Fixed-seed generator for reproducible random systems.
        self.rng = np.random.default_rng(1682281250228846)

    def test_lu(self):
        # Factor-then-solve matches the direct solver for both memory orders.
        a0 = self.rng.random((10, 10))
        b = self.rng.random((10,))
        for order in ['C', 'F']:
            a = np.array(a0, order=order)
            direct = solve(a, b)
            factored = lu_solve(lu_factor(a), b)
            assert_allclose(direct, factored)

    def test_check_finite(self):
        # Disabling the finiteness check must not change the solution.
        a = self.rng.random((10, 10))
        b = self.rng.random((10,))
        expected = solve(a, b)
        got = lu_solve(lu_factor(a, check_finite=False), b, check_finite=False)
        assert_allclose(expected, got)
| 11,186
| 40.742537
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_basic.py
|
import itertools
import warnings
import numpy as np
from numpy import (arange, array, dot, zeros, identity, conjugate, transpose,
float32)
from numpy.random import random
from numpy.testing import (assert_equal, assert_almost_equal, assert_,
assert_array_almost_equal, assert_allclose,
assert_array_equal, suppress_warnings)
import pytest
from pytest import raises as assert_raises
from scipy.linalg import (solve, inv, det, lstsq, pinv, pinvh, norm,
solve_banded, solveh_banded, solve_triangular,
solve_circulant, circulant, LinAlgError, block_diag,
matrix_balance, qr, LinAlgWarning)
from scipy.linalg._testutils import assert_no_overwrite
from scipy._lib._testutils import check_free_memory, IS_MUSL
from scipy.linalg.blas import HAS_ILP64
from scipy._lib.deprecation import _NoValue
# Dtypes exercised by the tests; extended-precision entries are handled
# via _eps_cast when choosing comparison tolerances.
REAL_DTYPES = (np.float32, np.float64, np.longdouble)
COMPLEX_DTYPES = (np.complex64, np.complex128, np.clongdouble)
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
def _eps_cast(dtyp):
"""Get the epsilon for dtype, possibly downcast to BLAS types."""
dt = dtyp
if dt == np.longdouble:
dt = np.float64
elif dt == np.clongdouble:
dt = np.complex128
return np.finfo(dt).eps
class TestSolveBanded:
    """Tests for solve_banded (general banded systems)."""

    def test_real(self):
        # 4x4 system with 2 sub- and 1 super-diagonal; several RHS shapes.
        a = array([[1.0, 20, 0, 0],
                   [-30, 4, 6, 0],
                   [2, 1, 20, 2],
                   [0, -1, 7, 14]])
        ab = array([[0.0, 20, 6, 2],
                    [1, 4, 20, 14],
                    [-30, 1, 7, 0],
                    [2, -1, 0, 0]])
        l, u = 2, 1
        b4 = array([10.0, 0.0, 2.0, 14.0])
        b4by1 = b4.reshape(-1, 1)
        b4by2 = array([[2, 1],
                       [-30, 4],
                       [2, 3],
                       [1, 3]])
        b4by4 = array([[1, 0, 0, 0],
                       [0, 0, 0, 1],
                       [0, 1, 0, 0],
                       [0, 1, 0, 0]])
        for b in [b4, b4by1, b4by2, b4by4]:
            x = solve_banded((l, u), ab, b)
            assert_array_almost_equal(dot(a, x), b)

    def test_complex(self):
        # Same structure as test_real with complex entries.
        a = array([[1.0, 20, 0, 0],
                   [-30, 4, 6, 0],
                   [2j, 1, 20, 2j],
                   [0, -1, 7, 14]])
        ab = array([[0.0, 20, 6, 2j],
                    [1, 4, 20, 14],
                    [-30, 1, 7, 0],
                    [2j, -1, 0, 0]])
        l, u = 2, 1
        b4 = array([10.0, 0.0, 2.0, 14.0j])
        b4by1 = b4.reshape(-1, 1)
        b4by2 = array([[2, 1],
                       [-30, 4],
                       [2, 3],
                       [1, 3]])
        b4by4 = array([[1, 0, 0, 0],
                       [0, 0, 0, 1j],
                       [0, 1, 0, 0],
                       [0, 1, 0, 0]])
        for b in [b4, b4by1, b4by2, b4by4]:
            x = solve_banded((l, u), ab, b)
            assert_array_almost_equal(dot(a, x), b)

    def test_tridiag_real(self):
        # Tridiagonal (l = u = 1) real system.
        ab = array([[0.0, 20, 6, 2],
                    [1, 4, 20, 14],
                    [-30, 1, 7, 0]])
        a = np.diag(ab[0, 1:], 1) + np.diag(ab[1, :], 0) + np.diag(
            ab[2, :-1], -1)
        b4 = array([10.0, 0.0, 2.0, 14.0])
        b4by1 = b4.reshape(-1, 1)
        b4by2 = array([[2, 1],
                       [-30, 4],
                       [2, 3],
                       [1, 3]])
        b4by4 = array([[1, 0, 0, 0],
                       [0, 0, 0, 1],
                       [0, 1, 0, 0],
                       [0, 1, 0, 0]])
        for b in [b4, b4by1, b4by2, b4by4]:
            x = solve_banded((1, 1), ab, b)
            assert_array_almost_equal(dot(a, x), b)

    def test_tridiag_complex(self):
        # Tridiagonal complex system.
        ab = array([[0.0, 20, 6, 2j],
                    [1, 4, 20, 14],
                    [-30, 1, 7, 0]])
        a = np.diag(ab[0, 1:], 1) + np.diag(ab[1, :], 0) + np.diag(
            ab[2, :-1], -1)
        b4 = array([10.0, 0.0, 2.0, 14.0j])
        b4by1 = b4.reshape(-1, 1)
        b4by2 = array([[2, 1],
                       [-30, 4],
                       [2, 3],
                       [1, 3]])
        b4by4 = array([[1, 0, 0, 0],
                       [0, 0, 0, 1],
                       [0, 1, 0, 0],
                       [0, 1, 0, 0]])
        for b in [b4, b4by1, b4by2, b4by4]:
            x = solve_banded((1, 1), ab, b)
            assert_array_almost_equal(dot(a, x), b)

    def test_check_finite(self):
        # check_finite=False must still produce the correct solution.
        a = array([[1.0, 20, 0, 0],
                   [-30, 4, 6, 0],
                   [2, 1, 20, 2],
                   [0, -1, 7, 14]])
        ab = array([[0.0, 20, 6, 2],
                    [1, 4, 20, 14],
                    [-30, 1, 7, 0],
                    [2, -1, 0, 0]])
        l, u = 2, 1
        b4 = array([10.0, 0.0, 2.0, 14.0])
        x = solve_banded((l, u), ab, b4, check_finite=False)
        assert_array_almost_equal(dot(a, x), b4)

    def test_bad_shape(self):
        # Mismatched RHS shapes and incompatible (l, u) must raise.
        ab = array([[0.0, 20, 6, 2],
                    [1, 4, 20, 14],
                    [-30, 1, 7, 0],
                    [2, -1, 0, 0]])
        l, u = 2, 1
        bad = array([1.0, 2.0, 3.0, 4.0]).reshape(-1, 4)
        assert_raises(ValueError, solve_banded, (l, u), ab, bad)
        assert_raises(ValueError, solve_banded, (l, u), ab, [1.0, 2.0])

        # Values of (l,u) are not compatible with ab.
        assert_raises(ValueError, solve_banded, (1, 1), ab, [1.0, 2.0])

    def test_1x1(self):
        # 1x1 "banded" system; also checks the input b is not modified.
        b = array([[1., 2., 3.]])
        x = solve_banded((1, 1), [[0], [2], [0]], b)
        assert_array_equal(x, [[0.5, 1.0, 1.5]])
        assert_equal(x.dtype, np.dtype('f8'))
        assert_array_equal(b, [[1.0, 2.0, 3.0]])

    def test_native_list_arguments(self):
        # Plain Python lists must be accepted for ab and b.
        a = [[1.0, 20, 0, 0],
             [-30, 4, 6, 0],
             [2, 1, 20, 2],
             [0, -1, 7, 14]]
        ab = [[0.0, 20, 6, 2],
              [1, 4, 20, 14],
              [-30, 1, 7, 0],
              [2, -1, 0, 0]]
        l, u = 2, 1
        b = [10.0, 0.0, 2.0, 14.0]
        x = solve_banded((l, u), ab, b)
        assert_array_almost_equal(dot(a, x), b)
class TestSolveHBanded:
    """Tests for solveh_banded (Hermitian/symmetric banded systems)."""

    def test_01_upper(self):
        # Solve
        # [ 4 1 2 0]     [1]
        # [ 1 4 1 2] X = [4]
        # [ 2 1 4 1]     [1]
        # [ 0 2 1 4]     [2]
        # with the RHS as a 1D array.
        ab = array([[0.0, 0.0, 2.0, 2.0],
                    [-99, 1.0, 1.0, 1.0],
                    [4.0, 4.0, 4.0, 4.0]])
        b = array([1.0, 4.0, 1.0, 2.0])
        x = solveh_banded(ab, b)
        assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])

    def test_02_upper(self):
        # Solve
        # [ 4 1 2 0]     [1 6]
        # [ 1 4 1 2] X = [4 2]
        # [ 2 1 4 1]     [1 6]
        # [ 0 2 1 4]     [2 1]
        #
        ab = array([[0.0, 0.0, 2.0, 2.0],
                    [-99, 1.0, 1.0, 1.0],
                    [4.0, 4.0, 4.0, 4.0]])
        b = array([[1.0, 6.0],
                   [4.0, 2.0],
                   [1.0, 6.0],
                   [2.0, 1.0]])
        x = solveh_banded(ab, b)
        expected = array([[0.0, 1.0],
                          [1.0, 0.0],
                          [0.0, 1.0],
                          [0.0, 0.0]])
        assert_array_almost_equal(x, expected)

    def test_03_upper(self):
        # Solve
        # [ 4 1 2 0]     [1]
        # [ 1 4 1 2] X = [4]
        # [ 2 1 4 1]     [1]
        # [ 0 2 1 4]     [2]
        # with the RHS as a 2D array with shape (3,1).
        ab = array([[0.0, 0.0, 2.0, 2.0],
                    [-99, 1.0, 1.0, 1.0],
                    [4.0, 4.0, 4.0, 4.0]])
        b = array([1.0, 4.0, 1.0, 2.0]).reshape(-1, 1)
        x = solveh_banded(ab, b)
        assert_array_almost_equal(x, array([0., 1., 0., 0.]).reshape(-1, 1))

    def test_01_lower(self):
        # Solve
        # [ 4 1 2 0]     [1]
        # [ 1 4 1 2] X = [4]
        # [ 2 1 4 1]     [1]
        # [ 0 2 1 4]     [2]
        #
        ab = array([[4.0, 4.0, 4.0, 4.0],
                    [1.0, 1.0, 1.0, -99],
                    [2.0, 2.0, 0.0, 0.0]])
        b = array([1.0, 4.0, 1.0, 2.0])
        x = solveh_banded(ab, b, lower=True)
        assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])

    def test_02_lower(self):
        # Solve
        # [ 4 1 2 0]     [1 6]
        # [ 1 4 1 2] X = [4 2]
        # [ 2 1 4 1]     [1 6]
        # [ 0 2 1 4]     [2 1]
        #
        ab = array([[4.0, 4.0, 4.0, 4.0],
                    [1.0, 1.0, 1.0, -99],
                    [2.0, 2.0, 0.0, 0.0]])
        b = array([[1.0, 6.0],
                   [4.0, 2.0],
                   [1.0, 6.0],
                   [2.0, 1.0]])
        x = solveh_banded(ab, b, lower=True)
        expected = array([[0.0, 1.0],
                          [1.0, 0.0],
                          [0.0, 1.0],
                          [0.0, 0.0]])
        assert_array_almost_equal(x, expected)

    def test_01_float32(self):
        # Solve
        # [ 4 1 2 0]     [1]
        # [ 1 4 1 2] X = [4]
        # [ 2 1 4 1]     [1]
        # [ 0 2 1 4]     [2]
        #
        ab = array([[0.0, 0.0, 2.0, 2.0],
                    [-99, 1.0, 1.0, 1.0],
                    [4.0, 4.0, 4.0, 4.0]], dtype=float32)
        b = array([1.0, 4.0, 1.0, 2.0], dtype=float32)
        x = solveh_banded(ab, b)
        assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])

    def test_02_float32(self):
        # Solve
        # [ 4 1 2 0]     [1 6]
        # [ 1 4 1 2] X = [4 2]
        # [ 2 1 4 1]     [1 6]
        # [ 0 2 1 4]     [2 1]
        #
        ab = array([[0.0, 0.0, 2.0, 2.0],
                    [-99, 1.0, 1.0, 1.0],
                    [4.0, 4.0, 4.0, 4.0]], dtype=float32)
        b = array([[1.0, 6.0],
                   [4.0, 2.0],
                   [1.0, 6.0],
                   [2.0, 1.0]], dtype=float32)
        x = solveh_banded(ab, b)
        expected = array([[0.0, 1.0],
                          [1.0, 0.0],
                          [0.0, 1.0],
                          [0.0, 0.0]])
        assert_array_almost_equal(x, expected)

    def test_01_complex(self):
        # Solve
        # [ 4 -j 2 0]     [2-j]
        # [ j 4 -j 2] X = [4-j]
        # [ 2 j 4 -j]     [4+j]
        # [ 0 2 j 4]     [2+j]
        #
        ab = array([[0.0, 0.0, 2.0, 2.0],
                    [-99, -1.0j, -1.0j, -1.0j],
                    [4.0, 4.0, 4.0, 4.0]])
        b = array([2-1.0j, 4.0-1j, 4+1j, 2+1j])
        x = solveh_banded(ab, b)
        assert_array_almost_equal(x, [0.0, 1.0, 1.0, 0.0])

    def test_02_complex(self):
        # Solve
        # [ 4 -j 2 0]     [2-j 2+4j]
        # [ j 4 -j 2] X = [4-j -1-j]
        # [ 2 j 4 -j]     [4+j 4+2j]
        # [ 0 2 j 4]     [2+j j]
        #
        ab = array([[0.0, 0.0, 2.0, 2.0],
                    [-99, -1.0j, -1.0j, -1.0j],
                    [4.0, 4.0, 4.0, 4.0]])
        b = array([[2-1j, 2+4j],
                   [4.0-1j, -1-1j],
                   [4.0+1j, 4+2j],
                   [2+1j, 1j]])
        x = solveh_banded(ab, b)
        expected = array([[0.0, 1.0j],
                          [1.0, 0.0],
                          [1.0, 1.0],
                          [0.0, 0.0]])
        assert_array_almost_equal(x, expected)

    def test_tridiag_01_upper(self):
        # Solve
        # [ 4 1 0]     [1]
        # [ 1 4 1] X = [4]
        # [ 0 1 4]     [1]
        # with the RHS as a 1D array.
        ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
        b = array([1.0, 4.0, 1.0])
        x = solveh_banded(ab, b)
        assert_array_almost_equal(x, [0.0, 1.0, 0.0])

    def test_tridiag_02_upper(self):
        # Solve
        # [ 4 1 0]     [1 4]
        # [ 1 4 1] X = [4 2]
        # [ 0 1 4]     [1 4]
        #
        ab = array([[-99, 1.0, 1.0],
                    [4.0, 4.0, 4.0]])
        b = array([[1.0, 4.0],
                   [4.0, 2.0],
                   [1.0, 4.0]])
        x = solveh_banded(ab, b)
        expected = array([[0.0, 1.0],
                          [1.0, 0.0],
                          [0.0, 1.0]])
        assert_array_almost_equal(x, expected)

    def test_tridiag_03_upper(self):
        # Solve
        # [ 4 1 0]     [1]
        # [ 1 4 1] X = [4]
        # [ 0 1 4]     [1]
        # with the RHS as a 2D array with shape (3,1).
        ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
        b = array([1.0, 4.0, 1.0]).reshape(-1, 1)
        x = solveh_banded(ab, b)
        assert_array_almost_equal(x, array([0.0, 1.0, 0.0]).reshape(-1, 1))

    def test_tridiag_01_lower(self):
        # Solve
        # [ 4 1 0]     [1]
        # [ 1 4 1] X = [4]
        # [ 0 1 4]     [1]
        #
        ab = array([[4.0, 4.0, 4.0],
                    [1.0, 1.0, -99]])
        b = array([1.0, 4.0, 1.0])
        x = solveh_banded(ab, b, lower=True)
        assert_array_almost_equal(x, [0.0, 1.0, 0.0])

    def test_tridiag_02_lower(self):
        # Solve
        # [ 4 1 0]     [1 4]
        # [ 1 4 1] X = [4 2]
        # [ 0 1 4]     [1 4]
        #
        ab = array([[4.0, 4.0, 4.0],
                    [1.0, 1.0, -99]])
        b = array([[1.0, 4.0],
                   [4.0, 2.0],
                   [1.0, 4.0]])
        x = solveh_banded(ab, b, lower=True)
        expected = array([[0.0, 1.0],
                          [1.0, 0.0],
                          [0.0, 1.0]])
        assert_array_almost_equal(x, expected)

    def test_tridiag_01_float32(self):
        # Solve
        # [ 4 1 0]     [1]
        # [ 1 4 1] X = [4]
        # [ 0 1 4]     [1]
        #
        ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]], dtype=float32)
        b = array([1.0, 4.0, 1.0], dtype=float32)
        x = solveh_banded(ab, b)
        assert_array_almost_equal(x, [0.0, 1.0, 0.0])

    def test_tridiag_02_float32(self):
        # Solve
        # [ 4 1 0]     [1 4]
        # [ 1 4 1] X = [4 2]
        # [ 0 1 4]     [1 4]
        #
        ab = array([[-99, 1.0, 1.0],
                    [4.0, 4.0, 4.0]], dtype=float32)
        b = array([[1.0, 4.0],
                   [4.0, 2.0],
                   [1.0, 4.0]], dtype=float32)
        x = solveh_banded(ab, b)
        expected = array([[0.0, 1.0],
                          [1.0, 0.0],
                          [0.0, 1.0]])
        assert_array_almost_equal(x, expected)

    def test_tridiag_01_complex(self):
        # Solve
        # [ 4 -j 0]     [ -j]
        # [ j 4 -j] X = [4-j]
        # [ 0 j 4]     [4+j]
        #
        ab = array([[-99, -1.0j, -1.0j], [4.0, 4.0, 4.0]])
        b = array([-1.0j, 4.0-1j, 4+1j])
        x = solveh_banded(ab, b)
        assert_array_almost_equal(x, [0.0, 1.0, 1.0])

    def test_tridiag_02_complex(self):
        # Solve
        # [ 4 -j 0]     [ -j 4j]
        # [ j 4 -j] X = [4-j -1-j]
        # [ 0 j 4]     [4+j 4 ]
        #
        ab = array([[-99, -1.0j, -1.0j],
                    [4.0, 4.0, 4.0]])
        b = array([[-1j, 4.0j],
                   [4.0-1j, -1.0-1j],
                   [4.0+1j, 4.0]])
        x = solveh_banded(ab, b)
        expected = array([[0.0, 1.0j],
                          [1.0, 0.0],
                          [1.0, 1.0]])
        assert_array_almost_equal(x, expected)

    def test_check_finite(self):
        # Solve
        # [ 4 1 0]     [1]
        # [ 1 4 1] X = [4]
        # [ 0 1 4]     [1]
        # with the RHS as a 1D array.
        ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
        b = array([1.0, 4.0, 1.0])
        x = solveh_banded(ab, b, check_finite=False)
        assert_array_almost_equal(x, [0.0, 1.0, 0.0])

    def test_bad_shapes(self):
        # RHS shapes incompatible with the banded matrix must raise.
        ab = array([[-99, 1.0, 1.0],
                    [4.0, 4.0, 4.0]])
        b = array([[1.0, 4.0],
                   [4.0, 2.0]])
        assert_raises(ValueError, solveh_banded, ab, b)
        assert_raises(ValueError, solveh_banded, ab, [1.0, 2.0])
        assert_raises(ValueError, solveh_banded, ab, [1.0])

    def test_1x1(self):
        # Trivial 1-band, 1x1 system.
        x = solveh_banded([[1]], [[1, 2, 3]])
        assert_array_equal(x, [[1.0, 2.0, 3.0]])
        assert_equal(x.dtype, np.dtype('f8'))

    def test_native_list_arguments(self):
        # Same as test_01_upper, using python's native list.
        ab = [[0.0, 0.0, 2.0, 2.0],
              [-99, 1.0, 1.0, 1.0],
              [4.0, 4.0, 4.0, 4.0]]
        b = [1.0, 4.0, 1.0, 2.0]
        x = solveh_banded(ab, b)
        assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
class TestSolve:
def setup_method(self):
np.random.seed(1234)
def test_20Feb04_bug(self):
a = [[1, 1], [1.0, 0]] # ok
x0 = solve(a, [1, 0j])
assert_array_almost_equal(dot(a, x0), [1, 0])
# gives failure with clapack.zgesv(..,rowmajor=0)
a = [[1, 1], [1.2, 0]]
b = [1, 0j]
x0 = solve(a, b)
assert_array_almost_equal(dot(a, x0), [1, 0])
def test_simple(self):
a = [[1, 20], [-30, 4]]
for b in ([[1, 0], [0, 1]],
[1, 0],
[[2, 1], [-30, 4]]
):
x = solve(a, b)
assert_array_almost_equal(dot(a, x), b)
def test_simple_complex(self):
a = array([[5, 2], [2j, 4]], 'D')
for b in ([1j, 0],
[[1j, 1j], [0, 2]],
[1, 0j],
array([1, 0], 'D'),
):
x = solve(a, b)
assert_array_almost_equal(dot(a, x), b)
def test_simple_pos(self):
a = [[2, 3], [3, 5]]
for lower in [0, 1]:
for b in ([[1, 0], [0, 1]],
[1, 0]
):
x = solve(a, b, assume_a='pos', lower=lower)
assert_array_almost_equal(dot(a, x), b)
def test_simple_pos_complexb(self):
a = [[5, 2], [2, 4]]
for b in ([1j, 0],
[[1j, 1j], [0, 2]],
):
x = solve(a, b, assume_a='pos')
assert_array_almost_equal(dot(a, x), b)
def test_simple_sym(self):
a = [[2, 3], [3, -5]]
for lower in [0, 1]:
for b in ([[1, 0], [0, 1]],
[1, 0]
):
x = solve(a, b, assume_a='sym', lower=lower)
assert_array_almost_equal(dot(a, x), b)
def test_simple_sym_complexb(self):
a = [[5, 2], [2, -4]]
for b in ([1j, 0],
[[1j, 1j], [0, 2]]
):
x = solve(a, b, assume_a='sym')
assert_array_almost_equal(dot(a, x), b)
def test_simple_sym_complex(self):
a = [[5, 2+1j], [2+1j, -4]]
for b in ([1j, 0],
[1, 0],
[[1j, 1j], [0, 2]]
):
x = solve(a, b, assume_a='sym')
assert_array_almost_equal(dot(a, x), b)
def test_simple_her_actuallysym(self):
a = [[2, 3], [3, -5]]
for lower in [0, 1]:
for b in ([[1, 0], [0, 1]],
[1, 0],
[1j, 0],
):
x = solve(a, b, assume_a='her', lower=lower)
assert_array_almost_equal(dot(a, x), b)
def test_simple_her(self):
a = [[5, 2+1j], [2-1j, -4]]
for b in ([1j, 0],
[1, 0],
[[1j, 1j], [0, 2]]
):
x = solve(a, b, assume_a='her')
assert_array_almost_equal(dot(a, x), b)
def test_nils_20Feb04(self):
n = 2
A = random([n, n])+random([n, n])*1j
X = zeros((n, n), 'D')
Ainv = inv(A)
R = identity(n)+identity(n)*0j
for i in arange(0, n):
r = R[:, i]
X[:, i] = solve(A, r)
assert_array_almost_equal(X, Ainv)
def test_random(self):
n = 20
a = random([n, n])
for i in range(n):
a[i, i] = 20*(.1+a[i, i])
for i in range(4):
b = random([n, 3])
x = solve(a, b)
assert_array_almost_equal(dot(a, x), b)
def test_random_complex(self):
n = 20
a = random([n, n]) + 1j * random([n, n])
for i in range(n):
a[i, i] = 20*(.1+a[i, i])
for i in range(2):
b = random([n, 3])
x = solve(a, b)
assert_array_almost_equal(dot(a, x), b)
def test_random_sym(self):
n = 20
a = random([n, n])
for i in range(n):
a[i, i] = abs(20*(.1+a[i, i]))
for j in range(i):
a[i, j] = a[j, i]
for i in range(4):
b = random([n])
x = solve(a, b, assume_a="pos")
assert_array_almost_equal(dot(a, x), b)
def test_random_sym_complex(self):
n = 20
a = random([n, n])
a = a + 1j*random([n, n])
for i in range(n):
a[i, i] = abs(20*(.1+a[i, i]))
for j in range(i):
a[i, j] = conjugate(a[j, i])
b = random([n])+2j*random([n])
for i in range(2):
x = solve(a, b, assume_a="pos")
assert_array_almost_equal(dot(a, x), b)
def test_check_finite(self):
a = [[1, 20], [-30, 4]]
for b in ([[1, 0], [0, 1]], [1, 0],
[[2, 1], [-30, 4]]):
x = solve(a, b, check_finite=False)
assert_array_almost_equal(dot(a, x), b)
def test_scalar_a_and_1D_b(self):
a = 1
b = [1, 2, 3]
x = solve(a, b)
assert_array_almost_equal(x.ravel(), b)
assert_(x.shape == (3,), 'Scalar_a_1D_b test returned wrong shape')
def test_simple2(self):
a = np.array([[1.80, 2.88, 2.05, -0.89],
[525.00, -295.00, -95.00, -380.00],
[1.58, -2.69, -2.90, -1.04],
[-1.11, -0.66, -0.59, 0.80]])
b = np.array([[9.52, 18.47],
[2435.00, 225.00],
[0.77, -13.28],
[-6.22, -6.21]])
x = solve(a, b)
assert_array_almost_equal(x, np.array([[1., -1, 3, -5],
[3, 2, 4, 1]]).T)
def test_simple_complex2(self):
a = np.array([[-1.34+2.55j, 0.28+3.17j, -6.39-2.20j, 0.72-0.92j],
[-1.70-14.10j, 33.10-1.50j, -1.50+13.40j, 12.90+13.80j],
[-3.29-2.39j, -1.91+4.42j, -0.14-1.35j, 1.72+1.35j],
[2.41+0.39j, -0.56+1.47j, -0.83-0.69j, -1.96+0.67j]])
b = np.array([[26.26+51.78j, 31.32-6.70j],
[64.30-86.80j, 158.60-14.20j],
[-5.75+25.31j, -2.15+30.19j],
[1.16+2.57j, -2.56+7.55j]])
x = solve(a, b)
assert_array_almost_equal(x, np. array([[1+1.j, -1-2.j],
[2-3.j, 5+1.j],
[-4-5.j, -3+4.j],
[6.j, 2-3.j]]))
def test_hermitian(self):
# An upper triangular matrix will be used for hermitian matrix a
a = np.array([[-1.84, 0.11-0.11j, -1.78-1.18j, 3.91-1.50j],
[0, -4.63, -1.84+0.03j, 2.21+0.21j],
[0, 0, -8.87, 1.58-0.90j],
[0, 0, 0, -1.36]])
b = np.array([[2.98-10.18j, 28.68-39.89j],
[-9.58+3.88j, -24.79-8.40j],
[-0.77-16.05j, 4.23-70.02j],
[7.79+5.48j, -35.39+18.01j]])
res = np.array([[2.+1j, -8+6j],
[3.-2j, 7-2j],
[-1+2j, -1+5j],
[1.-1j, 3-4j]])
x = solve(a, b, assume_a='her')
assert_array_almost_equal(x, res)
# Also conjugate a and test for lower triangular data
x = solve(a.conj().T, b, assume_a='her', lower=True)
assert_array_almost_equal(x, res)
def test_pos_and_sym(self):
A = np.arange(1, 10).reshape(3, 3)
x = solve(np.tril(A)/9, np.ones(3), assume_a='pos')
assert_array_almost_equal(x, [9., 1.8, 1.])
x = solve(np.tril(A)/9, np.ones(3), assume_a='sym')
assert_array_almost_equal(x, [9., 1.8, 1.])
    def test_singularity(self):
        # Rows 3-6 are identical (and so are rows 7-8), so the matrix is
        # singular and solve must raise instead of returning garbage.
        a = np.array([[1, 0, 0, 0, 0, 0, 1, 0, 1],
                      [1, 1, 1, 0, 0, 0, 1, 0, 1],
                      [0, 1, 1, 0, 0, 0, 1, 0, 1],
                      [1, 0, 1, 1, 1, 1, 0, 0, 0],
                      [1, 0, 1, 1, 1, 1, 0, 0, 0],
                      [1, 0, 1, 1, 1, 1, 0, 0, 0],
                      [1, 0, 1, 1, 1, 1, 0, 0, 0],
                      [1, 1, 1, 1, 1, 1, 1, 1, 1],
                      [1, 1, 1, 1, 1, 1, 1, 1, 1]])
        b = np.arange(9)[:, None]
        assert_raises(LinAlgError, solve, a, b)
    def test_ill_condition_warning(self):
        # `a` is singular to machine precision; with warnings escalated to
        # errors, solve's LinAlgWarning must surface as an exception.
        a = np.array([[1, 1], [1+1e-16, 1-1e-16]])
        b = np.ones(2)
        with warnings.catch_warnings():
            warnings.simplefilter('error')
            assert_raises(LinAlgWarning, solve, a, b)
def test_empty_rhs(self):
a = np.eye(2)
b = [[], []]
x = solve(a, b)
assert_(x.size == 0, 'Returned array is not empty')
assert_(x.shape == (2, 0), 'Returned empty array shape is wrong')
def test_multiple_rhs(self):
a = np.eye(2)
b = np.random.rand(2, 3, 4)
x = solve(a, b)
assert_array_almost_equal(x, b)
    def test_transposed_keyword(self):
        A = np.arange(9).reshape(3, 3) + 1
        # transposed=True solves A.T x = b, so the same lower-triangular
        # input gives a different solution than transposed=False.
        x = solve(np.tril(A)/9, np.ones(3), transposed=True)
        assert_array_almost_equal(x, [1.2, 0.2, 1])
        x = solve(np.tril(A)/9, np.ones(3), transposed=False)
        assert_array_almost_equal(x, [9, -5.4, -1.2])
def test_transposed_notimplemented(self):
a = np.eye(3).astype(complex)
with assert_raises(NotImplementedError):
solve(a, a, transposed=True)
def test_nonsquare_a(self):
assert_raises(ValueError, solve, [1, 2], 1)
def test_size_mismatch_with_1D_b(self):
assert_array_almost_equal(solve(np.eye(3), np.ones(3)), np.ones(3))
assert_raises(ValueError, solve, np.eye(3), np.ones(4))
def test_assume_a_keyword(self):
assert_raises(ValueError, solve, 1, 1, assume_a='zxcv')
@pytest.mark.skip(reason="Failure on OS X (gh-7500), "
"crash on Windows (gh-8064)")
def test_all_type_size_routine_combinations(self):
sizes = [10, 100]
assume_as = ['gen', 'sym', 'pos', 'her']
dtypes = [np.float32, np.float64, np.complex64, np.complex128]
for size, assume_a, dtype in itertools.product(sizes, assume_as,
dtypes):
is_complex = dtype in (np.complex64, np.complex128)
if assume_a == 'her' and not is_complex:
continue
err_msg = ("Failed for size: {}, assume_a: {},"
"dtype: {}".format(size, assume_a, dtype))
a = np.random.randn(size, size).astype(dtype)
b = np.random.randn(size).astype(dtype)
if is_complex:
a = a + (1j*np.random.randn(size, size)).astype(dtype)
if assume_a == 'sym': # Can still be complex but only symmetric
a = a + a.T
elif assume_a == 'her': # Handle hermitian matrices here instead
a = a + a.T.conj()
elif assume_a == 'pos':
a = a.conj().T.dot(a) + 0.1*np.eye(size)
tol = 1e-12 if dtype in (np.float64, np.complex128) else 1e-6
if assume_a in ['gen', 'sym', 'her']:
# We revert the tolerance from before
# 4b4a6e7c34fa4060533db38f9a819b98fa81476c
if dtype in (np.float32, np.complex64):
tol *= 10
x = solve(a, b, assume_a=assume_a)
assert_allclose(a.dot(x), b,
atol=tol * size,
rtol=tol * size,
err_msg=err_msg)
if assume_a == 'sym' and dtype not in (np.complex64,
np.complex128):
x = solve(a, b, assume_a=assume_a, transposed=True)
assert_allclose(a.dot(x), b,
atol=tol * size,
rtol=tol * size,
err_msg=err_msg)
class TestSolveTriangular:
    """Tests for scipy.linalg.solve_triangular (lower/upper, trans modes)."""
    def test_simple(self):
        """
        solve_triangular on a simple 2x2 matrix.
        """
        A = array([[1, 0], [1, 2]])
        b = [1, 1]
        sol = solve_triangular(A, b, lower=True)
        assert_array_almost_equal(sol, [1, 0])
        # check that it works also for non-contiguous matrices
        sol = solve_triangular(A.T, b, lower=False)
        assert_array_almost_equal(sol, [.5, .5])
        # and that it gives the same result as trans=1
        sol = solve_triangular(A, b, lower=True, trans=1)
        assert_array_almost_equal(sol, [.5, .5])
        b = identity(2)
        sol = solve_triangular(A, b, lower=True, trans=1)
        assert_array_almost_equal(sol, [[1., -.5], [0, 0.5]])
    def test_simple_complex(self):
        """
        solve_triangular on a simple 2x2 complex matrix
        """
        A = array([[1+1j, 0], [1j, 2]])
        b = identity(2)
        sol = solve_triangular(A, b, lower=True, trans=1)
        assert_array_almost_equal(sol, [[.5-.5j, -.25-.25j], [0, 0.5]])
        # check other option combinations with complex rhs
        b = np.diag([1+1j, 1+2j])
        sol = solve_triangular(A, b, lower=True, trans=0)
        assert_array_almost_equal(sol, [[1, 0], [-0.5j, 0.5+1j]])
        sol = solve_triangular(A, b, lower=True, trans=1)
        assert_array_almost_equal(sol, [[1, 0.25-0.75j], [0, 0.5+1j]])
        sol = solve_triangular(A, b, lower=True, trans=2)
        assert_array_almost_equal(sol, [[1j, -0.75-0.25j], [0, 0.5+1j]])
        sol = solve_triangular(A.T, b, lower=False, trans=0)
        assert_array_almost_equal(sol, [[1, 0.25-0.75j], [0, 0.5+1j]])
        sol = solve_triangular(A.T, b, lower=False, trans=1)
        assert_array_almost_equal(sol, [[1, 0], [-0.5j, 0.5+1j]])
        sol = solve_triangular(A.T, b, lower=False, trans=2)
        assert_array_almost_equal(sol, [[1j, 0], [-0.5, 0.5+1j]])
    def test_check_finite(self):
        """
        solve_triangular with check_finite=False on a simple 2x2 matrix.
        """
        A = array([[1, 0], [1, 2]])
        b = [1, 1]
        sol = solve_triangular(A, b, lower=True, check_finite=False)
        assert_array_almost_equal(sol, [1, 0])
class TestInv:
    """Tests for scipy.linalg.inv on fixed and random real/complex input."""
    def setup_method(self):
        np.random.seed(1234)
    def test_simple(self):
        a = [[1, 2], [3, 4]]
        a_inv = inv(a)
        assert_array_almost_equal(dot(a, a_inv), np.eye(2))
        a = [[1, 2, 3], [4, 5, 6], [7, 8, 10]]
        a_inv = inv(a)
        assert_array_almost_equal(dot(a, a_inv), np.eye(3))
    def test_random(self):
        n = 20
        for i in range(4):
            a = random([n, n])
            for i in range(n):
                # boost the diagonal to keep the matrix well-conditioned
                a[i, i] = 20*(.1+a[i, i])
            a_inv = inv(a)
            assert_array_almost_equal(dot(a, a_inv),
                                      identity(n))
    def test_simple_complex(self):
        a = [[1, 2], [3, 4j]]
        a_inv = inv(a)
        assert_array_almost_equal(dot(a, a_inv), [[1, 0], [0, 1]])
    def test_random_complex(self):
        n = 20
        for i in range(4):
            a = random([n, n])+2j*random([n, n])
            for i in range(n):
                # boost the diagonal to keep the matrix well-conditioned
                a[i, i] = 20*(.1+a[i, i])
            a_inv = inv(a)
            assert_array_almost_equal(dot(a, a_inv),
                                      identity(n))
    def test_check_finite(self):
        a = [[1, 2], [3, 4]]
        a_inv = inv(a, check_finite=False)
        assert_array_almost_equal(dot(a, a_inv), [[1, 0], [0, 1]])
class TestDet:
    """Tests for scipy.linalg.det: result dtypes, stacked inputs, known
    determinants, dtype rejection, empty arrays and overwrite semantics."""
    def setup_method(self):
        self.rng = np.random.default_rng(1680305949878959)
    def test_1x1_all_singleton_dims(self):
        # 1x1 (and higher-rank singleton) inputs collapse to a scalar result
        a = np.array([[1]])
        deta = det(a)
        assert deta.dtype.char == 'd'
        assert np.isscalar(deta)
        assert deta == 1.
        a = np.array([[[[1]]]], dtype='f')
        deta = det(a)
        assert deta.dtype.char == 'd'
        assert np.isscalar(deta)
        assert deta == 1.
        a = np.array([[[1 + 3.j]]], dtype=np.complex64)
        deta = det(a)
        assert deta.dtype.char == 'D'
        assert np.isscalar(deta)
        assert deta == 1.+3.j
    def test_1by1_stacked_input_output(self):
        # A stack of 1x1 matrices yields the stack of their single entries
        a = self.rng.random([4, 5, 1, 1], dtype=np.float32)
        deta = det(a)
        assert deta.dtype.char == 'd'
        assert deta.shape == (4, 5)
        assert_allclose(deta, np.squeeze(a))
        a = self.rng.random([4, 5, 1, 1], dtype=np.float32)*np.complex64(1.j)
        deta = det(a)
        assert deta.dtype.char == 'D'
        assert deta.shape == (4, 5)
        assert_allclose(deta, np.squeeze(a))
    @pytest.mark.parametrize('shape', [[2, 2], [20, 20], [3, 2, 20, 20]])
    def test_simple_det_shapes_real_complex(self, shape):
        # Agreement with np.linalg.det over plain and stacked shapes
        a = self.rng.uniform(-1., 1., size=shape)
        d1, d2 = det(a), np.linalg.det(a)
        assert_allclose(d1, d2)
        b = self.rng.uniform(-1., 1., size=shape)*1j
        b += self.rng.uniform(-0.5, 0.5, size=shape)
        d3, d4 = det(b), np.linalg.det(b)
        assert_allclose(d3, d4)
    def test_for_known_det_values(self):
        # Hadamard8
        a = np.array([[1, 1, 1, 1, 1, 1, 1, 1],
                      [1, -1, 1, -1, 1, -1, 1, -1],
                      [1, 1, -1, -1, 1, 1, -1, -1],
                      [1, -1, -1, 1, 1, -1, -1, 1],
                      [1, 1, 1, 1, -1, -1, -1, -1],
                      [1, -1, 1, -1, -1, 1, -1, 1],
                      [1, 1, -1, -1, -1, -1, 1, 1],
                      [1, -1, -1, 1, -1, 1, 1, -1]])
        assert_allclose(det(a), 4096.)
        # consecutive number array always singular
        assert_allclose(det(np.arange(25).reshape(5, 5)), 0.)
        # simple anti-diagonal block array
        # Upper right has det (-2+1j) and lower right has (-2-1j)
        # det(a) = - (-2+1j) (-2-1j) = 5.
        a = np.array([[0.+0.j, 0.+0.j, 0.-1.j, 1.-1.j],
                      [0.+0.j, 0.+0.j, 1.+0.j, 0.-1.j],
                      [0.+1.j, 1.+1.j, 0.+0.j, 0.+0.j],
                      [1.+0.j, 0.+1.j, 0.+0.j, 0.+0.j]], dtype=np.complex64)
        assert_allclose(det(a), 5.+0.j)
        # Fiedler companion complexified
        # >>> a = scipy.linalg.fiedler_companion(np.arange(1, 10))
        a = np.array([[-2., -3., 1., 0., 0., 0., 0., 0.],
                      [1., 0., 0., 0., 0., 0., 0., 0.],
                      [0., -4., 0., -5., 1., 0., 0., 0.],
                      [0., 1., 0., 0., 0., 0., 0., 0.],
                      [0., 0., 0., -6., 0., -7., 1., 0.],
                      [0., 0., 0., 1., 0., 0., 0., 0.],
                      [0., 0., 0., 0., 0., -8., 0., -9.],
                      [0., 0., 0., 0., 0., 1., 0., 0.]])*1.j
        assert_allclose(det(a), 9.)
    # g and G dtypes are handled differently in windows and other platforms
    @pytest.mark.parametrize('typ', [x for x in np.typecodes['All'][:20]
                                     if x not in 'gG'])
    def test_sample_compatible_dtype_input(self, typ):
        n = 4
        a = self.rng.random([n, n]).astype(typ)  # value is not important
        assert isinstance(det(a), (np.float64, np.complex128))
    def test_incompatible_dtype_input(self):
        # Double backslashes needed for escaping pytest regex.
        msg = 'cannot be cast to float\\(32, 64\\)'
        for c, t in zip('SUO', ['bytes8', 'str32', 'object']):
            with assert_raises(TypeError, match=msg):
                det(np.array([['a', 'b']]*2, dtype=c))
        with assert_raises(TypeError, match=msg):
            det(np.array([[b'a', b'b']]*2, dtype='V'))
        with assert_raises(TypeError, match=msg):
            det(np.array([[100, 200]]*2, dtype='datetime64[s]'))
        with assert_raises(TypeError, match=msg):
            det(np.array([[100, 200]]*2, dtype='timedelta64[s]'))
    def test_empty_edge_cases(self):
        # det of a 0x0 matrix is the empty product, i.e. 1
        assert_allclose(det(np.empty([0, 0])), 1.)
        assert_allclose(det(np.empty([0, 0, 0])), np.array([]))
        assert_allclose(det(np.empty([3, 0, 0])), np.array([1., 1., 1.]))
        with assert_raises(ValueError, match='Last 2 dimensions'):
            det(np.empty([0, 0, 3]))
        with assert_raises(ValueError, match='at least two-dimensional'):
            det(np.array([]))
        with assert_raises(ValueError, match='Last 2 dimensions'):
            det(np.array([[]]))
        with assert_raises(ValueError, match='Last 2 dimensions'):
            det(np.array([[[]]]))
    def test_overwrite_a(self):
        # If all conditions are met then input should be overwritten;
        #  - dtype is one of 'fdFD'
        #  - C-contiguous
        #  - writeable
        a = np.arange(9).reshape(3, 3).astype(np.float32)
        ac = a.copy()
        deta = det(ac, overwrite_a=True)
        assert_allclose(deta, 0.)
        assert not (a == ac).all()
    def test_readonly_array(self):
        a = np.array([[2., 0., 1.], [5., 3., -1.], [1., 1., 1.]])
        a.setflags(write=False)
        # overwrite_a will be overridden
        assert_allclose(det(a, overwrite_a=True), 10.)
    def test_simple_check_finite(self):
        a = [[1, 2], [3, np.inf]]
        with assert_raises(ValueError, match='array must not contain'):
            det(a)
def direct_lstsq(a, b, cmplx=0):
    """Least-squares reference solution via the normal equations.

    Solves (a^H a) x = (a^H b), conjugating the transpose only when
    ``cmplx`` is truthy; lstsq results are compared against this.
    """
    a_h = conjugate(transpose(a)) if cmplx else transpose(a)
    return solve(dot(a_h, a), dot(a_h, b))
class TestLstsq:
    """Tests for scipy.linalg.lstsq across LAPACK drivers, dtypes,
    exact/over/under-determined systems and overwrite flags."""
    # None selects lstsq's default driver
    lapack_drivers = ('gelsd', 'gelss', 'gelsy', None)
    def test_simple_exact(self):
        for dtype in REAL_DTYPES:
            a = np.array([[1, 20], [-30, 4]], dtype=dtype)
            for lapack_driver in TestLstsq.lapack_drivers:
                for overwrite in (True, False):
                    for bt in (((1, 0), (0, 1)), (1, 0),
                               ((2, 1), (-30, 4))):
                        # Store values in case they are overwritten
                        # later
                        a1 = a.copy()
                        b = np.array(bt, dtype=dtype)
                        b1 = b.copy()
                        out = lstsq(a1, b1,
                                    lapack_driver=lapack_driver,
                                    overwrite_a=overwrite,
                                    overwrite_b=overwrite)
                        x = out[0]
                        r = out[2]
                        assert_(r == 2,
                                'expected efficient rank 2, got %s' % r)
                        assert_allclose(dot(a, x), b,
                                        atol=25 * _eps_cast(a1.dtype),
                                        rtol=25 * _eps_cast(a1.dtype),
                                        err_msg="driver: %s" % lapack_driver)
    def test_simple_overdet(self):
        for dtype in REAL_DTYPES:
            a = np.array([[1, 2], [4, 5], [3, 4]], dtype=dtype)
            b = np.array([1, 2, 3], dtype=dtype)
            for lapack_driver in TestLstsq.lapack_drivers:
                for overwrite in (True, False):
                    # Store values in case they are overwritten later
                    a1 = a.copy()
                    b1 = b.copy()
                    out = lstsq(a1, b1, lapack_driver=lapack_driver,
                                overwrite_a=overwrite,
                                overwrite_b=overwrite)
                    x = out[0]
                    # gelsy does not return residuals; recompute them
                    if lapack_driver == 'gelsy':
                        residuals = np.sum((b - a.dot(x))**2)
                    else:
                        residuals = out[1]
                    r = out[2]
                    assert_(r == 2, 'expected efficient rank 2, got %s' % r)
                    assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0),
                                    residuals,
                                    rtol=25 * _eps_cast(a1.dtype),
                                    atol=25 * _eps_cast(a1.dtype),
                                    err_msg="driver: %s" % lapack_driver)
                    assert_allclose(x, (-0.428571428571429, 0.85714285714285),
                                    rtol=25 * _eps_cast(a1.dtype),
                                    atol=25 * _eps_cast(a1.dtype),
                                    err_msg="driver: %s" % lapack_driver)
    def test_simple_overdet_complex(self):
        for dtype in COMPLEX_DTYPES:
            a = np.array([[1+2j, 2], [4, 5], [3, 4]], dtype=dtype)
            b = np.array([1, 2+4j, 3], dtype=dtype)
            for lapack_driver in TestLstsq.lapack_drivers:
                for overwrite in (True, False):
                    # Store values in case they are overwritten later
                    a1 = a.copy()
                    b1 = b.copy()
                    out = lstsq(a1, b1, lapack_driver=lapack_driver,
                                overwrite_a=overwrite,
                                overwrite_b=overwrite)
                    x = out[0]
                    # gelsy does not return residuals; recompute them
                    if lapack_driver == 'gelsy':
                        res = b - a.dot(x)
                        residuals = np.sum(res * res.conj())
                    else:
                        residuals = out[1]
                    r = out[2]
                    assert_(r == 2, 'expected efficient rank 2, got %s' % r)
                    assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0),
                                    residuals,
                                    rtol=25 * _eps_cast(a1.dtype),
                                    atol=25 * _eps_cast(a1.dtype),
                                    err_msg="driver: %s" % lapack_driver)
                    assert_allclose(
                        x, (-0.4831460674157303 + 0.258426966292135j,
                            0.921348314606741 + 0.292134831460674j),
                        rtol=25 * _eps_cast(a1.dtype),
                        atol=25 * _eps_cast(a1.dtype),
                        err_msg="driver: %s" % lapack_driver)
    def test_simple_underdet(self):
        for dtype in REAL_DTYPES:
            a = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
            b = np.array([1, 2], dtype=dtype)
            for lapack_driver in TestLstsq.lapack_drivers:
                for overwrite in (True, False):
                    # Store values in case they are overwritten later
                    a1 = a.copy()
                    b1 = b.copy()
                    out = lstsq(a1, b1, lapack_driver=lapack_driver,
                                overwrite_a=overwrite,
                                overwrite_b=overwrite)
                    x = out[0]
                    r = out[2]
                    assert_(r == 2, 'expected efficient rank 2, got %s' % r)
                    assert_allclose(x, (-0.055555555555555, 0.111111111111111,
                                        0.277777777777777),
                                    rtol=25 * _eps_cast(a1.dtype),
                                    atol=25 * _eps_cast(a1.dtype),
                                    err_msg="driver: %s" % lapack_driver)
    def test_random_exact(self):
        rng = np.random.RandomState(1234)
        for dtype in REAL_DTYPES:
            for n in (20, 200):
                for lapack_driver in TestLstsq.lapack_drivers:
                    for overwrite in (True, False):
                        a = np.asarray(rng.random([n, n]), dtype=dtype)
                        for i in range(n):
                            # boost diagonal for good conditioning
                            a[i, i] = 20 * (0.1 + a[i, i])
                        for i in range(4):
                            b = np.asarray(rng.random([n, 3]), dtype=dtype)
                            # Store values in case they are overwritten later
                            a1 = a.copy()
                            b1 = b.copy()
                            out = lstsq(a1, b1,
                                        lapack_driver=lapack_driver,
                                        overwrite_a=overwrite,
                                        overwrite_b=overwrite)
                            x = out[0]
                            r = out[2]
                            assert_(r == n, 'expected efficient rank {}, '
                                    'got {}'.format(n, r))
                            if dtype is np.float32:
                                assert_allclose(
                                          dot(a, x), b,
                                          rtol=500 * _eps_cast(a1.dtype),
                                          atol=500 * _eps_cast(a1.dtype),
                                          err_msg="driver: %s" % lapack_driver)
                            else:
                                assert_allclose(
                                          dot(a, x), b,
                                          rtol=1000 * _eps_cast(a1.dtype),
                                          atol=1000 * _eps_cast(a1.dtype),
                                          err_msg="driver: %s" % lapack_driver)
    @pytest.mark.skipif(IS_MUSL, reason="may segfault on Alpine, see gh-17630")
    def test_random_complex_exact(self):
        rng = np.random.RandomState(1234)
        for dtype in COMPLEX_DTYPES:
            for n in (20, 200):
                for lapack_driver in TestLstsq.lapack_drivers:
                    for overwrite in (True, False):
                        a = np.asarray(rng.random([n, n]) + 1j*rng.random([n, n]),
                                       dtype=dtype)
                        for i in range(n):
                            # boost diagonal for good conditioning
                            a[i, i] = 20 * (0.1 + a[i, i])
                        for i in range(2):
                            b = np.asarray(rng.random([n, 3]), dtype=dtype)
                            # Store values in case they are overwritten later
                            a1 = a.copy()
                            b1 = b.copy()
                            out = lstsq(a1, b1, lapack_driver=lapack_driver,
                                        overwrite_a=overwrite,
                                        overwrite_b=overwrite)
                            x = out[0]
                            r = out[2]
                            assert_(r == n, 'expected efficient rank {}, '
                                    'got {}'.format(n, r))
                            if dtype is np.complex64:
                                assert_allclose(
                                          dot(a, x), b,
                                          rtol=400 * _eps_cast(a1.dtype),
                                          atol=400 * _eps_cast(a1.dtype),
                                          err_msg="driver: %s" % lapack_driver)
                            else:
                                assert_allclose(
                                          dot(a, x), b,
                                          rtol=1000 * _eps_cast(a1.dtype),
                                          atol=1000 * _eps_cast(a1.dtype),
                                          err_msg="driver: %s" % lapack_driver)
    def test_random_overdet(self):
        rng = np.random.RandomState(1234)
        for dtype in REAL_DTYPES:
            for (n, m) in ((20, 15), (200, 2)):
                for lapack_driver in TestLstsq.lapack_drivers:
                    for overwrite in (True, False):
                        a = np.asarray(rng.random([n, m]), dtype=dtype)
                        for i in range(m):
                            # boost diagonal for good conditioning
                            a[i, i] = 20 * (0.1 + a[i, i])
                        for i in range(4):
                            b = np.asarray(rng.random([n, 3]), dtype=dtype)
                            # Store values in case they are overwritten later
                            a1 = a.copy()
                            b1 = b.copy()
                            out = lstsq(a1, b1,
                                        lapack_driver=lapack_driver,
                                        overwrite_a=overwrite,
                                        overwrite_b=overwrite)
                            x = out[0]
                            r = out[2]
                            assert_(r == m, 'expected efficient rank {}, '
                                    'got {}'.format(m, r))
                            assert_allclose(
                                          x, direct_lstsq(a, b, cmplx=0),
                                          rtol=25 * _eps_cast(a1.dtype),
                                          atol=25 * _eps_cast(a1.dtype),
                                          err_msg="driver: %s" % lapack_driver)
    def test_random_complex_overdet(self):
        rng = np.random.RandomState(1234)
        for dtype in COMPLEX_DTYPES:
            for (n, m) in ((20, 15), (200, 2)):
                for lapack_driver in TestLstsq.lapack_drivers:
                    for overwrite in (True, False):
                        a = np.asarray(rng.random([n, m]) + 1j*rng.random([n, m]),
                                       dtype=dtype)
                        for i in range(m):
                            # boost diagonal for good conditioning
                            a[i, i] = 20 * (0.1 + a[i, i])
                        for i in range(2):
                            b = np.asarray(rng.random([n, 3]), dtype=dtype)
                            # Store values in case they are overwritten
                            # later
                            a1 = a.copy()
                            b1 = b.copy()
                            out = lstsq(a1, b1,
                                        lapack_driver=lapack_driver,
                                        overwrite_a=overwrite,
                                        overwrite_b=overwrite)
                            x = out[0]
                            r = out[2]
                            assert_(r == m, 'expected efficient rank {}, '
                                    'got {}'.format(m, r))
                            assert_allclose(
                                      x, direct_lstsq(a, b, cmplx=1),
                                      rtol=25 * _eps_cast(a1.dtype),
                                      atol=25 * _eps_cast(a1.dtype),
                                      err_msg="driver: %s" % lapack_driver)
    def test_check_finite(self):
        with suppress_warnings() as sup:
            # On (some) OSX this tests triggers a warning (gh-7538)
            sup.filter(RuntimeWarning,
                       "internal gelsd driver lwork query error,.*"
                       "Falling back to 'gelss' driver.")
            at = np.array(((1, 20), (-30, 4)))
            for dtype, bt, lapack_driver, overwrite, check_finite in \
                itertools.product(REAL_DTYPES,
                                  (((1, 0), (0, 1)), (1, 0), ((2, 1), (-30, 4))),
                                  TestLstsq.lapack_drivers,
                                  (True, False),
                                  (True, False)):
                a = at.astype(dtype)
                b = np.array(bt, dtype=dtype)
                # Store values in case they are overwritten
                # later
                a1 = a.copy()
                b1 = b.copy()
                out = lstsq(a1, b1, lapack_driver=lapack_driver,
                            check_finite=check_finite, overwrite_a=overwrite,
                            overwrite_b=overwrite)
                x = out[0]
                r = out[2]
                assert_(r == 2, 'expected efficient rank 2, got %s' % r)
                assert_allclose(dot(a, x), b,
                                rtol=25 * _eps_cast(a.dtype),
                                atol=25 * _eps_cast(a.dtype),
                                err_msg="driver: %s" % lapack_driver)
    def test_zero_size(self):
        for a_shape, b_shape in (((0, 2), (0,)),
                                 ((0, 4), (0, 2)),
                                 ((4, 0), (4,)),
                                 ((4, 0), (4, 2))):
            b = np.ones(b_shape)
            x, residues, rank, s = lstsq(np.zeros(a_shape), b)
            assert_equal(x, np.zeros((a_shape[1],) + b_shape[1:]))
            residues_should_be = (np.empty((0,)) if a_shape[1]
                                  else np.linalg.norm(b, axis=0)**2)
            assert_equal(residues, residues_should_be)
            assert_(rank == 0, 'expected rank 0')
            assert_equal(s, np.empty((0,)))
class TestPinv:
    """Tests for scipy.linalg.pinv: square/rectangular/singular input,
    atol/rtol truncation and the cond/rcond deprecation."""
    def setup_method(self):
        np.random.seed(1234)
    def test_simple_real(self):
        a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
        a_pinv = pinv(a)
        assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
    def test_simple_complex(self):
        a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]],
                   dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]],
                                             dtype=float))
        a_pinv = pinv(a)
        assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
    def test_simple_singular(self):
        a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)
        a_pinv = pinv(a)
        expected = array([[-6.38888889e-01, -1.66666667e-01, 3.05555556e-01],
                          [-5.55555556e-02, 1.30136518e-16, 5.55555556e-02],
                          [5.27777778e-01, 1.66666667e-01, -1.94444444e-01]])
        assert_array_almost_equal(a_pinv, expected)
    def test_simple_cols(self):
        a = array([[1, 2, 3], [4, 5, 6]], dtype=float)
        a_pinv = pinv(a)
        expected = array([[-0.94444444, 0.44444444],
                          [-0.11111111, 0.11111111],
                          [0.72222222, -0.22222222]])
        assert_array_almost_equal(a_pinv, expected)
    def test_simple_rows(self):
        a = array([[1, 2], [3, 4], [5, 6]], dtype=float)
        a_pinv = pinv(a)
        expected = array([[-1.33333333, -0.33333333, 0.66666667],
                          [1.08333333, 0.33333333, -0.41666667]])
        assert_array_almost_equal(a_pinv, expected)
    def test_check_finite(self):
        a = array([[1, 2, 3], [4, 5, 6.], [7, 8, 10]])
        a_pinv = pinv(a, check_finite=False)
        assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
    def test_native_list_argument(self):
        a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        a_pinv = pinv(a)
        expected = array([[-6.38888889e-01, -1.66666667e-01, 3.05555556e-01],
                          [-5.55555556e-02, 1.30136518e-16, 5.55555556e-02],
                          [5.27777778e-01, 1.66666667e-01, -1.94444444e-01]])
        assert_array_almost_equal(a_pinv, expected)
    def test_atol_rtol(self):
        n = 12
        # get a random ortho matrix for shuffling
        q, _ = qr(np.random.rand(n, n))
        a_m = np.arange(35.0).reshape(7, 5)
        a = a_m.copy()
        a[0, 0] = 0.001
        atol = 1e-5
        rtol = 0.05
        # svds of a_m is ~ [116.906, 4.234, tiny, tiny, tiny]
        # svds of a is ~ [116.906, 4.234, 4.62959e-04, tiny, tiny]
        # Just abs cutoff such that we arrive at a_modified
        a_p = pinv(a_m, atol=atol, rtol=0.)
        adiff1 = a @ a_p @ a - a
        adiff2 = a_m @ a_p @ a_m - a_m
        # Now adiff1 should be around atol value while adiff2 should be
        # relatively tiny
        assert_allclose(np.linalg.norm(adiff1), 5e-4, atol=5.e-4)
        assert_allclose(np.linalg.norm(adiff2), 5e-14, atol=5.e-14)
        # Now do the same but remove another sv ~4.234 via rtol
        a_p = pinv(a_m, atol=atol, rtol=rtol)
        adiff1 = a @ a_p @ a - a
        adiff2 = a_m @ a_p @ a_m - a_m
        assert_allclose(np.linalg.norm(adiff1), 4.233, rtol=0.01)
        assert_allclose(np.linalg.norm(adiff2), 4.233, rtol=0.01)
    @pytest.mark.parametrize("cond", [1, None, _NoValue])
    @pytest.mark.parametrize("rcond", [1, None, _NoValue])
    def test_deprecation(self, cond, rcond):
        if cond is _NoValue and rcond is _NoValue:
            # the defaults if cond/rcond aren't set -> no warning
            pinv(np.ones((2,2)), cond=cond, rcond=rcond)
        else:
            # at least one of cond/rcond has a user-supplied value -> warn
            with pytest.deprecated_call(match='"cond" and "rcond"'):
                pinv(np.ones((2,2)), cond=cond, rcond=rcond)
class TestPinvSymmetric:
    """Tests for scipy.linalg.pinvh on symmetric/hermitian input."""
    def setup_method(self):
        np.random.seed(1234)
    def test_simple_real(self):
        a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
        a = np.dot(a, a.T)
        a_pinv = pinvh(a)
        assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
    def test_nonpositive(self):
        a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)
        a = np.dot(a, a.T)
        u, s, vt = np.linalg.svd(a)
        s[0] *= -1
        a = np.dot(u * s, vt)  # a is now symmetric non-positive and singular
        a_pinv = pinv(a)
        a_pinvh = pinvh(a)
        assert_array_almost_equal(a_pinv, a_pinvh)
    def test_simple_complex(self):
        a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]],
                   dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]],
                                             dtype=float))
        a = np.dot(a, a.conj().T)
        a_pinv = pinvh(a)
        assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
    def test_native_list_argument(self):
        a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
        a = np.dot(a, a.T)
        a_pinv = pinvh(a.tolist())
        assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
    def test_atol_rtol(self):
        n = 12
        # get a random ortho matrix for shuffling
        q, _ = qr(np.random.rand(n, n))
        a = np.diag([4, 3, 2, 1, 0.99e-4, 0.99e-5] + [0.99e-6]*(n-6))
        a = q.T @ a @ q
        a_m = np.diag([4, 3, 2, 1, 0.99e-4, 0.] + [0.]*(n-6))
        a_m = q.T @ a_m @ q
        atol = 1e-5
        rtol = (4.01e-4 - 4e-5)/4
        # Just abs cutoff such that we arrive at a_modified
        a_p = pinvh(a, atol=atol, rtol=0.)
        adiff1 = a @ a_p @ a - a
        adiff2 = a_m @ a_p @ a_m - a_m
        # Now adiff1 should dance around atol value since truncation
        # while adiff2 should be relatively tiny
        assert_allclose(norm(adiff1), atol, rtol=0.1)
        assert_allclose(norm(adiff2), 1e-12, atol=1e-11)
        # Now do the same but through rtol cancelling atol value
        a_p = pinvh(a, atol=atol, rtol=rtol)
        adiff1 = a @ a_p @ a - a
        adiff2 = a_m @ a_p @ a_m - a_m
        # adiff1 and adiff2 should be elevated to ~1e-4 due to mismatch
        assert_allclose(norm(adiff1), 1e-4, rtol=0.1)
        assert_allclose(norm(adiff2), 1e-4, rtol=0.1)
@pytest.mark.parametrize('scale', (1e-20, 1., 1e20))
@pytest.mark.parametrize('pinv_', (pinv, pinvh))
def test_auto_rcond(scale, pinv_):
    # The default cutoff must adapt to the matrix's overall scale: the same
    # well-separated diagonal spectrum inverts exactly at any `scale`.
    x = np.array([[1, 0], [0, 1e-10]]) * scale
    expected = np.diag(1. / np.diag(x))
    x_inv = pinv_(x)
    assert_allclose(x_inv, expected)
class TestVectorNorms:
    """Tests for scipy.linalg.norm on vectors: dtypes, overflow/stability
    vs numpy, zero-'norm', axis/keepdims and very large inputs."""
    def test_types(self):
        for dtype in np.typecodes['AllFloat']:
            x = np.array([1, 2, 3], dtype=dtype)
            tol = max(1e-15, np.finfo(dtype).eps.real * 20)
            assert_allclose(norm(x), np.sqrt(14), rtol=tol)
            assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol)
        for dtype in np.typecodes['Complex']:
            x = np.array([1j, 2j, 3j], dtype=dtype)
            tol = max(1e-15, np.finfo(dtype).eps.real * 20)
            assert_allclose(norm(x), np.sqrt(14), rtol=tol)
            assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol)
    def test_overflow(self):
        # unlike numpy's norm, this one is
        # safer on overflow
        a = array([1e20], dtype=float32)
        assert_almost_equal(norm(a), a)
    def test_stable(self):
        # more stable than numpy's norm
        a = array([1e4] + [1]*10000, dtype=float32)
        try:
            # snrm in double precision; we obtain the same as for float64
            # -- large atol needed due to varying blas implementations
            assert_allclose(norm(a) - 1e4, 0.5, atol=1e-2)
        except AssertionError:
            # snrm implemented in single precision, == np.linalg.norm result
            msg = ": Result should equal either 0.0 or 0.5 (depending on " \
                  "implementation of snrm2)."
            assert_almost_equal(norm(a) - 1e4, 0.0, err_msg=msg)
    def test_zero_norm(self):
        # ord=0 counts the nonzero entries
        assert_equal(norm([1, 0, 3], 0), 2)
        assert_equal(norm([1, 2, 3], 0), 3)
    def test_axis_kwd(self):
        a = np.array([[[2, 1], [3, 4]]] * 2, 'd')
        assert_allclose(norm(a, axis=1), [[3.60555128, 4.12310563]] * 2)
        assert_allclose(norm(a, 1, axis=1), [[5.] * 2] * 2)
    def test_keepdims_kwd(self):
        a = np.array([[[2, 1], [3, 4]]] * 2, 'd')
        b = norm(a, axis=1, keepdims=True)
        assert_allclose(b, [[[3.60555128, 4.12310563]]] * 2)
        assert_(b.shape == (2, 1, 2))
        assert_allclose(norm(a, 1, axis=2, keepdims=True), [[[3.], [7.]]] * 2)
    @pytest.mark.skipif(not HAS_ILP64, reason="64-bit BLAS required")
    def test_large_vector(self):
        check_free_memory(free_mb=17000)
        x = np.zeros([2**31], dtype=np.float64)
        x[-1] = 1
        res = norm(x)
        del x
        assert_allclose(res, 1.0)
class TestMatrixNorms:
    """Tests for scipy.linalg.norm on matrices, compared against numpy."""
    def test_matrix_norms(self):
        # Not all of these are matrix norms in the most technical sense.
        np.random.seed(1234)
        for n, m in (1, 1), (1, 3), (3, 1), (4, 4), (4, 5), (5, 4):
            for t in np.single, np.double, np.csingle, np.cdouble, np.int64:
                A = 10 * np.random.randn(n, m).astype(t)
                if np.issubdtype(A.dtype, np.complexfloating):
                    A = (A + 10j * np.random.randn(n, m)).astype(t)
                    t_high = np.cdouble
                else:
                    t_high = np.double
                for order in (None, 'fro', 1, -1, 2, -2, np.inf, -np.inf):
                    actual = norm(A, ord=order)
                    desired = np.linalg.norm(A, ord=order)
                    # SciPy may return higher precision matrix norms.
                    # This is a consequence of using LAPACK.
                    if not np.allclose(actual, desired):
                        desired = np.linalg.norm(A.astype(t_high), ord=order)
                        assert_allclose(actual, desired)
    def test_axis_kwd(self):
        a = np.array([[[2, 1], [3, 4]]] * 2, 'd')
        b = norm(a, ord=np.inf, axis=(1, 0))
        c = norm(np.swapaxes(a, 0, 1), ord=np.inf, axis=(0, 1))
        d = norm(a, ord=1, axis=(0, 1))
        assert_allclose(b, c)
        assert_allclose(c, d)
        assert_allclose(b, d)
        assert_(b.shape == c.shape == d.shape)
        b = norm(a, ord=1, axis=(1, 0))
        c = norm(np.swapaxes(a, 0, 1), ord=1, axis=(0, 1))
        d = norm(a, ord=np.inf, axis=(0, 1))
        assert_allclose(b, c)
        assert_allclose(c, d)
        assert_allclose(b, d)
        assert_(b.shape == c.shape == d.shape)
    def test_keepdims_kwd(self):
        a = np.arange(120, dtype='d').reshape(2, 3, 4, 5)
        b = norm(a, ord=np.inf, axis=(1, 0), keepdims=True)
        c = norm(a, ord=1, axis=(0, 1), keepdims=True)
        assert_allclose(b, c)
        assert_(b.shape == c.shape)
class TestOverwrite:
    """Check that solvers do not clobber their inputs unless asked to
    (via the shared assert_no_overwrite helper)."""
    def test_solve(self):
        assert_no_overwrite(solve, [(3, 3), (3,)])
    def test_solve_triangular(self):
        assert_no_overwrite(solve_triangular, [(3, 3), (3,)])
    def test_solve_banded(self):
        assert_no_overwrite(lambda ab, b: solve_banded((2, 1), ab, b),
                            [(4, 6), (6,)])
    def test_solveh_banded(self):
        assert_no_overwrite(solveh_banded, [(2, 6), (6,)])
    def test_inv(self):
        assert_no_overwrite(inv, [(3, 3)])
    def test_det(self):
        assert_no_overwrite(det, [(3, 3)])
    def test_lstsq(self):
        assert_no_overwrite(lstsq, [(3, 2), (3,)])
    def test_pinv(self):
        assert_no_overwrite(pinv, [(3, 3)])
    def test_pinvh(self):
        assert_no_overwrite(pinvh, [(3, 3)])
class TestSolveCirculant:
    """Tests for scipy.linalg.solve_circulant, validated against
    solve(circulant(c), b)."""
    def test_basic1(self):
        c = np.array([1, 2, 3, 5])
        b = np.array([1, -1, 1, 0])
        x = solve_circulant(c, b)
        y = solve(circulant(c), b)
        assert_allclose(x, y)
    def test_basic2(self):
        # b is a 2-d matrix.
        c = np.array([1, 2, -3, -5])
        b = np.arange(12).reshape(4, 3)
        x = solve_circulant(c, b)
        y = solve(circulant(c), b)
        assert_allclose(x, y)
    def test_basic3(self):
        # b is a 3-d matrix.
        c = np.array([1, 2, -3, -5])
        b = np.arange(24).reshape(4, 3, 2)
        x = solve_circulant(c, b)
        y = solve(circulant(c), b)
        assert_allclose(x, y)
    def test_complex(self):
        # Complex b and c
        c = np.array([1+2j, -3, 4j, 5])
        b = np.arange(8).reshape(4, 2) + 0.5j
        x = solve_circulant(c, b)
        y = solve(circulant(c), b)
        assert_allclose(x, y)
    def test_random_b_and_c(self):
        # Random b and c
        np.random.seed(54321)
        c = np.random.randn(50)
        b = np.random.randn(50)
        x = solve_circulant(c, b)
        y = solve(circulant(c), b)
        assert_allclose(x, y)
    def test_singular(self):
        # c gives a singular circulant matrix.
        c = np.array([1, 1, 0, 0])
        b = np.array([1, 2, 3, 4])
        x = solve_circulant(c, b, singular='lstsq')
        y, res, rnk, s = lstsq(circulant(c), b)
        assert_allclose(x, y)
        # default singular='raise' must raise instead
        assert_raises(LinAlgError, solve_circulant, x, y)
    def test_axis_args(self):
        # Test use of caxis, baxis and outaxis.
        # c has shape (2, 1, 4)
        c = np.array([[[-1, 2.5, 3, 3.5]], [[1, 6, 6, 6.5]]])
        # b has shape (3, 4)
        b = np.array([[0, 0, 1, 1], [1, 1, 0, 0], [1, -1, 0, 0]])
        x = solve_circulant(c, b, baxis=1)
        assert_equal(x.shape, (4, 2, 3))
        expected = np.empty_like(x)
        expected[:, 0, :] = solve(circulant(c[0]), b.T)
        expected[:, 1, :] = solve(circulant(c[1]), b.T)
        assert_allclose(x, expected)
        x = solve_circulant(c, b, baxis=1, outaxis=-1)
        assert_equal(x.shape, (2, 3, 4))
        assert_allclose(np.moveaxis(x, -1, 0), expected)
        # np.swapaxes(c, 1, 2) has shape (2, 4, 1); b.T has shape (4, 3).
        x = solve_circulant(np.swapaxes(c, 1, 2), b.T, caxis=1)
        assert_equal(x.shape, (4, 2, 3))
        assert_allclose(x, expected)
    def test_native_list_arguments(self):
        # Same as test_basic1 using python's native list.
        c = [1, 2, 3, 5]
        b = [1, -1, 1, 0]
        x = solve_circulant(c, b)
        y = solve(circulant(c), b)
        assert_allclose(x, y)
class TestMatrix_Balance:
    """Tests for scipy.linalg.matrix_balance: input validation, scaling,
    permutation and the separate=1 return form."""
    def test_string_arg(self):
        assert_raises(ValueError, matrix_balance, 'Some string for fail')
    def test_infnan_arg(self):
        assert_raises(ValueError, matrix_balance,
                      np.array([[1, 2], [3, np.inf]]))
        assert_raises(ValueError, matrix_balance,
                      np.array([[1, 2], [3, np.nan]]))
    def test_scaling(self):
        _, y = matrix_balance(np.array([[1000, 1], [1000, 0]]))
        # Pre/post LAPACK 3.5.0 gives the same result up to an offset
        # since in each case col norm is x1000 greater and
        # 1000 / 32 ~= 1 * 32 hence balanced with 2 ** 5.
        assert_allclose(np.diff(np.log2(np.diag(y))), [5])
    def test_scaling_order(self):
        A = np.array([[1, 0, 1e-4], [1, 1, 1e-2], [1e4, 1e2, 1]])
        x, y = matrix_balance(A)
        # the returned pair must satisfy the similarity x = y^-1 A y
        assert_allclose(solve(y, A).dot(y), x)
    def test_separate(self):
        _, (y, z) = matrix_balance(np.array([[1000, 1], [1000, 0]]),
                                   separate=1)
        assert_equal(np.diff(np.log2(y)), [5])
        assert_allclose(z, np.arange(2))
    def test_permutation(self):
        A = block_diag(np.ones((2, 2)), np.tril(np.ones((2, 2))),
                       np.ones((3, 3)))
        x, (y, z) = matrix_balance(A, separate=1)
        assert_allclose(y, np.ones_like(y))
        assert_allclose(z, np.array([0, 1, 6, 5, 4, 3, 2]))
    def test_perm_and_scaling(self):
        # Matrix with its diagonal removed
        cases = (  # Case 0
                 np.array([[0., 0., 0., 0., 0.000002],
                           [0., 0., 0., 0., 0.],
                           [2., 2., 0., 0., 0.],
                           [2., 2., 0., 0., 0.],
                           [0., 0., 0.000002, 0., 0.]]),
                 # Case 1 user reported GH-7258
                 np.array([[-0.5, 0., 0., 0.],
                           [0., -1., 0., 0.],
                           [1., 0., -0.5, 0.],
                           [0., 1., 0., -1.]]),
                 # Case 2 user reported GH-7258
                 np.array([[-3., 0., 1., 0.],
                           [-1., -1., -0., 1.],
                           [-3., -0., -0., 0.],
                           [-1., -0., 1., -1.]])
                 )
        for A in cases:
            x, y = matrix_balance(A)
            x, (s, p) = matrix_balance(A, separate=1)
            # the separate form must reproduce the combined transform
            ip = np.empty_like(p)
            ip[p] = np.arange(A.shape[0])
            assert_allclose(y, np.diag(s)[ip, :])
            assert_allclose(solve(y, A).dot(y), x)
| 69,820
| 37.490077
| 82
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_fblas.py
|
# Test interfaces to fortran blas.
#
# The tests are more of interface than they are of the underlying blas.
# Only very small matrices checked -- N=3 or so.
#
# !! Complex calculations really aren't checked that carefully.
# !! Only real valued complex numbers are used in tests.
from numpy import float32, float64, complex64, complex128, arange, array, \
zeros, shape, transpose, newaxis, common_type, conjugate
from scipy.linalg import _fblas as fblas
from numpy.testing import assert_array_equal, \
assert_allclose, assert_array_almost_equal, assert_
import pytest
# decimal accuracy to require between Python and LAPACK/BLAS calculations
accuracy = 5
# Since numpy.dot likely uses the same blas, use this routine
# to check.
def matrixmultiply(a, b):
    """Naive reference matrix product, independent of any BLAS.

    numpy.dot is deliberately avoided because it may be backed by the very
    BLAS under test.  A 1-D ``b`` is treated as a column vector and the
    result is flattened back to 1-D.
    """
    b_is_vector = len(b.shape) == 1
    if b_is_vector:
        b = b[:, newaxis]
    assert_(a.shape[1] == b.shape[0])
    c = zeros((a.shape[0], b.shape[1]), common_type(a, b))
    for row in range(a.shape[0]):
        for col in range(b.shape[1]):
            acc = 0
            for k in range(a.shape[1]):
                acc += a[row, k] * b[k, col]
            c[row, col] = acc
    if b_is_vector:
        c = c.reshape((a.shape[0],))
    return c
##################################################
# Test blas ?axpy
class BaseAxpy:
    ''' Mixin class for axpy tests '''
    # Subclasses supply ``blas_func`` (a ?axpy routine) and ``dtype``.

    def test_default_a(self):
        """With no explicit scalar, a defaults to 1: y <- x + y."""
        x = arange(3., dtype=self.dtype)
        y = arange(3., dtype=x.dtype)
        real_y = x*1.+y
        y = self.blas_func(x, y)
        assert_array_equal(real_y, y)

    def test_simple(self):
        """y <- a*x + y with an explicit scalar."""
        x = arange(3., dtype=self.dtype)
        y = arange(3., dtype=x.dtype)
        real_y = x*3.+y
        y = self.blas_func(x, y, a=3.)
        assert_array_equal(real_y, y)

    def test_x_stride(self):
        """incx=2 reads every other element of x."""
        x = arange(6., dtype=self.dtype)
        y = zeros(3, x.dtype)
        y = arange(3., dtype=x.dtype)
        real_y = x[::2]*3.+y
        y = self.blas_func(x, y, a=3., n=3, incx=2)
        assert_array_equal(real_y, y)

    def test_y_stride(self):
        """incy=2 updates every other element of y."""
        x = arange(3., dtype=self.dtype)
        y = zeros(6, x.dtype)
        real_y = x*3.+y[::2]
        y = self.blas_func(x, y, a=3., n=3, incy=2)
        assert_array_equal(real_y, y[::2])

    def test_x_and_y_stride(self):
        """Simultaneous incx/incy strides."""
        x = arange(12., dtype=self.dtype)
        y = zeros(6, x.dtype)
        real_y = x[::4]*3.+y[::2]
        y = self.blas_func(x, y, a=3., n=3, incx=4, incy=2)
        assert_array_equal(real_y, y[::2])

    def test_x_bad_size(self):
        """n*incx exceeding len(x) is rejected by the f2py wrapper."""
        x = arange(12., dtype=self.dtype)
        y = zeros(6, x.dtype)
        with pytest.raises(Exception, match='failed for 1st keyword'):
            self.blas_func(x, y, n=4, incx=5)

    def test_y_bad_size(self):
        """n*incy exceeding len(y) is rejected by the f2py wrapper."""
        x = arange(12., dtype=self.dtype)
        y = zeros(6, x.dtype)
        with pytest.raises(Exception, match='failed for 1st keyword'):
            self.blas_func(x, y, n=3, incy=5)
# Concrete ?axpy test classes.  The try/except guards the ``fblas.<name>``
# attribute lookup: if a routine is absent from this build, an empty
# placeholder class is defined instead so collection does not fail.
try:
    class TestSaxpy(BaseAxpy):
        blas_func = fblas.saxpy
        dtype = float32
except AttributeError:
    class TestSaxpy:
        pass


class TestDaxpy(BaseAxpy):
    blas_func = fblas.daxpy
    dtype = float64


try:
    class TestCaxpy(BaseAxpy):
        blas_func = fblas.caxpy
        dtype = complex64
except AttributeError:
    class TestCaxpy:
        pass


class TestZaxpy(BaseAxpy):
    blas_func = fblas.zaxpy
    dtype = complex128
##################################################
# Test blas ?scal
class BaseScal:
    ''' Mixin class for scal testing '''
    # Subclasses supply ``blas_func`` (a ?scal routine) and ``dtype``.

    def test_simple(self):
        """x <- a*x."""
        x = arange(3., dtype=self.dtype)
        real_x = x*3.
        x = self.blas_func(3., x)
        assert_array_equal(real_x, x)

    def test_x_stride(self):
        """incx=2 scales every other element; the rest are untouched."""
        x = arange(6., dtype=self.dtype)
        real_x = x.copy()
        real_x[::2] = x[::2]*array(3., self.dtype)
        x = self.blas_func(3., x, n=3, incx=2)
        assert_array_equal(real_x, x)

    def test_x_bad_size(self):
        """n*incx exceeding len(x) is rejected by the f2py wrapper."""
        x = arange(12., dtype=self.dtype)
        with pytest.raises(Exception, match='failed for 1st keyword'):
            self.blas_func(2., x, n=4, incx=5)
# Concrete ?scal test classes; same AttributeError-guard pattern as for axpy:
# missing routines yield empty placeholder classes.
try:
    class TestSscal(BaseScal):
        blas_func = fblas.sscal
        dtype = float32
except AttributeError:
    class TestSscal:
        pass


class TestDscal(BaseScal):
    blas_func = fblas.dscal
    dtype = float64


try:
    class TestCscal(BaseScal):
        blas_func = fblas.cscal
        dtype = complex64
except AttributeError:
    class TestCscal:
        pass


class TestZscal(BaseScal):
    blas_func = fblas.zscal
    dtype = complex128
##################################################
# Test blas ?copy
class BaseCopy:
    ''' Mixin class for copy testing '''
    # Subclasses supply ``blas_func`` (a ?copy routine) and ``dtype``.

    def test_simple(self):
        """y <- x."""
        x = arange(3., dtype=self.dtype)
        y = zeros(shape(x), x.dtype)
        y = self.blas_func(x, y)
        assert_array_equal(x, y)

    def test_x_stride(self):
        """incx=2 copies every other source element."""
        x = arange(6., dtype=self.dtype)
        y = zeros(3, x.dtype)
        y = self.blas_func(x, y, n=3, incx=2)
        assert_array_equal(x[::2], y)

    def test_y_stride(self):
        """incy=2 writes every other destination element."""
        x = arange(3., dtype=self.dtype)
        y = zeros(6, x.dtype)
        y = self.blas_func(x, y, n=3, incy=2)
        assert_array_equal(x, y[::2])

    def test_x_and_y_stride(self):
        """Simultaneous incx/incy strides."""
        x = arange(12., dtype=self.dtype)
        y = zeros(6, x.dtype)
        y = self.blas_func(x, y, n=3, incx=4, incy=2)
        assert_array_equal(x[::4], y[::2])

    def test_x_bad_size(self):
        """n*incx exceeding len(x) is rejected by the f2py wrapper."""
        x = arange(12., dtype=self.dtype)
        y = zeros(6, x.dtype)
        with pytest.raises(Exception, match='failed for 1st keyword'):
            self.blas_func(x, y, n=4, incx=5)

    def test_y_bad_size(self):
        """n*incy exceeding len(y) is rejected by the f2py wrapper."""
        x = arange(12., dtype=self.dtype)
        y = zeros(6, x.dtype)
        with pytest.raises(Exception, match='failed for 1st keyword'):
            self.blas_func(x, y, n=3, incy=5)

    # def test_y_bad_type(self):
    ##   Hmmm. Should this work? What should be the output.
    #    x = arange(3.,dtype=self.dtype)
    #    y = zeros(shape(x))
    #    self.blas_func(x,y)
    #    assert_array_equal(x,y)
# Concrete ?copy test classes; same AttributeError-guard pattern as above.
try:
    class TestScopy(BaseCopy):
        blas_func = fblas.scopy
        dtype = float32
except AttributeError:
    class TestScopy:
        pass


class TestDcopy(BaseCopy):
    blas_func = fblas.dcopy
    dtype = float64


try:
    class TestCcopy(BaseCopy):
        blas_func = fblas.ccopy
        dtype = complex64
except AttributeError:
    class TestCcopy:
        pass


class TestZcopy(BaseCopy):
    blas_func = fblas.zcopy
    dtype = complex128
##################################################
# Test blas ?swap
class BaseSwap:
    ''' Mixin class for swap tests '''
    # Subclasses supply ``blas_func`` (a ?swap routine) and ``dtype``.

    def test_simple(self):
        """x and y exchange contents."""
        x = arange(3., dtype=self.dtype)
        y = zeros(shape(x), x.dtype)
        desired_x = y.copy()
        desired_y = x.copy()
        x, y = self.blas_func(x, y)
        assert_array_equal(desired_x, x)
        assert_array_equal(desired_y, y)

    def test_x_stride(self):
        """incx=2: only every other element of x takes part in the swap."""
        x = arange(6., dtype=self.dtype)
        y = zeros(3, x.dtype)
        desired_x = y.copy()
        desired_y = x.copy()[::2]
        x, y = self.blas_func(x, y, n=3, incx=2)
        assert_array_equal(desired_x, x[::2])
        assert_array_equal(desired_y, y)

    def test_y_stride(self):
        """incy=2: only every other element of y takes part in the swap."""
        x = arange(3., dtype=self.dtype)
        y = zeros(6, x.dtype)
        desired_x = y.copy()[::2]
        desired_y = x.copy()
        x, y = self.blas_func(x, y, n=3, incy=2)
        assert_array_equal(desired_x, x)
        assert_array_equal(desired_y, y[::2])

    def test_x_and_y_stride(self):
        """Simultaneous incx/incy strides."""
        x = arange(12., dtype=self.dtype)
        y = zeros(6, x.dtype)
        desired_x = y.copy()[::2]
        desired_y = x.copy()[::4]
        x, y = self.blas_func(x, y, n=3, incx=4, incy=2)
        assert_array_equal(desired_x, x[::4])
        assert_array_equal(desired_y, y[::2])

    def test_x_bad_size(self):
        """n*incx exceeding len(x) is rejected by the f2py wrapper."""
        x = arange(12., dtype=self.dtype)
        y = zeros(6, x.dtype)
        with pytest.raises(Exception, match='failed for 1st keyword'):
            self.blas_func(x, y, n=4, incx=5)

    def test_y_bad_size(self):
        """n*incy exceeding len(y) is rejected by the f2py wrapper."""
        x = arange(12., dtype=self.dtype)
        y = zeros(6, x.dtype)
        with pytest.raises(Exception, match='failed for 1st keyword'):
            self.blas_func(x, y, n=3, incy=5)
# Concrete ?swap test classes; same AttributeError-guard pattern as above.
try:
    class TestSswap(BaseSwap):
        blas_func = fblas.sswap
        dtype = float32
except AttributeError:
    class TestSswap:
        pass


class TestDswap(BaseSwap):
    blas_func = fblas.dswap
    dtype = float64


try:
    class TestCswap(BaseSwap):
        blas_func = fblas.cswap
        dtype = complex64
except AttributeError:
    class TestCswap:
        pass


class TestZswap(BaseSwap):
    blas_func = fblas.zswap
    dtype = complex128
##################################################
# Test blas ?gemv
# This will be a mess to test all cases.
class BaseGemv:
    ''' Mixin class for gemv tests '''
    # Subclasses supply ``blas_func`` (a ?gemv routine) and ``dtype``.

    def get_data(self, x_stride=1, y_stride=1):
        """Build (alpha, beta, a, x, y) fixtures.

        Complex dtypes are multiplied by (1+1j) so imaginary parts are
        exercised.  Strided variants allocate oversized x/y so incx/incy
        can be tested.
        """
        mult = array(1, dtype=self.dtype)
        if self.dtype in [complex64, complex128]:
            mult = array(1+1j, dtype=self.dtype)
        from numpy.random import normal, seed
        seed(1234)
        alpha = array(1., dtype=self.dtype) * mult
        beta = array(1., dtype=self.dtype) * mult
        a = normal(0., 1., (3, 3)).astype(self.dtype) * mult
        x = arange(shape(a)[0]*x_stride, dtype=self.dtype) * mult
        y = arange(shape(a)[1]*y_stride, dtype=self.dtype) * mult
        return alpha, beta, a, x, y

    def test_simple(self):
        """y <- alpha*A*x + beta*y."""
        alpha, beta, a, x, y = self.get_data()
        desired_y = alpha*matrixmultiply(a, x)+beta*y
        y = self.blas_func(alpha, a, x, beta, y)
        assert_array_almost_equal(desired_y, y)

    def test_default_beta_y(self):
        """Omitting beta and y yields plain A*x."""
        alpha, beta, a, x, y = self.get_data()
        desired_y = matrixmultiply(a, x)
        y = self.blas_func(1, a, x)
        assert_array_almost_equal(desired_y, y)

    def test_simple_transpose(self):
        """trans=1 applies A^T."""
        alpha, beta, a, x, y = self.get_data()
        desired_y = alpha*matrixmultiply(transpose(a), x)+beta*y
        y = self.blas_func(alpha, a, x, beta, y, trans=1)
        assert_array_almost_equal(desired_y, y)

    def test_simple_transpose_conj(self):
        """trans=2 applies the conjugate transpose A^H."""
        alpha, beta, a, x, y = self.get_data()
        desired_y = alpha*matrixmultiply(transpose(conjugate(a)), x)+beta*y
        y = self.blas_func(alpha, a, x, beta, y, trans=2)
        assert_array_almost_equal(desired_y, y)

    def test_x_stride(self):
        """incx=2 reads every other element of x."""
        alpha, beta, a, x, y = self.get_data(x_stride=2)
        desired_y = alpha*matrixmultiply(a, x[::2])+beta*y
        y = self.blas_func(alpha, a, x, beta, y, incx=2)
        assert_array_almost_equal(desired_y, y)

    def test_x_stride_transpose(self):
        """incx with trans=1."""
        alpha, beta, a, x, y = self.get_data(x_stride=2)
        desired_y = alpha*matrixmultiply(transpose(a), x[::2])+beta*y
        y = self.blas_func(alpha, a, x, beta, y, trans=1, incx=2)
        assert_array_almost_equal(desired_y, y)

    def test_x_stride_assert(self):
        """An incx too large for len(x) is rejected."""
        # What is the use of this test?
        alpha, beta, a, x, y = self.get_data(x_stride=2)
        with pytest.raises(Exception, match='failed for 3rd argument'):
            y = self.blas_func(1, a, x, 1, y, trans=0, incx=3)
        with pytest.raises(Exception, match='failed for 3rd argument'):
            y = self.blas_func(1, a, x, 1, y, trans=1, incx=3)

    def test_y_stride(self):
        """incy=2 updates every other element of y."""
        alpha, beta, a, x, y = self.get_data(y_stride=2)
        desired_y = y.copy()
        desired_y[::2] = alpha*matrixmultiply(a, x)+beta*y[::2]
        y = self.blas_func(alpha, a, x, beta, y, incy=2)
        assert_array_almost_equal(desired_y, y)

    def test_y_stride_transpose(self):
        """incy with trans=1."""
        alpha, beta, a, x, y = self.get_data(y_stride=2)
        desired_y = y.copy()
        desired_y[::2] = alpha*matrixmultiply(transpose(a), x)+beta*y[::2]
        y = self.blas_func(alpha, a, x, beta, y, trans=1, incy=2)
        assert_array_almost_equal(desired_y, y)

    def test_y_stride_assert(self):
        """An incy too large for len(y) is rejected."""
        # What is the use of this test?
        alpha, beta, a, x, y = self.get_data(y_stride=2)
        with pytest.raises(Exception, match='failed for 2nd keyword'):
            y = self.blas_func(1, a, x, 1, y, trans=0, incy=3)
        with pytest.raises(Exception, match='failed for 2nd keyword'):
            y = self.blas_func(1, a, x, 1, y, trans=1, incy=3)
# Concrete sgemv tests, plus a macOS-only regression check.  Same
# AttributeError-guard pattern as the other concrete classes.
try:
    class TestSgemv(BaseGemv):
        blas_func = fblas.sgemv
        dtype = float32

        def test_sgemv_on_osx(self):
            """Check sgemv across buffer alignments and memory orders.

            Only exercised on macOS (sys.platform check) — presumably
            guarding against an alignment-sensitive platform BLAS issue;
            confirm against the original bug report if this is revisited.
            """
            from itertools import product
            import sys
            import numpy as np

            if sys.platform != 'darwin':
                return

            def aligned_array(shape, align, dtype, order='C'):
                # Make array shape `shape` with aligned at `align` bytes
                d = dtype()
                # Make array of correct size with `align` extra bytes
                N = np.prod(shape)
                tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
                address = tmp.__array_interface__["data"][0]
                # Find offset into array giving desired alignment
                for offset in range(align):
                    if (address + offset) % align == 0:
                        break
                tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
                return tmp.reshape(shape, order=order)

            def as_aligned(arr, align, dtype, order='C'):
                # Copy `arr` into an aligned array with same shape
                aligned = aligned_array(arr.shape, align, dtype, order)
                aligned[:] = arr[:]
                return aligned

            def assert_dot_close(A, X, desired):
                assert_allclose(self.blas_func(1.0, A, X), desired,
                                rtol=1e-5, atol=1e-7)

            testdata = product((15, 32), (10000,), (200, 89), ('C', 'F'))
            for align, m, n, a_order in testdata:
                A_d = np.random.rand(m, n)
                X_d = np.random.rand(n)
                desired = np.dot(A_d, X_d)
                # Calculation with aligned single precision
                A_f = as_aligned(A_d, align, np.float32, order=a_order)
                X_f = as_aligned(X_d, align, np.float32, order=a_order)
                assert_dot_close(A_f, X_f, desired)

except AttributeError:
    class TestSgemv:
        pass
# Remaining concrete ?gemv test classes; same AttributeError-guard pattern.
class TestDgemv(BaseGemv):
    blas_func = fblas.dgemv
    dtype = float64


try:
    class TestCgemv(BaseGemv):
        blas_func = fblas.cgemv
        dtype = complex64
except AttributeError:
    class TestCgemv:
        pass


class TestZgemv(BaseGemv):
    blas_func = fblas.zgemv
    dtype = complex128
"""
##################################################
### Test blas ?ger
### This will be a mess to test all cases.
class BaseGer:
def get_data(self,x_stride=1,y_stride=1):
from numpy.random import normal, seed
seed(1234)
alpha = array(1., dtype = self.dtype)
a = normal(0.,1.,(3,3)).astype(self.dtype)
x = arange(shape(a)[0]*x_stride,dtype=self.dtype)
y = arange(shape(a)[1]*y_stride,dtype=self.dtype)
return alpha,a,x,y
def test_simple(self):
alpha,a,x,y = self.get_data()
# tranpose takes care of Fortran vs. C(and Python) memory layout
desired_a = alpha*transpose(x[:,newaxis]*y) + a
self.blas_func(x,y,a)
assert_array_almost_equal(desired_a,a)
def test_x_stride(self):
alpha,a,x,y = self.get_data(x_stride=2)
desired_a = alpha*transpose(x[::2,newaxis]*y) + a
self.blas_func(x,y,a,incx=2)
assert_array_almost_equal(desired_a,a)
def test_x_stride_assert(self):
alpha,a,x,y = self.get_data(x_stride=2)
with pytest.raises(ValueError, match='foo'):
self.blas_func(x,y,a,incx=3)
def test_y_stride(self):
alpha,a,x,y = self.get_data(y_stride=2)
desired_a = alpha*transpose(x[:,newaxis]*y[::2]) + a
self.blas_func(x,y,a,incy=2)
assert_array_almost_equal(desired_a,a)
def test_y_stride_assert(self):
alpha,a,x,y = self.get_data(y_stride=2)
with pytest.raises(ValueError, match='foo'):
self.blas_func(a,x,y,incy=3)
class TestSger(BaseGer):
blas_func = fblas.sger
dtype = float32
class TestDger(BaseGer):
blas_func = fblas.dger
dtype = float64
"""
##################################################
# Test blas ?gerc
# This will be a mess to test all cases.
"""
class BaseGerComplex(BaseGer):
def get_data(self,x_stride=1,y_stride=1):
from numpy.random import normal, seed
seed(1234)
alpha = array(1+1j, dtype = self.dtype)
a = normal(0.,1.,(3,3)).astype(self.dtype)
a = a + normal(0.,1.,(3,3)) * array(1j, dtype = self.dtype)
x = normal(0.,1.,shape(a)[0]*x_stride).astype(self.dtype)
x = x + x * array(1j, dtype = self.dtype)
y = normal(0.,1.,shape(a)[1]*y_stride).astype(self.dtype)
y = y + y * array(1j, dtype = self.dtype)
return alpha,a,x,y
def test_simple(self):
alpha,a,x,y = self.get_data()
# tranpose takes care of Fortran vs. C(and Python) memory layout
a = a * array(0.,dtype = self.dtype)
#desired_a = alpha*transpose(x[:,newaxis]*self.transform(y)) + a
desired_a = alpha*transpose(x[:,newaxis]*y) + a
#self.blas_func(x,y,a,alpha = alpha)
fblas.cgeru(x,y,a,alpha = alpha)
assert_array_almost_equal(desired_a,a)
#def test_x_stride(self):
# alpha,a,x,y = self.get_data(x_stride=2)
# desired_a = alpha*transpose(x[::2,newaxis]*self.transform(y)) + a
# self.blas_func(x,y,a,incx=2)
# assert_array_almost_equal(desired_a,a)
#def test_y_stride(self):
# alpha,a,x,y = self.get_data(y_stride=2)
# desired_a = alpha*transpose(x[:,newaxis]*self.transform(y[::2])) + a
# self.blas_func(x,y,a,incy=2)
# assert_array_almost_equal(desired_a,a)
class TestCgeru(BaseGerComplex):
blas_func = fblas.cgeru
dtype = complex64
def transform(self,x):
return x
class TestZgeru(BaseGerComplex):
blas_func = fblas.zgeru
dtype = complex128
def transform(self,x):
return x
class TestCgerc(BaseGerComplex):
blas_func = fblas.cgerc
dtype = complex64
def transform(self,x):
return conjugate(x)
class TestZgerc(BaseGerComplex):
blas_func = fblas.zgerc
dtype = complex128
def transform(self,x):
return conjugate(x)
"""
| 18,685
| 29.733553
| 77
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_decomp_ldl.py
|
from numpy.testing import assert_array_almost_equal, assert_allclose, assert_
from numpy import (array, eye, zeros, empty_like, empty, tril_indices_from,
tril, triu_indices_from, spacing, float32, float64,
complex64, complex128)
from numpy.random import rand, randint, seed
from scipy.linalg import ldl
import pytest
from pytest import raises as assert_raises, warns
from numpy import ComplexWarning
def test_args():
    """ldl input validation: rejects non-square input; warns on complex diag."""
    ident = eye(3)
    # A rectangular slice must raise.
    assert_raises(ValueError, ldl, ident[:, :2])
    # A purely imaginary diagonal under the default hermitian=True emits
    # a ComplexWarning.
    with warns(ComplexWarning):
        ldl(1j * ident)
def test_empty_array():
    """ldl of a 0x0 matrix yields empty factors and an empty permutation."""
    ref = empty((0, 0), dtype=complex)
    lu_fac, diag_fac, perm = ldl(empty((0, 0)))
    assert_array_almost_equal(lu_fac, empty_like(ref))
    assert_array_almost_equal(diag_fac, empty_like(ref))
    assert_array_almost_equal(perm, array([], dtype=int))
def test_simple():
    """Round-trip reconstruction: the factors must reproduce the input.

    Covers real symmetric (b, c, d), complex symmetric (a, hermitian=False),
    and an upper-triangular-input hermitian case (e).
    """
    a = array([[-0.39-0.71j, 5.14-0.64j, -7.86-2.96j, 3.80+0.92j],
               [5.14-0.64j, 8.86+1.81j, -3.52+0.58j, 5.32-1.59j],
               [-7.86-2.96j, -3.52+0.58j, -2.83-0.03j, -1.54-2.86j],
               [3.80+0.92j, 5.32-1.59j, -1.54-2.86j, -0.56+0.12j]])
    b = array([[5., 10, 1, 18],
               [10., 2, 11, 1],
               [1., 11, 19, 9],
               [18., 1, 9, 0]])
    c = array([[52., 97, 112, 107, 50],
               [97., 114, 89, 98, 13],
               [112., 89, 64, 33, 6],
               [107., 98, 33, 60, 73],
               [50., 13, 6, 73, 77]])

    d = array([[2., 2, -4, 0, 4],
               [2., -2, -2, 10, -8],
               [-4., -2, 6, -8, -4],
               [0., 10, -8, 6, -6],
               [4., -8, -4, -6, 10]])
    e = array([[-1.36+0.00j, 0+0j, 0+0j, 0+0j],
               [1.58-0.90j, -8.87+0j, 0+0j, 0+0j],
               [2.21+0.21j, -1.84+0.03j, -4.63+0j, 0+0j],
               [3.91-1.50j, -1.78-1.18j, 0.11-0.11j, -1.84+0.00j]])
    for x in (b, c, d):
        # Both the lower (default) and upper factorizations must rebuild x.
        l, d, p = ldl(x)
        assert_allclose(l.dot(d).dot(l.T), x, atol=spacing(1000.), rtol=0)
        u, d, p = ldl(x, lower=False)
        assert_allclose(u.dot(d).dot(u.T), x, atol=spacing(1000.), rtol=0)

    l, d, p = ldl(a, hermitian=False)
    assert_allclose(l.dot(d).dot(l.T), a, atol=spacing(1000.), rtol=0)
    u, d, p = ldl(a, lower=False, hermitian=False)
    assert_allclose(u.dot(d).dot(u.T), a, atol=spacing(1000.), rtol=0)

    # Use upper part for the computation and use the lower part for comparison
    l, d, p = ldl(e.conj().T, lower=0)
    assert_allclose(tril(l.dot(d).dot(l.conj().T)-e), zeros((4, 4)),
                    atol=spacing(1000.), rtol=0)
def test_permutations():
    """Permuted LDL factors must be strictly triangular for random inputs."""
    seed(1234)
    for _ in range(10):
        n = randint(1, 100)
        # Random real/complex array
        x = rand(n, n) if randint(2) else rand(n, n) + rand(n, n)*1j
        x = x + x.conj().T
        # Boost the diagonal with a large random value (presumably to keep
        # the factorization well conditioned — confirm if revisited).
        x += eye(n)*randint(5, 1e6)
        l_ind = tril_indices_from(x, k=-1)
        u_ind = triu_indices_from(x, k=1)

        # Test whether permutations lead to a triangular array
        u, d, p = ldl(x, lower=0)
        # lower part should be zero
        assert_(not any(u[p, :][l_ind]), f'Spin {_} failed')

        l, d, p = ldl(x, lower=1)
        # upper part should be zero
        assert_(not any(l[p, :][u_ind]), f'Spin {_} failed')
@pytest.mark.parametrize("dtype", [float32, float64])
@pytest.mark.parametrize("n", [30, 150])
def test_ldl_type_size_combinations_real(n, dtype):
    """Reconstruction accuracy of ldl across real dtypes and sizes."""
    seed(1234)
    msg = (f"Failed for size: {n}, dtype: {dtype}")

    x = rand(n, n).astype(dtype)
    x = x + x.T
    # Large diagonal boost, matching test_permutations above.
    x += eye(n, dtype=dtype)*dtype(randint(5, 1e6))

    l, d1, p = ldl(x)
    u, d2, p = ldl(x, lower=0)
    # Looser tolerance for single precision.
    rtol = 1e-4 if dtype is float32 else 1e-10
    assert_allclose(l.dot(d1).dot(l.T), x, rtol=rtol, err_msg=msg)
    assert_allclose(u.dot(d2).dot(u.T), x, rtol=rtol, err_msg=msg)
@pytest.mark.parametrize("dtype", [complex64, complex128])
@pytest.mark.parametrize("n", [30, 150])
def test_ldl_type_size_combinations_complex(n, dtype):
    """Reconstruction accuracy of ldl for complex hermitian and symmetric."""
    seed(1234)
    msg1 = (f"Her failed for size: {n}, dtype: {dtype}")
    msg2 = (f"Sym failed for size: {n}, dtype: {dtype}")

    # Complex hermitian upper/lower
    x = (rand(n, n)+1j*rand(n, n)).astype(dtype)
    x = x+x.conj().T
    x += eye(n, dtype=dtype)*dtype(randint(5, 1e6))

    l, d1, p = ldl(x)
    u, d2, p = ldl(x, lower=0)
    # Looser tolerance for single precision.
    rtol = 2e-4 if dtype is complex64 else 1e-10
    assert_allclose(l.dot(d1).dot(l.conj().T), x, rtol=rtol, err_msg=msg1)
    assert_allclose(u.dot(d2).dot(u.conj().T), x, rtol=rtol, err_msg=msg1)

    # Complex symmetric upper/lower
    x = (rand(n, n)+1j*rand(n, n)).astype(dtype)
    x = x+x.T
    x += eye(n, dtype=dtype)*dtype(randint(5, 1e6))

    l, d1, p = ldl(x, hermitian=0)
    u, d2, p = ldl(x, lower=0, hermitian=0)
    assert_allclose(l.dot(d1).dot(l.T), x, rtol=rtol, err_msg=msg2)
    assert_allclose(u.dot(d2).dot(u.T), x, rtol=rtol, err_msg=msg2)
| 4,933
| 35.014599
| 78
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_blas.py
|
#
# Created by: Pearu Peterson, April 2002
#
__usage__ = """
Build linalg:
python setup.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.linalg.test()'
"""
import math
import pytest
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal, assert_,
assert_array_almost_equal, assert_allclose)
from pytest import raises as assert_raises
from numpy import float32, float64, complex64, complex128, arange, triu, \
tril, zeros, tril_indices, ones, mod, diag, append, eye, \
nonzero
from numpy.random import rand, seed
from scipy.linalg import _fblas as fblas, get_blas_funcs, toeplitz, solve
try:
from scipy.linalg import _cblas as cblas
except ImportError:
cblas = None
# Dtype groupings used to parameterize the BLAS tests below.
REAL_DTYPES = [float32, float64]
COMPLEX_DTYPES = [complex64, complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
def test_get_blas_funcs():
    """get_blas_funcs: typecode resolution, library selection, upcasting."""
    # check that it returns Fortran code for arrays that are
    # fortran-ordered
    f1, f2, f3 = get_blas_funcs(
        ('axpy', 'axpy', 'axpy'),
        (np.empty((2, 2), dtype=np.complex64, order='F'),
         np.empty((2, 2), dtype=np.complex128, order='C'))
    )

    # get_blas_funcs will choose libraries depending on most generic
    # array
    assert_equal(f1.typecode, 'z')
    assert_equal(f2.typecode, 'z')
    if cblas is not None:
        assert_equal(f1.module_name, 'cblas')
        assert_equal(f2.module_name, 'cblas')

    # check defaults.
    f1 = get_blas_funcs('rotg')
    assert_equal(f1.typecode, 'd')

    # check also dtype interface
    f1 = get_blas_funcs('gemm', dtype=np.complex64)
    assert_equal(f1.typecode, 'c')
    f1 = get_blas_funcs('gemm', dtype='F')
    assert_equal(f1.typecode, 'c')

    # extended precision complex.  np.clongdouble is the equivalent of the
    # np.longcomplex alias, which was removed in NumPy 2.0.
    f1 = get_blas_funcs('gemm', dtype=np.clongdouble)
    assert_equal(f1.typecode, 'z')

    # check safe complex upcasting
    f1 = get_blas_funcs('axpy',
                        (np.empty((2, 2), dtype=np.float64),
                         np.empty((2, 2), dtype=np.complex64))
                        )
    assert_equal(f1.typecode, 'z')
def test_get_blas_funcs_alias():
    """dot/dotc/dotu aliasing: complex maps to 'c'; real names coincide."""
    nrm2, dot = get_blas_funcs(('nrm2', 'dot'), dtype=np.complex64)
    assert nrm2.typecode == 'c'
    assert dot.typecode == 'c'

    dot_r, dotc_r, dotu_r = get_blas_funcs(('dot', 'dotc', 'dotu'),
                                           dtype=np.float64)
    # For real dtypes all three names resolve to the same routine.
    assert dot_r is dotc_r
    assert dot_r is dotu_r
class TestCBLAS1Simple:
    """Spot checks for the C BLAS ?axpy wrappers.

    If the optional _cblas module is absent (``cblas is None``), every
    getattr below returns None and the checks are skipped.
    """

    def test_axpy(self):
        """?axpy: y <- a*x + y for all four type prefixes."""
        for p in 'sd':
            f = getattr(cblas, p+'axpy', None)
            if f is None:
                continue
            assert_array_almost_equal(f([1, 2, 3], [2, -1, 3], a=5),
                                      [7, 9, 18])
        for p in 'cz':
            f = getattr(cblas, p+'axpy', None)
            if f is None:
                continue
            assert_array_almost_equal(f([1, 2j, 3], [2, -1, 3], a=5),
                                      [7, 10j-1, 18])
class TestFBLAS1Simple:
    """Spot checks for the Fortran BLAS level-1 routines on tiny vectors.

    Each check looks the routine up by prefix and silently skips it when
    absent from the build.
    """

    def test_axpy(self):
        """?axpy: y <- a*x + y."""
        for p in 'sd':
            f = getattr(fblas, p+'axpy', None)
            if f is None:
                continue
            assert_array_almost_equal(f([1, 2, 3], [2, -1, 3], a=5),
                                      [7, 9, 18])
        for p in 'cz':
            f = getattr(fblas, p+'axpy', None)
            if f is None:
                continue
            assert_array_almost_equal(f([1, 2j, 3], [2, -1, 3], a=5),
                                      [7, 10j-1, 18])

    def test_copy(self):
        """?copy: y <- x."""
        for p in 'sd':
            f = getattr(fblas, p+'copy', None)
            if f is None:
                continue
            assert_array_almost_equal(f([3, 4, 5], [8]*3), [3, 4, 5])
        for p in 'cz':
            f = getattr(fblas, p+'copy', None)
            if f is None:
                continue
            assert_array_almost_equal(f([3, 4j, 5+3j], [8]*3), [3, 4j, 5+3j])

    def test_asum(self):
        """?asum: sum of absolute values (|re|+|im| per element for complex)."""
        for p in 'sd':
            f = getattr(fblas, p+'asum', None)
            if f is None:
                continue
            assert_almost_equal(f([3, -4, 5]), 12)
        for p in ['sc', 'dz']:
            f = getattr(fblas, p+'asum', None)
            if f is None:
                continue
            assert_almost_equal(f([3j, -4, 3-4j]), 14)

    def test_dot(self):
        """?dot: real inner product."""
        for p in 'sd':
            f = getattr(fblas, p+'dot', None)
            if f is None:
                continue
            assert_almost_equal(f([3, -4, 5], [2, 5, 1]), -9)

    def test_complex_dotu(self):
        """?dotu: unconjugated complex inner product."""
        for p in 'cz':
            f = getattr(fblas, p+'dotu', None)
            if f is None:
                continue
            assert_almost_equal(f([3j, -4, 3-4j], [2, 3, 1]), -9+2j)

    def test_complex_dotc(self):
        """?dotc: conjugated complex inner product."""
        for p in 'cz':
            f = getattr(fblas, p+'dotc', None)
            if f is None:
                continue
            assert_almost_equal(f([3j, -4, 3-4j], [2, 3j, 1]), 3-14j)

    def test_nrm2(self):
        """?nrm2: Euclidean norm."""
        for p in 'sd':
            f = getattr(fblas, p+'nrm2', None)
            if f is None:
                continue
            assert_almost_equal(f([3, -4, 5]), math.sqrt(50))
        for p in ['c', 'z', 'sc', 'dz']:
            f = getattr(fblas, p+'nrm2', None)
            if f is None:
                continue
            assert_almost_equal(f([3j, -4, 3-4j]), math.sqrt(50))

    def test_scal(self):
        """?scal: x <- a*x, including real-scalar/complex-vector variants."""
        for p in 'sd':
            f = getattr(fblas, p+'scal', None)
            if f is None:
                continue
            assert_array_almost_equal(f(2, [3, -4, 5]), [6, -8, 10])
        for p in 'cz':
            f = getattr(fblas, p+'scal', None)
            if f is None:
                continue
            assert_array_almost_equal(f(3j, [3j, -4, 3-4j]), [-9, -12j, 12+9j])
        for p in ['cs', 'zd']:
            f = getattr(fblas, p+'scal', None)
            if f is None:
                continue
            assert_array_almost_equal(f(3, [3j, -4, 3-4j]), [9j, -12, 9-12j])

    def test_swap(self):
        """?swap: exchange x and y."""
        for p in 'sd':
            f = getattr(fblas, p+'swap', None)
            if f is None:
                continue
            x, y = [2, 3, 1], [-2, 3, 7]
            x1, y1 = f(x, y)
            assert_array_almost_equal(x1, y)
            assert_array_almost_equal(y1, x)
        for p in 'cz':
            f = getattr(fblas, p+'swap', None)
            if f is None:
                continue
            x, y = [2, 3j, 1], [-2, 3, 7-3j]
            x1, y1 = f(x, y)
            assert_array_almost_equal(x1, y)
            assert_array_almost_equal(y1, x)

    def test_amax(self):
        """i?amax: index of the largest |.| entry (|re|+|im| for complex)."""
        for p in 'sd':
            f = getattr(fblas, 'i'+p+'amax')
            assert_equal(f([-2, 4, 3]), 1)
        for p in 'cz':
            f = getattr(fblas, 'i'+p+'amax')
            assert_equal(f([-5, 4+3j, 6]), 1)
# XXX: need tests for rot,rotm,rotg,rotmg
class TestFBLAS2Simple:
    def test_gemv(self):
        """?gemv: y <- alpha*A*x (+ beta*y), checked on 1x1 matrices."""
        for p in 'sd':
            f = getattr(fblas, p+'gemv', None)
            if f is None:
                continue
            assert_array_almost_equal(f(3, [[3]], [-4]), [-36])
            assert_array_almost_equal(f(3, [[3]], [-4], 3, [5]), [-21])
        for p in 'cz':
            f = getattr(fblas, p+'gemv', None)
            if f is None:
                continue
            assert_array_almost_equal(f(3j, [[3-4j]], [-4]), [-48-36j])
            assert_array_almost_equal(f(3j, [[3-4j]], [-4], 3, [5j]),
                                      [-48-21j])
    def test_ger(self):
        """?ger/?geru/?gerc rank-1 updates: a <- alpha*x*y^T (+ a)."""
        for p in 'sd':
            f = getattr(fblas, p+'ger', None)
            if f is None:
                continue
            assert_array_almost_equal(f(1, [1, 2], [3, 4]), [[3, 4], [6, 8]])
            assert_array_almost_equal(f(2, [1, 2, 3], [3, 4]),
                                      [[6, 8], [12, 16], [18, 24]])

            assert_array_almost_equal(f(1, [1, 2], [3, 4],
                                        a=[[1, 2], [3, 4]]), [[4, 6], [9, 12]])
        for p in 'cz':
            f = getattr(fblas, p+'geru', None)
            if f is None:
                continue
            assert_array_almost_equal(f(1, [1j, 2], [3, 4]),
                                      [[3j, 4j], [6, 8]])
            assert_array_almost_equal(f(-2, [1j, 2j, 3j], [3j, 4j]),
                                      [[6, 8], [12, 16], [18, 24]])
        for p in 'cz':
            for name in ('ger', 'gerc'):
                f = getattr(fblas, p+name, None)
                if f is None:
                    continue
                assert_array_almost_equal(f(1, [1j, 2], [3, 4]),
                                          [[3j, 4j], [6, 8]])
                assert_array_almost_equal(f(2, [1j, 2j, 3j], [3j, 4j]),
                                          [[6, 8], [12, 16], [18, 24]])
    def test_syr_her(self):
        """?syr/?her rank-1 updates against triu reference results.

        Checks upper/lower triangles, strided and offset input (via the
        oversized y/w buffers), overwrite_a semantics, and wrapper argument
        validation.
        """
        x = np.arange(1, 5, dtype='d')
        resx = np.triu(x[:, np.newaxis] * x)
        resx_reverse = np.triu(x[::-1, np.newaxis] * x[::-1])

        y = np.linspace(0, 8.5, 17, endpoint=False)

        z = np.arange(1, 9, dtype='d').view('D')
        resz = np.triu(z[:, np.newaxis] * z)
        resz_reverse = np.triu(z[::-1, np.newaxis] * z[::-1])
        rehz = np.triu(z[:, np.newaxis] * z.conj())
        rehz_reverse = np.triu(z[::-1, np.newaxis] * z[::-1].conj())

        # z interleaved with zeros, for incx=3/offx=1 addressing.
        w = np.c_[np.zeros(4), z, np.zeros(4)].ravel()

        for p, rtol in zip('sd', [1e-7, 1e-14]):
            f = getattr(fblas, p+'syr', None)
            if f is None:
                continue
            assert_allclose(f(1.0, x), resx, rtol=rtol)
            assert_allclose(f(1.0, x, lower=True), resx.T, rtol=rtol)
            assert_allclose(f(1.0, y, incx=2, offx=2, n=4), resx, rtol=rtol)
            # negative increments imply reversed vectors in blas
            assert_allclose(f(1.0, y, incx=-2, offx=2, n=4),
                            resx_reverse, rtol=rtol)

            a = np.zeros((4, 4), 'f' if p == 's' else 'd', 'F')
            b = f(1.0, x, a=a, overwrite_a=True)
            assert_allclose(a, resx, rtol=rtol)

            b = f(2.0, x, a=a)
            assert_(a is not b)
            assert_allclose(b, 3*resx, rtol=rtol)

            assert_raises(Exception, f, 1.0, x, incx=0)
            assert_raises(Exception, f, 1.0, x, offx=5)
            assert_raises(Exception, f, 1.0, x, offx=-2)
            assert_raises(Exception, f, 1.0, x, n=-2)
            assert_raises(Exception, f, 1.0, x, n=5)
            assert_raises(Exception, f, 1.0, x, lower=2)
            assert_raises(Exception, f, 1.0, x,
                          a=np.zeros((2, 2), 'd', 'F'))

        for p, rtol in zip('cz', [1e-7, 1e-14]):
            f = getattr(fblas, p+'syr', None)
            if f is None:
                continue
            assert_allclose(f(1.0, z), resz, rtol=rtol)
            assert_allclose(f(1.0, z, lower=True), resz.T, rtol=rtol)
            assert_allclose(f(1.0, w, incx=3, offx=1, n=4), resz, rtol=rtol)
            # negative increments imply reversed vectors in blas
            assert_allclose(f(1.0, w, incx=-3, offx=1, n=4),
                            resz_reverse, rtol=rtol)

            a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F')
            b = f(1.0, z, a=a, overwrite_a=True)
            assert_allclose(a, resz, rtol=rtol)

            b = f(2.0, z, a=a)
            assert_(a is not b)
            assert_allclose(b, 3*resz, rtol=rtol)

            assert_raises(Exception, f, 1.0, x, incx=0)
            assert_raises(Exception, f, 1.0, x, offx=5)
            assert_raises(Exception, f, 1.0, x, offx=-2)
            assert_raises(Exception, f, 1.0, x, n=-2)
            assert_raises(Exception, f, 1.0, x, n=5)
            assert_raises(Exception, f, 1.0, x, lower=2)
            assert_raises(Exception, f, 1.0, x,
                          a=np.zeros((2, 2), 'd', 'F'))

        for p, rtol in zip('cz', [1e-7, 1e-14]):
            f = getattr(fblas, p+'her', None)
            if f is None:
                continue
            assert_allclose(f(1.0, z), rehz, rtol=rtol)
            assert_allclose(f(1.0, z, lower=True), rehz.T.conj(), rtol=rtol)
            assert_allclose(f(1.0, w, incx=3, offx=1, n=4), rehz, rtol=rtol)
            # negative increments imply reversed vectors in blas
            assert_allclose(f(1.0, w, incx=-3, offx=1, n=4),
                            rehz_reverse, rtol=rtol)

            a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F')
            b = f(1.0, z, a=a, overwrite_a=True)
            assert_allclose(a, rehz, rtol=rtol)

            b = f(2.0, z, a=a)
            assert_(a is not b)
            assert_allclose(b, 3*rehz, rtol=rtol)

            assert_raises(Exception, f, 1.0, x, incx=0)
            assert_raises(Exception, f, 1.0, x, offx=5)
            assert_raises(Exception, f, 1.0, x, offx=-2)
            assert_raises(Exception, f, 1.0, x, n=-2)
            assert_raises(Exception, f, 1.0, x, n=5)
            assert_raises(Exception, f, 1.0, x, lower=2)
            assert_raises(Exception, f, 1.0, x,
                          a=np.zeros((2, 2), 'd', 'F'))
    def test_syr2(self):
        """?syr2 rank-2 update: a <- alpha*(x*y^T + y*x^T), upper/lower."""
        x = np.arange(1, 5, dtype='d')
        y = np.arange(5, 9, dtype='d')
        resxy = np.triu(x[:, np.newaxis] * y + y[:, np.newaxis] * x)
        resxy_reverse = np.triu(x[::-1, np.newaxis] * y[::-1]
                                + y[::-1, np.newaxis] * x[::-1])

        # Oversized buffer for the incx/offx variants below.
        q = np.linspace(0, 8.5, 17, endpoint=False)

        for p, rtol in zip('sd', [1e-7, 1e-14]):
            f = getattr(fblas, p+'syr2', None)
            if f is None:
                continue
            assert_allclose(f(1.0, x, y), resxy, rtol=rtol)
            assert_allclose(f(1.0, x, y, n=3), resxy[:3, :3], rtol=rtol)
            assert_allclose(f(1.0, x, y, lower=True), resxy.T, rtol=rtol)

            assert_allclose(f(1.0, q, q, incx=2, offx=2, incy=2, offy=10),
                            resxy, rtol=rtol)
            assert_allclose(f(1.0, q, q, incx=2, offx=2, incy=2, offy=10, n=3),
                            resxy[:3, :3], rtol=rtol)
            # negative increments imply reversed vectors in blas
            assert_allclose(f(1.0, q, q, incx=-2, offx=2, incy=-2, offy=10),
                            resxy_reverse, rtol=rtol)

            a = np.zeros((4, 4), 'f' if p == 's' else 'd', 'F')
            b = f(1.0, x, y, a=a, overwrite_a=True)
            assert_allclose(a, resxy, rtol=rtol)

            b = f(2.0, x, y, a=a)
            assert_(a is not b)
            assert_allclose(b, 3*resxy, rtol=rtol)

            assert_raises(Exception, f, 1.0, x, y, incx=0)
            assert_raises(Exception, f, 1.0, x, y, offx=5)
            assert_raises(Exception, f, 1.0, x, y, offx=-2)
            assert_raises(Exception, f, 1.0, x, y, incy=0)
            assert_raises(Exception, f, 1.0, x, y, offy=5)
            assert_raises(Exception, f, 1.0, x, y, offy=-2)
            assert_raises(Exception, f, 1.0, x, y, n=-2)
            assert_raises(Exception, f, 1.0, x, y, n=5)
            assert_raises(Exception, f, 1.0, x, y, lower=2)
            assert_raises(Exception, f, 1.0, x, y,
                          a=np.zeros((2, 2), 'd', 'F'))
    def test_her2(self):
        """Hermitian rank-2 update: A <- alpha*x*y^H + conj(alpha)*y*x^H + A."""
        x = np.arange(1, 9, dtype='d').view('D')
        y = np.arange(9, 17, dtype='d').view('D')
        # Reference result; BLAS defines only the upper triangle.
        resxy = x[:, np.newaxis] * y.conj() + y[:, np.newaxis] * x.conj()
        resxy = np.triu(resxy)
        resxy_reverse = x[::-1, np.newaxis] * y[::-1].conj()
        resxy_reverse += y[::-1, np.newaxis] * x[::-1].conj()
        resxy_reverse = np.triu(resxy_reverse)
        # Padded buffers used to exercise incx/offx handling.
        u = np.c_[np.zeros(4), x, np.zeros(4)].ravel()
        v = np.c_[np.zeros(4), y, np.zeros(4)].ravel()
        for p, rtol in zip('cz', [1e-7, 1e-14]):
            f = getattr(fblas, p+'her2', None)
            if f is None:
                continue
            assert_allclose(f(1.0, x, y), resxy, rtol=rtol)
            assert_allclose(f(1.0, x, y, n=3), resxy[:3, :3], rtol=rtol)
            assert_allclose(f(1.0, x, y, lower=True), resxy.T.conj(),
                            rtol=rtol)
            assert_allclose(f(1.0, u, v, incx=3, offx=1, incy=3, offy=1),
                            resxy, rtol=rtol)
            assert_allclose(f(1.0, u, v, incx=3, offx=1, incy=3, offy=1, n=3),
                            resxy[:3, :3], rtol=rtol)
            # negative increments imply reversed vectors in blas
            assert_allclose(f(1.0, u, v, incx=-3, offx=1, incy=-3, offy=1),
                            resxy_reverse, rtol=rtol)
            # overwrite_a=True updates `a` in place; default returns a copy.
            a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F')
            b = f(1.0, x, y, a=a, overwrite_a=True)
            assert_allclose(a, resxy, rtol=rtol)
            b = f(2.0, x, y, a=a)
            assert_(a is not b)
            assert_allclose(b, 3*resxy, rtol=rtol)
            # Invalid increments/offsets/sizes must raise.
            assert_raises(Exception, f, 1.0, x, y, incx=0)
            assert_raises(Exception, f, 1.0, x, y, offx=5)
            assert_raises(Exception, f, 1.0, x, y, offx=-2)
            assert_raises(Exception, f, 1.0, x, y, incy=0)
            assert_raises(Exception, f, 1.0, x, y, offy=5)
            assert_raises(Exception, f, 1.0, x, y, offy=-2)
            assert_raises(Exception, f, 1.0, x, y, n=-2)
            assert_raises(Exception, f, 1.0, x, y, n=5)
            assert_raises(Exception, f, 1.0, x, y, lower=2)
            assert_raises(Exception, f, 1.0, x, y,
                          a=np.zeros((2, 2), 'd', 'F'))
    def test_gbmv(self):
        """General banded matrix-vector product (?gbmv) vs. a dense reference."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 7
            m = 5
            kl = 1  # number of subdiagonals
            ku = 2  # number of superdiagonals
            # fake a banded matrix via toeplitz
            A = toeplitz(append(rand(kl+1), zeros(m-kl-1)),
                         append(rand(ku+1), zeros(n-ku-1)))
            A = A.astype(dtype)
            Ab = zeros((kl+ku+1, n), dtype=dtype)
            # Form the banded storage (LAPACK band layout, one row per diagonal)
            Ab[2, :5] = A[0, 0]  # diag
            Ab[1, 1:6] = A[0, 1]  # sup1
            Ab[0, 2:7] = A[0, 2]  # sup2
            Ab[3, :4] = A[1, 0]  # sub1
            x = rand(n).astype(dtype)
            y = rand(m).astype(dtype)
            alpha, beta = dtype(3), dtype(-5)
            func, = get_blas_funcs(('gbmv',), dtype=dtype)
            # y <- alpha*A*x + beta*y
            y1 = func(m=m, n=n, ku=ku, kl=kl, alpha=alpha, a=Ab,
                      x=x, y=y, beta=beta)
            y2 = alpha * A.dot(x) + beta * y
            assert_array_almost_equal(y1, y2)
            # Transposed product: x <- alpha*A.T*y + beta*x
            y1 = func(m=m, n=n, ku=ku, kl=kl, alpha=alpha, a=Ab,
                      x=y, y=x, beta=beta, trans=1)
            y2 = alpha * A.T.dot(y) + beta * x
            assert_array_almost_equal(y1, y2)
    def test_sbmv_hbmv(self):
        """Symmetric/Hermitian banded matrix-vector product vs. dense dot."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 6
            k = 2  # bandwidth above the diagonal
            A = zeros((n, n), dtype=dtype)
            Ab = zeros((k+1, n), dtype=dtype)
            # Form the array and its packed banded storage
            A[arange(n), arange(n)] = rand(n)
            for ind2 in range(1, k+1):
                temp = rand(n-ind2)
                A[arange(n-ind2), arange(ind2, n)] = temp
                Ab[-1-ind2, ind2:] = temp
            A = A.astype(dtype)
            # Symmetrize for s/d prefixes, hermitize for c/z.
            A = A + A.T if ind < 2 else A + A.conj().T
            Ab[-1, :] = diag(A)
            x = rand(n).astype(dtype)
            y = rand(n).astype(dtype)
            alpha, beta = dtype(1.25), dtype(3)
            if ind > 1:
                func, = get_blas_funcs(('hbmv',), dtype=dtype)
            else:
                func, = get_blas_funcs(('sbmv',), dtype=dtype)
            y1 = func(k=k, alpha=alpha, a=Ab, x=x, y=y, beta=beta)
            y2 = alpha * A.dot(x) + beta * y
            assert_array_almost_equal(y1, y2)
    def test_spmv_hpmv(self):
        """Symmetric/Hermitian packed matrix-vector product, incl. strides."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
            n = 3
            A = rand(n, n).astype(dtype)
            if ind > 1:
                A += rand(n, n)*1j
            A = A.astype(dtype)
            # Symmetrize for the first four dtypes, hermitize afterwards.
            A = A + A.T if ind < 4 else A + A.conj().T
            # Packed storage: lower triangle flattened row-by-row.
            c, r = tril_indices(n)
            Ap = A[r, c]
            x = rand(n).astype(dtype)
            y = rand(n).astype(dtype)
            xlong = arange(2*n).astype(dtype)
            ylong = ones(2*n).astype(dtype)
            alpha, beta = dtype(1.25), dtype(2)
            if ind > 3:
                func, = get_blas_funcs(('hpmv',), dtype=dtype)
            else:
                func, = get_blas_funcs(('spmv',), dtype=dtype)
            y1 = func(n=n, alpha=alpha, ap=Ap, x=x, y=y, beta=beta)
            y2 = alpha * A.dot(x) + beta * y
            assert_array_almost_equal(y1, y2)
            # Test inc and offsets
            y1 = func(n=n-1, alpha=alpha, beta=beta, x=xlong, y=ylong, ap=Ap,
                      incx=2, incy=2, offx=n, offy=n)
            y2 = (alpha * A[:-1, :-1]).dot(xlong[3::2]) + beta * ylong[3::2]
            assert_array_almost_equal(y1[3::2], y2)
            # Entries outside the strided window must be untouched.
            assert_almost_equal(y1[4], ylong[4])
    def test_spr_hpr(self):
        """Symmetric/Hermitian packed rank-1 update (?spr / ?hpr)."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
            n = 3
            A = rand(n, n).astype(dtype)
            if ind > 1:
                A += rand(n, n)*1j
            A = A.astype(dtype)
            A = A + A.T if ind < 4 else A + A.conj().T
            # Packed lower-triangle storage.
            c, r = tril_indices(n)
            Ap = A[r, c]
            x = rand(n).astype(dtype)
            # hpr requires a real alpha even for complex dtypes.
            alpha = (DTYPES+COMPLEX_DTYPES)[mod(ind, 4)](2.5)
            if ind > 3:
                func, = get_blas_funcs(('hpr',), dtype=dtype)
                y2 = alpha * x[:, None].dot(x[None, :].conj()) + A
            else:
                func, = get_blas_funcs(('spr',), dtype=dtype)
                y2 = alpha * x[:, None].dot(x[None, :]) + A
            y1 = func(n=n, alpha=alpha, ap=Ap, x=x)
            # Unpack the packed result back into a full matrix for comparison.
            y1f = zeros((3, 3), dtype=dtype)
            y1f[r, c] = y1
            y1f[c, r] = y1.conj() if ind > 3 else y1
            assert_array_almost_equal(y1f, y2)
    def test_spr2_hpr2(self):
        """Symmetric/Hermitian packed rank-2 update (?spr2 / ?hpr2)."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 3
            A = rand(n, n).astype(dtype)
            if ind > 1:
                A += rand(n, n)*1j
            A = A.astype(dtype)
            A = A + A.T if ind < 2 else A + A.conj().T
            # Packed lower-triangle storage.
            c, r = tril_indices(n)
            Ap = A[r, c]
            x = rand(n).astype(dtype)
            y = rand(n).astype(dtype)
            alpha = dtype(2)
            if ind > 1:
                func, = get_blas_funcs(('hpr2',), dtype=dtype)
            else:
                func, = get_blas_funcs(('spr2',), dtype=dtype)
            # Reference: A + conj(alpha)*x*y^H + its conjugate transpose.
            u = alpha.conj() * x[:, None].dot(y[None, :].conj())
            y2 = A + u + u.conj().T
            y1 = func(n=n, alpha=alpha, x=x, y=y, ap=Ap)
            # Unpack the packed result into a full matrix for comparison.
            y1f = zeros((3, 3), dtype=dtype)
            y1f[r, c] = y1
            y1f[[1, 2, 2], [0, 0, 1]] = y1[[1, 3, 4]].conj()
            assert_array_almost_equal(y1f, y2)
    def test_tbmv(self):
        """Triangular banded matrix-vector product (?tbmv) vs. dense dot."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 10
            k = 3
            x = rand(n).astype(dtype)
            A = zeros((n, n), dtype=dtype)
            # Banded upper triangular array
            for sup in range(k+1):
                A[arange(n-sup), arange(sup, n)] = rand(n-sup)
            # Add complex parts for c,z
            if ind > 1:
                A[nonzero(A)] += 1j * rand((k+1)*n-(k*(k+1)//2)).astype(dtype)
            # Form the banded storage
            Ab = zeros((k+1, n), dtype=dtype)
            for row in range(k+1):
                Ab[-row-1, row:] = diag(A, k=row)
            func, = get_blas_funcs(('tbmv',), dtype=dtype)
            y1 = func(k=k, a=Ab, x=x)
            y2 = A.dot(x)
            assert_array_almost_equal(y1, y2)
            # diag=1 tells BLAS to treat the diagonal as unit.
            y1 = func(k=k, a=Ab, x=x, diag=1)
            A[arange(n), arange(n)] = dtype(1)
            y2 = A.dot(x)
            assert_array_almost_equal(y1, y2)
            y1 = func(k=k, a=Ab, x=x, diag=1, trans=1)
            y2 = A.T.dot(x)
            assert_array_almost_equal(y1, y2)
            y1 = func(k=k, a=Ab, x=x, diag=1, trans=2)
            y2 = A.conj().T.dot(x)
            assert_array_almost_equal(y1, y2)
    def test_tbsv(self):
        """Triangular banded solve (?tbsv) vs. a dense solve reference."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 6
            k = 3
            x = rand(n).astype(dtype)
            A = zeros((n, n), dtype=dtype)
            # Banded upper triangular array
            for sup in range(k+1):
                A[arange(n-sup), arange(sup, n)] = rand(n-sup)
            # Add complex parts for c,z
            if ind > 1:
                A[nonzero(A)] += 1j * rand((k+1)*n-(k*(k+1)//2)).astype(dtype)
            # Form the banded storage
            Ab = zeros((k+1, n), dtype=dtype)
            for row in range(k+1):
                Ab[-row-1, row:] = diag(A, k=row)
            func, = get_blas_funcs(('tbsv',), dtype=dtype)
            y1 = func(k=k, a=Ab, x=x)
            y2 = solve(A, x)
            assert_array_almost_equal(y1, y2)
            # diag=1 treats the diagonal as unit.
            y1 = func(k=k, a=Ab, x=x, diag=1)
            A[arange(n), arange(n)] = dtype(1)
            y2 = solve(A, x)
            assert_array_almost_equal(y1, y2)
            y1 = func(k=k, a=Ab, x=x, diag=1, trans=1)
            y2 = solve(A.T, x)
            assert_array_almost_equal(y1, y2)
            y1 = func(k=k, a=Ab, x=x, diag=1, trans=2)
            y2 = solve(A.conj().T, x)
            assert_array_almost_equal(y1, y2)
    def test_tpmv(self):
        """Triangular packed matrix-vector product (?tpmv) vs. dense dot."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 10
            x = rand(n).astype(dtype)
            # Upper triangular array
            A = triu(rand(n, n)) if ind < 2 else triu(rand(n, n)+rand(n, n)*1j)
            # Form the packed storage
            c, r = tril_indices(n)
            Ap = A[r, c]
            func, = get_blas_funcs(('tpmv',), dtype=dtype)
            y1 = func(n=n, ap=Ap, x=x)
            y2 = A.dot(x)
            assert_array_almost_equal(y1, y2)
            # diag=1 treats the diagonal as unit.
            y1 = func(n=n, ap=Ap, x=x, diag=1)
            A[arange(n), arange(n)] = dtype(1)
            y2 = A.dot(x)
            assert_array_almost_equal(y1, y2)
            y1 = func(n=n, ap=Ap, x=x, diag=1, trans=1)
            y2 = A.T.dot(x)
            assert_array_almost_equal(y1, y2)
            y1 = func(n=n, ap=Ap, x=x, diag=1, trans=2)
            y2 = A.conj().T.dot(x)
            assert_array_almost_equal(y1, y2)
    def test_tpsv(self):
        """Triangular packed solve (?tpsv) vs. a dense solve reference."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 10
            x = rand(n).astype(dtype)
            # Upper triangular array
            A = triu(rand(n, n)) if ind < 2 else triu(rand(n, n)+rand(n, n)*1j)
            # Make the system well conditioned by strengthening the diagonal.
            A += eye(n)
            # Form the packed storage
            c, r = tril_indices(n)
            Ap = A[r, c]
            func, = get_blas_funcs(('tpsv',), dtype=dtype)
            y1 = func(n=n, ap=Ap, x=x)
            y2 = solve(A, x)
            assert_array_almost_equal(y1, y2)
            # diag=1 treats the diagonal as unit.
            y1 = func(n=n, ap=Ap, x=x, diag=1)
            A[arange(n), arange(n)] = dtype(1)
            y2 = solve(A, x)
            assert_array_almost_equal(y1, y2)
            y1 = func(n=n, ap=Ap, x=x, diag=1, trans=1)
            y2 = solve(A.T, x)
            assert_array_almost_equal(y1, y2)
            y1 = func(n=n, ap=Ap, x=x, diag=1, trans=2)
            y2 = solve(A.conj().T, x)
            assert_array_almost_equal(y1, y2)
    def test_trmv(self):
        """Triangular matrix-vector product (?trmv) vs. dense triu dot."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 3
            A = (rand(n, n)+eye(n)).astype(dtype)
            x = rand(3).astype(dtype)
            func, = get_blas_funcs(('trmv',), dtype=dtype)
            # Default: use the upper triangle of A.
            y1 = func(a=A, x=x)
            y2 = triu(A).dot(x)
            assert_array_almost_equal(y1, y2)
            # diag=1 treats the diagonal as unit.
            y1 = func(a=A, x=x, diag=1)
            A[arange(n), arange(n)] = dtype(1)
            y2 = triu(A).dot(x)
            assert_array_almost_equal(y1, y2)
            y1 = func(a=A, x=x, diag=1, trans=1)
            y2 = triu(A).T.dot(x)
            assert_array_almost_equal(y1, y2)
            y1 = func(a=A, x=x, diag=1, trans=2)
            y2 = triu(A).conj().T.dot(x)
            assert_array_almost_equal(y1, y2)
    def test_trsv(self):
        """Triangular solve (?trsv) vs. dense solve on triu/tril references."""
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 15
            A = (rand(n, n)+eye(n)).astype(dtype)
            x = rand(n).astype(dtype)
            func, = get_blas_funcs(('trsv',), dtype=dtype)
            y1 = func(a=A, x=x)
            y2 = solve(triu(A), x)
            assert_array_almost_equal(y1, y2)
            y1 = func(a=A, x=x, lower=1)
            y2 = solve(tril(A), x)
            assert_array_almost_equal(y1, y2)
            # diag=1 treats the diagonal as unit.
            y1 = func(a=A, x=x, diag=1)
            A[arange(n), arange(n)] = dtype(1)
            y2 = solve(triu(A), x)
            assert_array_almost_equal(y1, y2)
            y1 = func(a=A, x=x, diag=1, trans=1)
            y2 = solve(triu(A).T, x)
            assert_array_almost_equal(y1, y2)
            y1 = func(a=A, x=x, diag=1, trans=2)
            y2 = solve(triu(A).conj().T, x)
            assert_array_almost_equal(y1, y2)
class TestFBLAS3Simple:
    """Smoke tests for the level-3 BLAS ?gemm wrappers."""

    def test_gemm(self):
        """gemm computes alpha*a*b (optionally + beta*c) for every prefix."""
        # Real prefixes (single and double precision).
        real_cases = [
            ((3, [3], [-4]), [[-36]]),
            ((3, [3], [-4], 3, [5]), [-21]),
        ]
        for prefix in 'sd':
            func = getattr(fblas, prefix + 'gemm', None)
            if func is None:
                continue
            for args, expected in real_cases:
                assert_array_almost_equal(func(*args), expected)
        # Complex prefixes (single and double precision).
        complex_cases = [
            ((3j, [3-4j], [-4]), [[-48-36j]]),
            ((3j, [3-4j], [-4], 3, [5j]), [-48-21j]),
        ]
        for prefix in 'cz':
            func = getattr(fblas, prefix + 'gemm', None)
            if func is None:
                continue
            for args, expected in complex_cases:
                assert_array_almost_equal(func(*args), expected)
def _get_func(func, ps='sdzc'):
"""Just a helper: return a specified BLAS function w/typecode."""
for p in ps:
f = getattr(fblas, p+func, None)
if f is None:
continue
yield f
class TestBLAS3Symm:
def setup_method(self):
self.a = np.array([[1., 2.],
[0., 1.]])
self.b = np.array([[1., 0., 3.],
[0., -1., 2.]])
self.c = np.ones((2, 3))
self.t = np.array([[2., -1., 8.],
[3., 0., 9.]])
def test_symm(self):
for f in _get_func('symm'):
res = f(a=self.a, b=self.b, c=self.c, alpha=1., beta=1.)
assert_array_almost_equal(res, self.t)
res = f(a=self.a.T, b=self.b, lower=1, c=self.c, alpha=1., beta=1.)
assert_array_almost_equal(res, self.t)
res = f(a=self.a, b=self.b.T, side=1, c=self.c.T,
alpha=1., beta=1.)
assert_array_almost_equal(res, self.t.T)
def test_summ_wrong_side(self):
f = getattr(fblas, 'dsymm', None)
if f is not None:
assert_raises(Exception, f, **{'a': self.a, 'b': self.b,
'alpha': 1, 'side': 1})
# `side=1` means C <- B*A, hence shapes of A and B are to be
# compatible. Otherwise, f2py exception is raised
def test_symm_wrong_uplo(self):
"""SYMM only considers the upper/lower part of A. Hence setting
wrong value for `lower` (default is lower=0, meaning upper triangle)
gives a wrong result.
"""
f = getattr(fblas, 'dsymm', None)
if f is not None:
res = f(a=self.a, b=self.b, c=self.c, alpha=1., beta=1.)
assert np.allclose(res, self.t)
res = f(a=self.a, b=self.b, lower=1, c=self.c, alpha=1., beta=1.)
assert not np.allclose(res, self.t)
class TestBLAS3Syrk:
def setup_method(self):
self.a = np.array([[1., 0.],
[0., -2.],
[2., 3.]])
self.t = np.array([[1., 0., 2.],
[0., 4., -6.],
[2., -6., 13.]])
self.tt = np.array([[5., 6.],
[6., 13.]])
def test_syrk(self):
for f in _get_func('syrk'):
c = f(a=self.a, alpha=1.)
assert_array_almost_equal(np.triu(c), np.triu(self.t))
c = f(a=self.a, alpha=1., lower=1)
assert_array_almost_equal(np.tril(c), np.tril(self.t))
c0 = np.ones(self.t.shape)
c = f(a=self.a, alpha=1., beta=1., c=c0)
assert_array_almost_equal(np.triu(c), np.triu(self.t+c0))
c = f(a=self.a, alpha=1., trans=1)
assert_array_almost_equal(np.triu(c), np.triu(self.tt))
# prints '0-th dimension must be fixed to 3 but got 5',
# FIXME: suppress?
# FIXME: how to catch the _fblas.error?
def test_syrk_wrong_c(self):
f = getattr(fblas, 'dsyrk', None)
if f is not None:
assert_raises(Exception, f, **{'a': self.a, 'alpha': 1.,
'c': np.ones((5, 8))})
# if C is supplied, it must have compatible dimensions
class TestBLAS3Syr2k:
def setup_method(self):
self.a = np.array([[1., 0.],
[0., -2.],
[2., 3.]])
self.b = np.array([[0., 1.],
[1., 0.],
[0, 1.]])
self.t = np.array([[0., -1., 3.],
[-1., 0., 0.],
[3., 0., 6.]])
self.tt = np.array([[0., 1.],
[1., 6]])
def test_syr2k(self):
for f in _get_func('syr2k'):
c = f(a=self.a, b=self.b, alpha=1.)
assert_array_almost_equal(np.triu(c), np.triu(self.t))
c = f(a=self.a, b=self.b, alpha=1., lower=1)
assert_array_almost_equal(np.tril(c), np.tril(self.t))
c0 = np.ones(self.t.shape)
c = f(a=self.a, b=self.b, alpha=1., beta=1., c=c0)
assert_array_almost_equal(np.triu(c), np.triu(self.t+c0))
c = f(a=self.a, b=self.b, alpha=1., trans=1)
assert_array_almost_equal(np.triu(c), np.triu(self.tt))
# prints '0-th dimension must be fixed to 3 but got 5', FIXME: suppress?
def test_syr2k_wrong_c(self):
f = getattr(fblas, 'dsyr2k', None)
if f is not None:
assert_raises(Exception, f, **{'a': self.a,
'b': self.b,
'alpha': 1.,
'c': np.zeros((15, 8))})
# if C is supplied, it must have compatible dimensions
class TestSyHe:
"""Quick and simple tests for (zc)-symm, syrk, syr2k."""
def setup_method(self):
self.sigma_y = np.array([[0., -1.j],
[1.j, 0.]])
def test_symm_zc(self):
for f in _get_func('symm', 'zc'):
# NB: a is symmetric w/upper diag of ONLY
res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
assert_array_almost_equal(np.triu(res), np.diag([1, -1]))
def test_hemm_zc(self):
for f in _get_func('hemm', 'zc'):
# NB: a is hermitian w/upper diag of ONLY
res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
assert_array_almost_equal(np.triu(res), np.diag([1, 1]))
def test_syrk_zr(self):
for f in _get_func('syrk', 'zc'):
res = f(a=self.sigma_y, alpha=1.)
assert_array_almost_equal(np.triu(res), np.diag([-1, -1]))
def test_herk_zr(self):
for f in _get_func('herk', 'zc'):
res = f(a=self.sigma_y, alpha=1.)
assert_array_almost_equal(np.triu(res), np.diag([1, 1]))
def test_syr2k_zr(self):
for f in _get_func('syr2k', 'zc'):
res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
assert_array_almost_equal(np.triu(res), 2.*np.diag([-1, -1]))
def test_her2k_zr(self):
for f in _get_func('her2k', 'zc'):
res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
assert_array_almost_equal(np.triu(res), 2.*np.diag([1, 1]))
class TestTRMM:
    """Quick and simple tests for dtrmm."""
    def setup_method(self):
        # Small triangular/general pair for the basic product checks.
        self.a = np.array([[1., 2., ],
                           [-2., 1.]])
        self.b = np.array([[3., 4., -1.],
                           [5., 6., -2.]])
        # Fortran-ordered arrays sized for the side=1 regression test.
        self.a2 = np.array([[1, 1, 2, 3],
                            [0, 1, 4, 5],
                            [0, 0, 1, 6],
                            [0, 0, 0, 1]], order="f")
        self.b2 = np.array([[1, 4], [2, 5], [3, 6], [7, 8], [9, 10]],
                           order="f")
    @pytest.mark.parametrize("dtype_", DTYPES)
    def test_side(self, dtype_):
        """side=1 computes B*A; mismatched shapes for side=0 must raise."""
        trmm = get_blas_funcs("trmm", dtype=dtype_)
        # Provide large A array that works for side=1 but not 0 (see gh-10841)
        assert_raises(Exception, trmm, 1.0, self.a2, self.b2)
        res = trmm(1.0, self.a2.astype(dtype_), self.b2.astype(dtype_),
                   side=1)
        k = self.b2.shape[1]
        assert_allclose(res, self.b2 @ self.a2[:k, :k], rtol=0.,
                        atol=100*np.finfo(dtype_).eps)
    def test_ab(self):
        """Default triu(a) @ b against a precomputed result."""
        f = getattr(fblas, 'dtrmm', None)
        if f is not None:
            result = f(1., self.a, self.b)
            # default a is upper triangular
            expected = np.array([[13., 16., -5.],
                                 [5., 6., -2.]])
            assert_array_almost_equal(result, expected)
    def test_ab_lower(self):
        """lower=True uses tril(a) instead of the upper triangle."""
        f = getattr(fblas, 'dtrmm', None)
        if f is not None:
            result = f(1., self.a, self.b, lower=True)
            expected = np.array([[3., 4., -1.],
                                 [-1., -2., 0.]])  # now a is lower triangular
            assert_array_almost_equal(result, expected)
    def test_b_overwrites(self):
        # BLAS dtrmm modifies B argument in-place.
        # Here the default is to copy, but this can be overridden
        f = getattr(fblas, 'dtrmm', None)
        if f is not None:
            for overwr in [True, False]:
                bcopy = self.b.copy()
                result = f(1., self.a, bcopy, overwrite_b=overwr)
                # C-contiguous arrays are copied
                assert_(bcopy.flags.f_contiguous is False and
                        np.may_share_memory(bcopy, result) is False)
                assert_equal(bcopy, self.b)
            # Only a Fortran-contiguous B can genuinely be overwritten.
            bcopy = np.asfortranarray(self.b.copy())  # or just transpose it
            result = f(1., self.a, bcopy, overwrite_b=True)
            assert_(bcopy.flags.f_contiguous is True and
                    np.may_share_memory(bcopy, result) is True)
            assert_array_almost_equal(bcopy, result)
def test_trsm():
    """Triangular matrix solve (?trsm) against dense solve references."""
    seed(1234)
    for ind, dtype in enumerate(DTYPES):
        tol = np.finfo(dtype).eps*1000
        func, = get_blas_funcs(('trsm',), dtype=dtype)
        # Test protection against size mismatches
        A = rand(4, 5).astype(dtype)
        B = rand(4, 4).astype(dtype)
        alpha = dtype(1)
        assert_raises(Exception, func, alpha, A, B)
        assert_raises(Exception, func, alpha, A.T, B)
        n = 8
        m = 7
        alpha = dtype(-2.5)
        # Well-conditioned triangular factors via a diagonal shift.
        A = (rand(m, m) if ind < 2 else rand(m, m) + rand(m, m)*1j) + eye(m)
        A = A.astype(dtype)
        Au = triu(A)
        Al = tril(A)
        B1 = rand(m, n).astype(dtype)
        B2 = rand(n, m).astype(dtype)
        # Default: solve triu(A) X = alpha*B.
        x1 = func(alpha=alpha, a=A, b=B1)
        assert_equal(B1.shape, x1.shape)
        x2 = solve(Au, alpha*B1)
        assert_allclose(x1, x2, atol=tol)
        x1 = func(alpha=alpha, a=A, b=B1, trans_a=1)
        x2 = solve(Au.T, alpha*B1)
        assert_allclose(x1, x2, atol=tol)
        x1 = func(alpha=alpha, a=A, b=B1, trans_a=2)
        x2 = solve(Au.conj().T, alpha*B1)
        assert_allclose(x1, x2, atol=tol)
        # diag=1 treats the diagonal of A as unit.
        x1 = func(alpha=alpha, a=A, b=B1, diag=1)
        Au[arange(m), arange(m)] = dtype(1)
        x2 = solve(Au, alpha*B1)
        assert_allclose(x1, x2, atol=tol)
        # side=1 solves X A = alpha*B instead.
        x1 = func(alpha=alpha, a=A, b=B2, diag=1, side=1)
        x2 = solve(Au.conj().T, alpha*B2.conj().T)
        assert_allclose(x1, x2.conj().T, atol=tol)
        x1 = func(alpha=alpha, a=A, b=B2, diag=1, side=1, lower=1)
        Al[arange(m), arange(m)] = dtype(1)
        x2 = solve(Al.conj().T, alpha*B2.conj().T)
        assert_allclose(x1, x2.conj().T, atol=tol)
| 40,417
| 35.676951
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_sketches.py
|
"""Tests for _sketches.py."""
import numpy as np
from numpy.testing import assert_, assert_equal
from scipy.linalg import clarkson_woodruff_transform
from scipy.linalg._sketches import cwt_matrix
from scipy.sparse import issparse, rand
from scipy.sparse.linalg import norm
class TestClarksonWoodruffTransform:
    """
    Testing the Clarkson Woodruff Transform
    """
    # set seed for generating test matrices
    rng = np.random.RandomState(seed=1179103485)
    # Test matrix parameters
    n_rows = 2000
    n_cols = 100
    density = 0.1
    # Sketch matrix dimensions
    n_sketch_rows = 200
    # Seeds to test with
    seeds = [1755490010, 934377150, 1391612830, 1752708722, 2008891431,
             1302443994, 1521083269, 1501189312, 1126232505, 1533465685]
    # NOTE: the matrices below consume `rng` in this exact order; do not
    # reorder these statements or the fixtures change.
    A_dense = rng.randn(n_rows, n_cols)
    A_csc = rand(
        n_rows, n_cols, density=density, format='csc', random_state=rng,
    )
    A_csr = rand(
        n_rows, n_cols, density=density, format='csr', random_state=rng,
    )
    A_coo = rand(
        n_rows, n_cols, density=density, format='coo', random_state=rng,
    )
    # Collect the test matrices
    test_matrices = [
        A_dense, A_csc, A_csr, A_coo,
    ]
    # Test vector with norm ~1
    x = rng.randn(n_rows, 1) / np.sqrt(n_rows)
    def test_sketch_dimensions(self):
        """The sketch has n_sketch_rows rows and the input's column count."""
        for A in self.test_matrices:
            for seed in self.seeds:
                sketch = clarkson_woodruff_transform(
                    A, self.n_sketch_rows, seed=seed
                )
                assert_(sketch.shape == (self.n_sketch_rows, self.n_cols))
    def test_seed_returns_identical_transform_matrix(self):
        """The same seed must yield an identical CWT sketching matrix."""
        for A in self.test_matrices:
            for seed in self.seeds:
                S1 = cwt_matrix(
                    self.n_sketch_rows, self.n_rows, seed=seed
                ).toarray()
                S2 = cwt_matrix(
                    self.n_sketch_rows, self.n_rows, seed=seed
                ).toarray()
                assert_equal(S1, S2)
    def test_seed_returns_identically(self):
        """The same seed must yield an identical sketched matrix."""
        for A in self.test_matrices:
            for seed in self.seeds:
                sketch1 = clarkson_woodruff_transform(
                    A, self.n_sketch_rows, seed=seed
                )
                sketch2 = clarkson_woodruff_transform(
                    A, self.n_sketch_rows, seed=seed
                )
                if issparse(sketch1):
                    sketch1 = sketch1.toarray()
                if issparse(sketch2):
                    sketch2 = sketch2.toarray()
                assert_equal(sketch1, sketch2)
    def test_sketch_preserves_frobenius_norm(self):
        # Given the probabilistic nature of the sketches
        # we run the test multiple times and check that
        # we pass all/almost all the tries.
        n_errors = 0
        for A in self.test_matrices:
            if issparse(A):
                true_norm = norm(A)
            else:
                true_norm = np.linalg.norm(A)
            for seed in self.seeds:
                sketch = clarkson_woodruff_transform(
                    A, self.n_sketch_rows, seed=seed,
                )
                if issparse(sketch):
                    sketch_norm = norm(sketch)
                else:
                    sketch_norm = np.linalg.norm(sketch)
                # 10% relative deviation counts as a failure for this seed.
                if np.abs(true_norm - sketch_norm) > 0.1 * true_norm:
                    n_errors += 1
        assert_(n_errors == 0)
    def test_sketch_preserves_vector_norm(self):
        """Vector norms survive sketching within the epsilon=0.5 bound."""
        n_errors = 0
        # Sketch size chosen from the epsilon/failure-rate bound 2/(p*eps^2).
        n_sketch_rows = int(np.ceil(2. / (0.01 * 0.5**2)))
        true_norm = np.linalg.norm(self.x)
        for seed in self.seeds:
            sketch = clarkson_woodruff_transform(
                self.x, n_sketch_rows, seed=seed,
            )
            sketch_norm = np.linalg.norm(sketch)
            if np.abs(true_norm - sketch_norm) > 0.5 * true_norm:
                n_errors += 1
        assert_(n_errors == 0)
| 3,960
| 32.285714
| 74
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_misc.py
|
from scipy.linalg import norm
def test_norm():
    """The norm of an empty sequence is defined to be zero."""
    result = norm([])
    assert result == 0.0
| 76
| 11.833333
| 29
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_decomp_polar.py
|
import numpy as np
from numpy.linalg import norm
from numpy.testing import (assert_, assert_allclose, assert_equal)
from scipy.linalg import polar, eigh
# Fixture matrices reused by the precomputed cases below.
diag2 = np.array([[2, 0], [0, 3]])
a13 = np.array([[1, 2, 2]])
# Each entry is (matrix, side, expected unitary factor u, expected
# positive-semidefinite factor p).
precomputed_cases = [
    [[[0]], 'right', [[1]], [[0]]],
    [[[0]], 'left', [[1]], [[0]]],
    [[[9]], 'right', [[1]], [[9]]],
    [[[9]], 'left', [[1]], [[9]]],
    [diag2, 'right', np.eye(2), diag2],
    [diag2, 'left', np.eye(2), diag2],
    [a13, 'right', a13/norm(a13[0]), a13.T.dot(a13)/norm(a13[0])],
]
# Matrices of assorted shapes/dtypes whose decompositions are verified
# property-by-property rather than against precomputed factors.
verify_cases = [
    [[1, 2], [3, 4]],
    [[1, 2, 3]],
    [[1], [2], [3]],
    [[1, 2, 3], [3, 4, 0]],
    [[1, 2], [3, 4], [5, 5]],
    [[1, 2], [3, 4+5j]],
    [[1, 2, 3j]],
    [[1], [2], [3j]],
    [[1, 2, 3+2j], [3, 4-1j, -4j]],
    [[1, 2], [3-2j, 4+0.5j], [5, 5]],
    [[10000, 10, 1], [-1, 2, 3j], [0, 1, 2]],
]
def check_precomputed_polar(a, side, expected_u, expected_p):
    """Check that polar(a, side=side) reproduces a precomputed (u, p) pair."""
    unitary, posdef = polar(a, side=side)
    for actual, expected in ((unitary, expected_u), (posdef, expected_p)):
        assert_allclose(actual, expected, atol=1e-15)
def verify_polar(a):
    """Compute both polar decompositions of *a* and verify their properties.

    For side='right' we need a = u @ p with p of shape (n, n); for
    side='left' we need a = p @ u with p of shape (m, m).  In both cases
    u must have orthonormal rows or columns and p must be Hermitian
    positive semidefinite.
    """
    product_atol = np.sqrt(np.finfo(float).eps)
    aa = np.asarray(a)
    m, n = aa.shape

    def _check_factors(u, p, p_dim, product):
        # Shapes of the factors.
        assert_equal(u.shape, (m, n))
        assert_equal(p.shape, (p_dim, p_dim))
        # The factors must multiply back to a.
        assert_allclose(product, a, atol=product_atol)
        # u is an isometry along its smaller dimension.
        if m >= n:
            assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15)
        else:
            assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15)
        # p is Hermitian positive semidefinite.
        assert_allclose(p.conj().T, p)
        evals = eigh(p, eigvals_only=True)
        nonzero_evals = evals[abs(evals) > 1e-14]
        assert_((nonzero_evals >= 0).all())

    u, p = polar(a, side='right')
    _check_factors(u, p, n, u.dot(p))
    u, p = polar(a, side='left')
    _check_factors(u, p, m, p.dot(u))
def test_precomputed_cases():
    """Every precomputed polar-decomposition case matches exactly."""
    for case in precomputed_cases:
        check_precomputed_polar(*case)
def test_verify_cases():
    """Every matrix in verify_cases passes the property-based checks."""
    for matrix in verify_cases:
        verify_polar(matrix)
| 2,646
| 28.087912
| 66
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_decomp_cholesky.py
|
from numpy.testing import assert_array_almost_equal, assert_array_equal
from pytest import raises as assert_raises
from numpy import array, transpose, dot, conjugate, zeros_like, empty
from numpy.random import random
from scipy.linalg import cholesky, cholesky_banded, cho_solve_banded, \
cho_factor, cho_solve
from scipy.linalg._testutils import assert_no_overwrite
class TestCholesky:
    """Round-trip tests for scipy.linalg.cholesky."""

    def test_simple(self):
        """Factor a small SPD matrix from both triangles."""
        spd = [[8, 2, 3], [2, 9, 3], [3, 3, 6]]
        upper = cholesky(spd)
        assert_array_almost_equal(dot(transpose(upper), upper), spd)
        lower = transpose(upper)
        rebuilt = dot(lower, transpose(lower))
        assert_array_almost_equal(cholesky(rebuilt, lower=1), lower)

    def test_check_finite(self):
        """check_finite=False must give the same factorization."""
        spd = [[8, 2, 3], [2, 9, 3], [3, 3, 6]]
        upper = cholesky(spd, check_finite=False)
        assert_array_almost_equal(dot(transpose(upper), upper), spd)
        lower = transpose(upper)
        rebuilt = dot(lower, transpose(lower))
        assert_array_almost_equal(
            cholesky(rebuilt, lower=1, check_finite=False), lower)

    def test_simple_complex(self):
        """Hermitian positive-definite complex input."""
        m = array([[3+1j, 3+4j, 5], [0, 2+2j, 2+7j], [0, 0, 7+4j]])
        hpd = dot(transpose(conjugate(m)), m)
        upper = cholesky(hpd)
        assert_array_almost_equal(hpd,
                                  dot(transpose(conjugate(upper)), upper))
        lower = transpose(upper)
        rebuilt = dot(lower, transpose(conjugate(lower)))
        assert_array_almost_equal(cholesky(rebuilt, lower=1), lower)

    def test_random(self):
        """Random diagonally-strengthened real SPD matrices."""
        n = 20
        for _ in range(2):
            m = random([n, n])
            # Strengthen the diagonal so m.T @ m is safely positive definite.
            for i in range(n):
                m[i, i] = 20*(.1+m[i, i])
            spd = dot(transpose(m), m)
            upper = cholesky(spd)
            assert_array_almost_equal(spd, dot(transpose(upper), upper))
            lower = transpose(upper)
            rebuilt = dot(lower, transpose(lower))
            assert_array_almost_equal(cholesky(rebuilt, lower=1), lower)

    def test_random_complex(self):
        """Random diagonally-strengthened Hermitian positive-definite input."""
        n = 20
        for _ in range(2):
            m = random([n, n])+1j*random([n, n])
            for i in range(n):
                m[i, i] = 20*(.1+abs(m[i, i]))
            hpd = dot(transpose(conjugate(m)), m)
            upper = cholesky(hpd)
            assert_array_almost_equal(hpd,
                                      dot(transpose(conjugate(upper)), upper))
            lower = transpose(upper)
            rebuilt = dot(lower, transpose(conjugate(lower)))
            assert_array_almost_equal(cholesky(rebuilt, lower=1), lower)
class TestCholeskyBanded:
    """Tests for cholesky_banded() and cho_solve_banded."""
    def test_check_finite(self):
        """Upper-banded factorization/solve with check_finite=False."""
        # Symmetric positive definite banded matrix `a`
        a = array([[4.0, 1.0, 0.0, 0.0],
                   [1.0, 4.0, 0.5, 0.0],
                   [0.0, 0.5, 4.0, 0.2],
                   [0.0, 0.0, 0.2, 4.0]])
        # Banded storage form of `a`.
        ab = array([[-1.0, 1.0, 0.5, 0.2],
                    [4.0, 4.0, 4.0, 4.0]])
        c = cholesky_banded(ab, lower=False, check_finite=False)
        # Expand the banded factor into a full upper-triangular matrix.
        ufac = zeros_like(a)
        ufac[list(range(4)), list(range(4))] = c[-1]
        ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:]
        assert_array_almost_equal(a, dot(ufac.T, ufac))
        b = array([0.0, 0.5, 4.2, 4.2])
        x = cho_solve_banded((c, False), b, check_finite=False)
        assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
    def test_upper_real(self):
        """Upper-banded factorization/solve of a real SPD matrix."""
        # Symmetric positive definite banded matrix `a`
        a = array([[4.0, 1.0, 0.0, 0.0],
                   [1.0, 4.0, 0.5, 0.0],
                   [0.0, 0.5, 4.0, 0.2],
                   [0.0, 0.0, 0.2, 4.0]])
        # Banded storage form of `a`.
        ab = array([[-1.0, 1.0, 0.5, 0.2],
                    [4.0, 4.0, 4.0, 4.0]])
        c = cholesky_banded(ab, lower=False)
        # Expand the banded factor into a full upper-triangular matrix.
        ufac = zeros_like(a)
        ufac[list(range(4)), list(range(4))] = c[-1]
        ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:]
        assert_array_almost_equal(a, dot(ufac.T, ufac))
        b = array([0.0, 0.5, 4.2, 4.2])
        x = cho_solve_banded((c, False), b)
        assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
    def test_upper_complex(self):
        """Upper-banded factorization/solve of a Hermitian PD matrix."""
        # Hermitian positive definite banded matrix `a`
        a = array([[4.0, 1.0, 0.0, 0.0],
                   [1.0, 4.0, 0.5, 0.0],
                   [0.0, 0.5, 4.0, -0.2j],
                   [0.0, 0.0, 0.2j, 4.0]])
        # Banded storage form of `a`.
        ab = array([[-1.0, 1.0, 0.5, -0.2j],
                    [4.0, 4.0, 4.0, 4.0]])
        c = cholesky_banded(ab, lower=False)
        # Expand the banded factor into a full upper-triangular matrix.
        ufac = zeros_like(a)
        ufac[list(range(4)), list(range(4))] = c[-1]
        ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:]
        assert_array_almost_equal(a, dot(ufac.conj().T, ufac))
        b = array([0.0, 0.5, 4.0-0.2j, 0.2j + 4.0])
        x = cho_solve_banded((c, False), b)
        assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
    def test_lower_real(self):
        """Lower-banded factorization/solve of a real SPD matrix."""
        # Symmetric positive definite banded matrix `a`
        a = array([[4.0, 1.0, 0.0, 0.0],
                   [1.0, 4.0, 0.5, 0.0],
                   [0.0, 0.5, 4.0, 0.2],
                   [0.0, 0.0, 0.2, 4.0]])
        # Banded storage form of `a`.
        ab = array([[4.0, 4.0, 4.0, 4.0],
                    [1.0, 0.5, 0.2, -1.0]])
        c = cholesky_banded(ab, lower=True)
        # Expand the banded factor into a full lower-triangular matrix.
        lfac = zeros_like(a)
        lfac[list(range(4)), list(range(4))] = c[0]
        lfac[(1, 2, 3), (0, 1, 2)] = c[1, :3]
        assert_array_almost_equal(a, dot(lfac, lfac.T))
        b = array([0.0, 0.5, 4.2, 4.2])
        x = cho_solve_banded((c, True), b)
        assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
    def test_lower_complex(self):
        """Lower-banded factorization/solve of a Hermitian PD matrix."""
        # Hermitian positive definite banded matrix `a`
        a = array([[4.0, 1.0, 0.0, 0.0],
                   [1.0, 4.0, 0.5, 0.0],
                   [0.0, 0.5, 4.0, -0.2j],
                   [0.0, 0.0, 0.2j, 4.0]])
        # Banded storage form of `a`.
        ab = array([[4.0, 4.0, 4.0, 4.0],
                    [1.0, 0.5, 0.2j, -1.0]])
        c = cholesky_banded(ab, lower=True)
        # Expand the banded factor into a full lower-triangular matrix.
        lfac = zeros_like(a)
        lfac[list(range(4)), list(range(4))] = c[0]
        lfac[(1, 2, 3), (0, 1, 2)] = c[1, :3]
        assert_array_almost_equal(a, dot(lfac, lfac.conj().T))
        b = array([0.0, 0.5j, 3.8j, 3.8])
        x = cho_solve_banded((c, True), b)
        assert_array_almost_equal(x, [0.0, 0.0, 1.0j, 1.0])
class TestOverwrite:
    """Check that the cholesky routines do not overwrite their inputs."""

    def test_cholesky(self):
        assert_no_overwrite(cholesky, [(3, 3)])

    def test_cho_factor(self):
        assert_no_overwrite(cho_factor, [(3, 3)])

    def test_cho_solve(self):
        spd = array([[2, -1, 0], [-1, 2, -1], [0, -1, 2]])
        factor = cho_factor(spd)

        def solve_with_factor(rhs):
            return cho_solve(factor, rhs)

        assert_no_overwrite(solve_with_factor, [(3,)])

    def test_cholesky_banded(self):
        assert_no_overwrite(cholesky_banded, [(2, 3)])

    def test_cho_solve_banded(self):
        banded = array([[0, -1, -1], [2, 2, 2]])
        factor = cholesky_banded(banded)

        def solve_with_banded_factor(rhs):
            return cho_solve_banded((factor, False), rhs)

        assert_no_overwrite(solve_with_banded_factor, [(3,)])
class TestEmptyArray:

    def test_cho_factor_empty_square(self):
        """A 0x0 input is accepted; other empty shapes raise ValueError."""
        square_empty = empty((0, 0))
        factor, _ = cho_factor(square_empty)
        assert_array_equal(factor, square_empty)
        # Non-square empty inputs of every flavor are rejected.
        bad_inputs = [array([]), array([[]]), [], [[]]]
        for bad in bad_inputs:
            assert_raises(ValueError, cho_factor, bad)
| 7,265
| 34.793103
| 78
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_solve_toeplitz.py
|
"""Test functions for linalg._solve_toeplitz module
"""
import numpy as np
from scipy.linalg._solve_toeplitz import levinson
from scipy.linalg import solve, toeplitz, solve_toeplitz
from numpy.testing import assert_equal, assert_allclose
import pytest
from pytest import raises as assert_raises
def test_solve_equivalence():
    """For toeplitz matrices, solve_toeplitz() should be equivalent to solve()."""
    rng = np.random.RandomState(1234)

    def _maybe_complex(vec):
        # Promote roughly half of the vectors to complex.
        if rng.rand() < 0.5:
            vec = vec + 1j * rng.randn(len(vec))
        return vec

    for n in (1, 2, 3, 10):
        col = _maybe_complex(rng.randn(n))
        row = _maybe_complex(rng.randn(n))
        rhs = _maybe_complex(rng.randn(n))
        # Check equivalence when both the column and row are provided.
        assert_allclose(solve_toeplitz((col, row), rhs),
                        solve(toeplitz(col, r=row), rhs))
        # Check equivalence when the column is provided but not the row.
        assert_allclose(solve_toeplitz(col, b=rhs),
                        solve(toeplitz(col), rhs))
def test_multiple_rhs():
    """solve_toeplitz accepts stacked right-hand sides of any trailing shape."""
    rng = np.random.RandomState(1234)
    col = rng.randn(4)
    row = rng.randn(4)
    dense = toeplitz(col, r=row)
    for offset in [0, 1j]:
        for yshape in ((4,), (4, 3), (4, 3, 2)):
            rhs = rng.randn(*yshape) + offset
            actual = solve_toeplitz((col, row), b=rhs)
            desired = solve(dense, rhs)
            # The solution keeps the right-hand side's shape.
            assert_equal(actual.shape, yshape)
            assert_equal(desired.shape, yshape)
            assert_allclose(actual, desired)
def test_native_list_arguments():
    """Plain Python lists are accepted for c, r and b."""
    col = [1, 2, 4, 7]
    row = [1, 3, 9, 12]
    rhs = [5, 1, 4, 2]
    actual = solve_toeplitz((col, row), rhs)
    desired = solve(toeplitz(col, r=row), rhs)
    assert_allclose(actual, desired)
def test_zero_diag_error():
    """The Levinson-Durbin implementation fails when the diagonal is zero."""
    rng = np.random.RandomState(1234)
    n = 4
    col = rng.randn(n)
    row = rng.randn(n)
    rhs = rng.randn(n)
    # Zero out the diagonal element to trigger the failure.
    col[0] = 0
    assert_raises(np.linalg.LinAlgError,
                  solve_toeplitz, (col, row), b=rhs)
def test_wikipedia_counterexample():
    """The Levinson-Durbin implementation also fails in other cases.

    This example is from the talk page of the wikipedia article.
    """
    rng = np.random.RandomState(1234)
    first_column = [2, 2, 1]
    rhs = rng.randn(3)
    assert_raises(np.linalg.LinAlgError, solve_toeplitz, first_column, b=rhs)
def test_reflection_coeffs():
    """Partial solve_toeplitz solutions match levinson() reflection coeffs.

    The last component of each partial solution equals the corresponding
    reflection coefficient reported by the low-level levinson() routine.
    """
    rng = np.random.RandomState(1234)
    y_d = rng.randn(10)
    y_z = rng.randn(10) + 1j
    partial_last_d = [1]
    partial_last_z = [1]
    for k in range(2, 10):
        partial_last_d.append(solve_toeplitz(y_d[:(k-1)], b=y_d[1:k])[-1])
        partial_last_z.append(solve_toeplitz(y_z[:(k-1)], b=y_z[1:k])[-1])

    # levinson() takes the concatenated first row (reversed) and column.
    full_d = np.concatenate((y_d[-2:0:-1], y_d[:-1]))
    full_z = np.concatenate((y_z[-2:0:-1].conj(), y_z[:-1]))
    _, ref_d = levinson(full_d, b=y_d[1:])
    _, ref_z = levinson(full_z, b=y_z[1:])

    assert_allclose(partial_last_d, ref_d[:-1])
    assert_allclose(partial_last_z, ref_z[:-1])
@pytest.mark.xfail(reason='Instability of Levinson iteration')
def test_unstable():
    """Known-unstable case for the Levinson recursion (expected failure).

    Marked xfail: the final assert_allclose is expected not to hold
    because the Levinson iteration loses accuracy on this matrix.
    """
    # this is a "Gaussian Toeplitz matrix", as mentioned in Example 2 of
    # I. Gohbert, T. Kailath and V. Olshevsky "Fast Gaussian Elimination with
    # Partial Pivoting for Matrices with Displacement Structure"
    # Mathematics of Computation, 64, 212 (1995), pp 1557-1576
    # which can be unstable for levinson recursion.
    # other fast toeplitz solvers such as GKO or Burg should be better.
    random = np.random.RandomState(1234)
    n = 100
    # First column decays super-exponentially: c[k] = 0.9**(k**2).
    c = 0.9 ** (np.arange(n)**2)
    y = random.randn(n)
    solution1 = solve_toeplitz(c, b=y)
    solution2 = solve(toeplitz(c), y)
    assert_allclose(solution1, solution2)
| 4,010
| 31.877049
| 79
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_cython_blas.py
|
import numpy as np
from numpy.testing import (assert_allclose,
assert_equal)
import scipy.linalg.cython_blas as blas
class TestDGEMM:
    """Tests for the cython_blas dgemm test wrapper.

    Checks C = alpha*A*B + beta*C against numpy's dot() for every
    combination of transposed/untransposed operands and output.
    """

    def test_transposes(self):
        # The [:2,:2] slices turn each operand into a strided
        # (non-C-contiguous) view of a larger array — presumably to
        # exercise the wrapper's leading-dimension handling; confirm
        # against _test_dgemm's implementation.
        a = np.arange(12, dtype='d').reshape((3, 4))[:2,:2]
        b = np.arange(1, 13, dtype='d').reshape((4, 3))[:2,:2]
        c = np.empty((2, 4))[:2,:2]
        blas._test_dgemm(1., a, b, 0., c)
        assert_allclose(c, a.dot(b))
        blas._test_dgemm(1., a.T, b, 0., c)
        assert_allclose(c, a.T.dot(b))
        blas._test_dgemm(1., a, b.T, 0., c)
        assert_allclose(c, a.dot(b.T))
        blas._test_dgemm(1., a.T, b.T, 0., c)
        assert_allclose(c, a.T.dot(b.T))
        # Same combinations, writing into the transposed output.
        blas._test_dgemm(1., a, b, 0., c.T)
        assert_allclose(c, a.dot(b).T)
        blas._test_dgemm(1., a.T, b, 0., c.T)
        assert_allclose(c, a.T.dot(b).T)
        blas._test_dgemm(1., a, b.T, 0., c.T)
        assert_allclose(c, a.dot(b.T).T)
        blas._test_dgemm(1., a.T, b.T, 0., c.T)
        assert_allclose(c, a.T.dot(b.T).T)

    def test_shapes(self):
        # Rectangular (3x2)·(2x4) product and its transposed formulation.
        a = np.arange(6, dtype='d').reshape((3, 2))
        b = np.arange(-6, 2, dtype='d').reshape((2, 4))
        c = np.empty((3, 4))
        blas._test_dgemm(1., a, b, 0., c)
        assert_allclose(c, a.dot(b))
        blas._test_dgemm(1., b.T, a.T, 0., c.T)
        assert_allclose(c, b.T.dot(a.T).T)
class TestWfuncPointers:
    """ Test the function pointers that are expected to fail on
    Mac OS X without the additional entry statement in their definitions
    in fblas_l1.pyf.src.

    The expected values are hard-coded reference results for the fixed
    inputs below; the [::2] slices re-check each routine on strided
    (non-contiguous) input.
    """

    def test_complex_args(self):
        # Single-precision complex dot products, amax, asum and nrm2.
        cx = np.array([.5 + 1.j, .25 - .375j, 12.5 - 4.j], np.complex64)
        cy = np.array([.8 + 2.j, .875 - .625j, -1. + 2.j], np.complex64)
        assert_allclose(blas._test_cdotc(cx, cy),
                        -17.6468753815+21.3718757629j)
        assert_allclose(blas._test_cdotu(cx, cy),
                        -6.11562538147+30.3156242371j)
        assert_equal(blas._test_icamax(cx), 3)
        assert_allclose(blas._test_scasum(cx), 18.625)
        assert_allclose(blas._test_scnrm2(cx), 13.1796483994)
        # Same routines on strided views.
        assert_allclose(blas._test_cdotc(cx[::2], cy[::2]),
                        -18.1000003815+21.2000007629j)
        assert_allclose(blas._test_cdotu(cx[::2], cy[::2]),
                        -6.10000038147+30.7999992371j)
        assert_allclose(blas._test_scasum(cx[::2]), 18.)
        assert_allclose(blas._test_scnrm2(cx[::2]), 13.1719398499)

    def test_double_args(self):
        # Double-precision asum, dot, nrm2 and idamax.
        x = np.array([5., -3, -.5], np.float64)
        y = np.array([2, 1, .5], np.float64)
        assert_allclose(blas._test_dasum(x), 8.5)
        assert_allclose(blas._test_ddot(x, y), 6.75)
        assert_allclose(blas._test_dnrm2(x), 5.85234975815)
        assert_allclose(blas._test_dasum(x[::2]), 5.5)
        assert_allclose(blas._test_ddot(x[::2], y[::2]), 9.75)
        assert_allclose(blas._test_dnrm2(x[::2]), 5.0249376297)
        assert_equal(blas._test_idamax(x), 1)

    def test_float_args(self):
        # Single-precision counterparts of test_double_args.
        x = np.array([5., -3, -.5], np.float32)
        y = np.array([2, 1, .5], np.float32)
        assert_equal(blas._test_isamax(x), 1)
        assert_allclose(blas._test_sasum(x), 8.5)
        assert_allclose(blas._test_sdot(x, y), 6.75)
        assert_allclose(blas._test_snrm2(x), 5.85234975815)
        assert_allclose(blas._test_sasum(x[::2]), 5.5)
        assert_allclose(blas._test_sdot(x[::2], y[::2]), 9.75)
        assert_allclose(blas._test_snrm2(x[::2]), 5.0249376297)

    def test_double_complex_args(self):
        # Double-precision complex izamax and dot products.
        cx = np.array([.5 + 1.j, .25 - .375j, 13. - 4.j], np.complex128)
        cy = np.array([.875 + 2.j, .875 - .625j, -1. + 2.j], np.complex128)
        assert_equal(blas._test_izamax(cx), 3)
        assert_allclose(blas._test_zdotc(cx, cy), -18.109375+22.296875j)
        assert_allclose(blas._test_zdotu(cx, cy), -6.578125+31.390625j)
        assert_allclose(blas._test_zdotc(cx[::2], cy[::2]), -18.5625+22.125j)
        assert_allclose(blas._test_zdotu(cx[::2], cy[::2]), -6.5625+31.875j)
| 4,087
| 33.352941
| 77
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_cython_lapack.py
|
from numpy.testing import assert_allclose
from scipy.linalg import cython_lapack as cython_lapack
from scipy.linalg import lapack
class TestLamch:
    """cython_lapack machine-parameter queries must match scipy.linalg.lapack."""

    # All CMACH codes accepted by ?lamch.
    _CMACH_CODES = (b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l', b'o')

    def test_slamch(self):
        # Single-precision machine parameters agree for every code.
        for code in self._CMACH_CODES:
            assert_allclose(cython_lapack._test_slamch(code),
                            lapack.slamch(code))

    def test_dlamch(self):
        # Double-precision machine parameters agree for every code.
        for code in self._CMACH_CODES:
            assert_allclose(cython_lapack._test_dlamch(code),
                            lapack.dlamch(code))
| 574
| 30.944444
| 78
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_lapack.py
|
#
# Created by: Pearu Peterson, September 2002
#
import sys
from functools import reduce
from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
assert_allclose, assert_almost_equal,
assert_array_equal)
import pytest
from pytest import raises as assert_raises
import numpy as np
from numpy import (eye, ones, zeros, zeros_like, triu, tril, tril_indices,
triu_indices)
from numpy.random import rand, randint, seed
from scipy.linalg import (_flapack as flapack, lapack, inv, svd, cholesky,
solve, ldl, norm, block_diag, qr, eigh, qz)
from scipy.linalg.lapack import _compute_lwork
from scipy.stats import ortho_group, unitary_group
import scipy.sparse as sps
try:
from scipy.__config__ import CONFIG
except ImportError:
CONFIG = None
try:
from scipy.linalg import _clapack as clapack
except ImportError:
clapack = None
from scipy.linalg.lapack import get_lapack_funcs
from scipy.linalg.blas import get_blas_funcs
REAL_DTYPES = [np.float32, np.float64]
COMPLEX_DTYPES = [np.complex64, np.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
blas_provider = blas_version = None
if CONFIG is not None:
blas_provider = CONFIG['Build Dependencies']['blas']['name']
blas_version = CONFIG['Build Dependencies']['blas']['version']
def generate_random_dtype_array(shape, dtype):
    """Return a random array of the given shape and dtype.

    Values are uniform in [0, 1) for real dtypes; for complex dtypes both
    the real and the imaginary part are uniform in [0, 1).

    Uses np.issubdtype rather than membership in the module-level
    COMPLEX_DTYPES list, so any complex dtype is handled correctly.
    """
    if np.issubdtype(dtype, np.complexfloating):
        return (np.random.rand(*shape)
                + np.random.rand(*shape)*1.0j).astype(dtype)
    return np.random.rand(*shape).astype(dtype)
def test_lapack_documented():
    """Every public name in scipy.linalg.lapack must appear in its docstring."""
    if lapack.__doc__ is None:  # just in case there is a python -OO
        pytest.skip('lapack.__doc__ is None')
    documented = set(lapack.__doc__.split())
    # Names that are intentionally absent from the module docstring.
    ignore_list = {
        'absolute_import', 'clapack', 'division', 'find_best_lapack_type',
        'flapack', 'print_function', 'HAS_ILP64',
    }
    missing = [name for name in dir(lapack)
               if not name.startswith('_')
               and name not in ignore_list
               and name not in documented]
    assert missing == [], 'Name(s) missing from lapack.__doc__ or ignore_list'
class TestFlapackSimple:
    """Smoke tests for simple _flapack wrappers (gebal, gehrd, trsyl, lange)."""

    def test_gebal(self):
        # gebal balances a matrix; the already-balanced `a` must come back
        # unchanged with trivial lo/hi/pivscale.
        a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        a1 = [[1, 0, 0, 3e-4],
              [4, 0, 0, 2e-3],
              [7, 1, 0, 0],
              [0, 1, 0, 0]]
        for p in 'sdzc':
            f = getattr(flapack, p+'gebal', None)
            if f is None:
                continue
            ba, lo, hi, pivscale, info = f(a)
            assert_(not info, repr(info))
            assert_array_almost_equal(ba, a)
            assert_equal((lo, hi), (0, len(a[0])-1))
            assert_array_almost_equal(pivscale, np.ones(len(a)))
            # With permute/scale enabled only the info flag is checked here.
            ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1)
            assert_(not info, repr(info))
            # print(a1)
            # print(ba, lo, hi, pivscale)

    def test_gehrd(self):
        # Hessenberg reduction must report success (info == 0).
        a = [[-149, -50, -154],
             [537, 180, 546],
             [-27, -9, -25]]
        for p in 'd':
            f = getattr(flapack, p+'gehrd', None)
            if f is None:
                continue
            ht, tau, info = f(a)
            assert_(not info, repr(info))

    def test_trsyl(self):
        # Sylvester equation solver: check A*X + X*B = scale*C for the
        # default, transposed/conjugated, and isgn=-1 variants.
        a = np.array([[1, 2], [0, 4]])
        b = np.array([[5, 6], [0, 8]])
        c = np.array([[9, 10], [11, 12]])
        trans = 'T'
        # Test single and double implementations, including most
        # of the options
        for dtype in 'fdFD':
            a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
            trsyl, = get_lapack_funcs(('trsyl',), (a1,))
            if dtype.isupper():  # is complex dtype
                a1[0] += 1j
                trans = 'C'
            x, scale, info = trsyl(a1, b1, c1)
            assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1),
                                      scale * c1)
            x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
            assert_array_almost_equal(
                    np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
                    scale * c1, decimal=4)
            x, scale, info = trsyl(a1, b1, c1, isgn=-1)
            assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1),
                                      scale * c1, decimal=4)

    def test_lange(self):
        # ?lange matrix norms: compare every norm code against the
        # corresponding numpy reduction.
        a = np.array([
            [-149, -50, -154],
            [537, 180, 546],
            [-27, -9, -25]])
        for dtype in 'fdFD':
            for norm_str in 'Mm1OoIiFfEe':
                a1 = a.astype(dtype)
                if dtype.isupper():
                    # is complex dtype
                    a1[0, 0] += 1j
                lange, = get_lapack_funcs(('lange',), (a1,))
                value = lange(norm_str, a1)
                if norm_str in 'FfEe':
                    if dtype in 'Ff':
                        decimal = 3
                    else:
                        decimal = 7
                    ref = np.sqrt(np.sum(np.square(np.abs(a1))))
                    assert_almost_equal(value, ref, decimal)
                else:
                    if norm_str in 'Mm':
                        ref = np.max(np.abs(a1))
                    elif norm_str in '1Oo':
                        ref = np.max(np.sum(np.abs(a1), axis=0))
                    elif norm_str in 'Ii':
                        ref = np.max(np.sum(np.abs(a1), axis=1))
                    assert_equal(value, ref)
class TestLapack:
    """Smoke tests: the wrapper modules imported (possibly as empty stubs)."""

    @staticmethod
    def _probe_empty(module):
        # Builds without the corresponding backend expose 'empty_module'.
        hasattr(module, 'empty_module')

    def test_flapack(self):
        self._probe_empty(flapack)

    def test_clapack(self):
        # clapack may be None when the C wrappers are unavailable;
        # hasattr(None, ...) is simply False.
        self._probe_empty(clapack)
class TestLeastSquaresSolvers:
    """Tests for the LAPACK least-squares drivers gels/gelsd/gelss/gelsy.

    Each driver is run on the same small fixed real and complex systems and
    the solution (and singular values, where returned) is compared against
    hard-coded reference values.
    """

    def test_gels(self):
        seed(1234)
        # Test fat/tall matrix argument handling - gh-issue #8329
        for ind, dtype in enumerate(DTYPES):
            m = 10
            n = 20
            nrhs = 1
            a1 = rand(m, n).astype(dtype)
            b1 = rand(n).astype(dtype)
            gls, glslw = get_lapack_funcs(('gels', 'gels_lwork'), dtype=dtype)
            # Request of sizes
            lwork = _compute_lwork(glslw, m, n, nrhs)
            _, _, info = gls(a1, b1, lwork=lwork)
            assert_(info >= 0)
            # 'T' for real dtypes, 'C' for complex ones.
            _, _, info = gls(a1, b1, trans='TTCC'[ind], lwork=lwork)
            assert_(info >= 0)
        for dtype in REAL_DTYPES:
            a1 = np.array([[1.0, 2.0],
                           [4.0, 5.0],
                           [7.0, 8.0]], dtype=dtype)
            b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
            gels, gels_lwork, geqrf = get_lapack_funcs(
                    ('gels', 'gels_lwork', 'geqrf'), (a1, b1))
            m, n = a1.shape
            if len(b1.shape) == 2:
                nrhs = b1.shape[1]
            else:
                nrhs = 1
            # Request of sizes
            lwork = _compute_lwork(gels_lwork, m, n, nrhs)
            lqr, x, info = gels(a1, b1, lwork=lwork)
            assert_allclose(x[:-1], np.array([-14.333333333333323,
                                              14.999999999999991],
                                             dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)
            # The returned factor must match a direct QR factorization.
            lqr_truth, _, _, _ = geqrf(a1)
            assert_array_equal(lqr, lqr_truth)
        for dtype in COMPLEX_DTYPES:
            a1 = np.array([[1.0+4.0j, 2.0],
                           [4.0+0.5j, 5.0-3.0j],
                           [7.0-2.0j, 8.0+0.7j]], dtype=dtype)
            b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
            gels, gels_lwork, geqrf = get_lapack_funcs(
                    ('gels', 'gels_lwork', 'geqrf'), (a1, b1))
            m, n = a1.shape
            if len(b1.shape) == 2:
                nrhs = b1.shape[1]
            else:
                nrhs = 1
            # Request of sizes
            lwork = _compute_lwork(gels_lwork, m, n, nrhs)
            lqr, x, info = gels(a1, b1, lwork=lwork)
            assert_allclose(x[:-1],
                            np.array([1.161753632288328-1.901075709391912j,
                                      1.735882340522193+1.521240901196909j],
                                     dtype=dtype), rtol=25*np.finfo(dtype).eps)
            lqr_truth, _, _, _ = geqrf(a1)
            assert_array_equal(lqr, lqr_truth)

    def test_gelsd(self):
        for dtype in REAL_DTYPES:
            a1 = np.array([[1.0, 2.0],
                           [4.0, 5.0],
                           [7.0, 8.0]], dtype=dtype)
            b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
            gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
                                                  (a1, b1))
            m, n = a1.shape
            if len(b1.shape) == 2:
                nrhs = b1.shape[1]
            else:
                nrhs = 1
            # Request of sizes
            work, iwork, info = gelsd_lwork(m, n, nrhs, -1)
            lwork = int(np.real(work))
            iwork_size = iwork
            x, s, rank, info = gelsd(a1, b1, lwork, iwork_size,
                                     -1, False, False)
            assert_allclose(x[:-1], np.array([-14.333333333333323,
                                              14.999999999999991],
                                             dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)
            # gelsd additionally returns the singular values of a1.
            assert_allclose(s, np.array([12.596017180511966,
                                         0.583396253199685], dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)
        for dtype in COMPLEX_DTYPES:
            a1 = np.array([[1.0+4.0j, 2.0],
                           [4.0+0.5j, 5.0-3.0j],
                           [7.0-2.0j, 8.0+0.7j]], dtype=dtype)
            b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
            gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
                                                  (a1, b1))
            m, n = a1.shape
            if len(b1.shape) == 2:
                nrhs = b1.shape[1]
            else:
                nrhs = 1
            # Request of sizes
            work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1)
            lwork = int(np.real(work))
            rwork_size = int(rwork)
            iwork_size = iwork
            x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size,
                                     -1, False, False)
            assert_allclose(x[:-1],
                            np.array([1.161753632288328-1.901075709391912j,
                                      1.735882340522193+1.521240901196909j],
                                     dtype=dtype), rtol=25*np.finfo(dtype).eps)
            assert_allclose(s,
                            np.array([13.035514762572043, 4.337666985231382],
                                     dtype=dtype), rtol=25*np.finfo(dtype).eps)

    def test_gelss(self):
        for dtype in REAL_DTYPES:
            a1 = np.array([[1.0, 2.0],
                           [4.0, 5.0],
                           [7.0, 8.0]], dtype=dtype)
            b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
            gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
                                                  (a1, b1))
            m, n = a1.shape
            if len(b1.shape) == 2:
                nrhs = b1.shape[1]
            else:
                nrhs = 1
            # Request of sizes
            work, info = gelss_lwork(m, n, nrhs, -1)
            lwork = int(np.real(work))
            v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
            assert_allclose(x[:-1], np.array([-14.333333333333323,
                                              14.999999999999991],
                                             dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)
            assert_allclose(s, np.array([12.596017180511966,
                                         0.583396253199685], dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)
        for dtype in COMPLEX_DTYPES:
            a1 = np.array([[1.0+4.0j, 2.0],
                           [4.0+0.5j, 5.0-3.0j],
                           [7.0-2.0j, 8.0+0.7j]], dtype=dtype)
            b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
            gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
                                                  (a1, b1))
            m, n = a1.shape
            if len(b1.shape) == 2:
                nrhs = b1.shape[1]
            else:
                nrhs = 1
            # Request of sizes
            work, info = gelss_lwork(m, n, nrhs, -1)
            lwork = int(np.real(work))
            v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
            assert_allclose(x[:-1],
                            np.array([1.161753632288328-1.901075709391912j,
                                      1.735882340522193+1.521240901196909j],
                                     dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)
            assert_allclose(s, np.array([13.035514762572043,
                                         4.337666985231382], dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)

    def test_gelsy(self):
        for dtype in REAL_DTYPES:
            a1 = np.array([[1.0, 2.0],
                           [4.0, 5.0],
                           [7.0, 8.0]], dtype=dtype)
            b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
            # NOTE(review): 'gelss_lwork' (not 'gelsy_lwork') is requested
            # here; it is bound to the name gelsy_lwork and called with
            # compatible arguments, so the test passes — but this looks
            # like a typo worth confirming/fixing.
            gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
                                                  (a1, b1))
            m, n = a1.shape
            if len(b1.shape) == 2:
                nrhs = b1.shape[1]
            else:
                nrhs = 1
            # Request of sizes
            work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
            lwork = int(np.real(work))
            jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
            v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
                                        lwork, False, False)
            assert_allclose(x[:-1], np.array([-14.333333333333323,
                                              14.999999999999991],
                                             dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)
        for dtype in COMPLEX_DTYPES:
            a1 = np.array([[1.0+4.0j, 2.0],
                           [4.0+0.5j, 5.0-3.0j],
                           [7.0-2.0j, 8.0+0.7j]], dtype=dtype)
            b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
            # NOTE(review): same 'gelss_lwork' vs 'gelsy_lwork' remark as
            # in the real-dtype loop above.
            gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
                                                  (a1, b1))
            m, n = a1.shape
            if len(b1.shape) == 2:
                nrhs = b1.shape[1]
            else:
                nrhs = 1
            # Request of sizes
            work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
            lwork = int(np.real(work))
            jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
            v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
                                        lwork, False, False)
            assert_allclose(x[:-1],
                            np.array([1.161753632288328-1.901075709391912j,
                                      1.735882340522193+1.521240901196909j],
                                     dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('shape', [(3, 4), (5, 2), (2**18, 2**18)])
def test_geqrf_lwork(dtype, shape):
    """The geqrf workspace query must succeed (info == 0) for every dtype,
    including the very large (2**18 x 2**18) shape."""
    # Note: a bare string (not a tuple) is passed; get_lapack_funcs then
    # returns the single function rather than a sequence.
    geqrf_lwork = get_lapack_funcs(('geqrf_lwork'), dtype=dtype)
    m, n = shape
    lwork, info = geqrf_lwork(m=m, n=n)
    assert_equal(info, 0)
class TestRegression:
    """Regression tests for previously reported LAPACK wrapper bugs."""

    def test_ticket_1645(self):
        # Check that RQ routines have correct lwork
        for dtype in DTYPES:
            a = np.zeros((300, 2), dtype=dtype)
            gerqf, = get_lapack_funcs(['gerqf'], [a])
            # An undersized lwork must raise rather than crash.
            assert_raises(Exception, gerqf, a, lwork=2)
            rq, tau, work, info = gerqf(a)
            if dtype in REAL_DTYPES:
                # orgrq rebuilds Q from the real RQ factorization.
                orgrq, = get_lapack_funcs(['orgrq'], [a])
                assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1)
                orgrq(rq[-2:], tau, lwork=2)
            elif dtype in COMPLEX_DTYPES:
                # ungrq is the complex counterpart of orgrq.
                ungrq, = get_lapack_funcs(['ungrq'], [a])
                assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1)
                ungrq(rq[-2:], tau, lwork=2)
class TestDpotr:
    """potrf/potri round-trip against scipy.linalg.inv (regression gh-2691)."""

    def test_gh_2691(self):
        # Exercise every combination of the 'lower' and 'clean' flags.
        for lower in (True, False):
            for clean in (True, False):
                np.random.seed(42)
                mat = np.random.normal(size=(3, 3))
                sym = mat.dot(mat.T)
                potrf, potri = get_lapack_funcs(("potrf", "potri"), (sym,))
                chol, info = potrf(sym, lower, clean=clean)
                approx_inv = potri(chol, lower)[0]
                # potri fills only the requested triangle, so compare just
                # that half against the dense inverse.
                if lower:
                    assert_allclose(np.tril(approx_inv), np.tril(inv(sym)))
                else:
                    assert_allclose(np.triu(approx_inv), np.triu(inv(sym)))
class TestDlasd4:
    def test_sing_val_update(self):
        """Check ?lasd4 singular-value updates against a full SVD.

        Builds the broken-arrowhead matrix
        M = [diag(sigmas[:-1]) ; 0 | m_vec] and verifies that the roots
        found by dlasd4 match the singular values computed by svd(M).
        """
        sigmas = np.array([4., 3., 2., 0])
        m_vec = np.array([3.12, 5.7, -4.8, -2.2])
        M = np.hstack((np.vstack((np.diag(sigmas[0:-1]),
                                  np.zeros((1, len(m_vec) - 1)))),
                       m_vec[:, np.newaxis]))
        SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False,
                 check_finite=False)
        it_len = len(sigmas)
        # dlasd4 expects the d values in ascending order plus the appended
        # m_vec in matching (reversed) order.
        sgm = np.concatenate((sigmas[::-1], [sigmas[0] + it_len*norm(m_vec)]))
        mvc = np.concatenate((m_vec[::-1], (0,)))
        lasd4 = get_lapack_funcs('lasd4', (sigmas,))
        roots = []
        for i in range(0, it_len):
            res = lasd4(i, sgm, mvc)
            roots.append(res[1])
            # res[3] is the info flag; > 0 means the root search failed.
            assert_(res[3] <= 0, "LAPACK root finding dlasd4 failed to find \
the singular value %i" % i)
        roots = np.array(roots)[::-1]
        # BUG FIX: this previously read assert_((cond, msg)) — asserting a
        # non-empty tuple, which is always truthy, so the NaN check could
        # never fail.  Pass condition and message as separate arguments.
        assert_(not np.any(np.isnan(roots)), "There are NaN roots")
        assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps,
                        rtol=100*np.finfo(np.float64).eps)
class TestTbtrs:
    """Tests for ?tbtrs: triangular banded solves A @ x = b."""

    @pytest.mark.parametrize('dtype', DTYPES)
    def test_nag_example_f07vef_f07vsf(self, dtype):
        """Test real (f07vef) and complex (f07vsf) examples from NAG
        Examples available from:
        * https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vef.html
        * https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vsf.html
        """
        if dtype in REAL_DTYPES:
            ab = np.array([[-4.16, 4.78, 6.32, 0.16],
                           [-2.25, 5.86, -4.82, 0]],
                          dtype=dtype)
            b = np.array([[-16.64, -4.16],
                          [-13.78, -16.59],
                          [13.10, -4.94],
                          [-14.14, -9.96]],
                         dtype=dtype)
            x_out = np.array([[4, 1],
                              [-1, -3],
                              [3, 2],
                              [2, -2]],
                             dtype=dtype)
        elif dtype in COMPLEX_DTYPES:
            ab = np.array([[-1.94+4.43j, 4.12-4.27j, 0.43-2.66j, 0.44+0.1j],
                           [-3.39+3.44j, -1.84+5.52j, 1.74 - 0.04j, 0],
                           [1.62+3.68j, -2.77-1.93j, 0, 0]],
                          dtype=dtype)
            b = np.array([[-8.86 - 3.88j, -24.09 - 5.27j],
                          [-15.57 - 23.41j, -57.97 + 8.14j],
                          [-7.63 + 22.78j, 19.09 - 29.51j],
                          [-14.74 - 2.40j, 19.17 + 21.33j]],
                         dtype=dtype)
            x_out = np.array([[2j, 1 + 5j],
                              [1 - 3j, -7 - 2j],
                              [-4.001887 - 4.988417j, 3.026830 + 4.003182j],
                              [1.996158 - 1.045105j, -6.103357 - 8.986653j]],
                             dtype=dtype)
        else:
            raise ValueError(f"Datatype {dtype} not understood.")
        tbtrs = get_lapack_funcs(('tbtrs'), dtype=dtype)
        x, info = tbtrs(ab=ab, b=b, uplo='L')
        assert_equal(info, 0)
        assert_allclose(x, x_out, rtol=0, atol=1e-5)

    @pytest.mark.parametrize('dtype,trans',
                             [(dtype, trans)
                              for dtype in DTYPES for trans in ['N', 'T', 'C']
                              if not (trans == 'C' and dtype in REAL_DTYPES)])
    @pytest.mark.parametrize('uplo', ['U', 'L'])
    @pytest.mark.parametrize('diag', ['N', 'U'])
    def test_random_matrices(self, dtype, trans, uplo, diag):
        """Solve random banded triangular systems; re-multiply to verify."""
        seed(1724)
        # n, nrhs, kd are used to specify A and b.
        # A is of shape n x n with kd super/sub-diagonals
        # b is of shape n x nrhs matrix
        n, nrhs, kd = 4, 3, 2
        tbtrs = get_lapack_funcs('tbtrs', dtype=dtype)
        is_upper = (uplo == 'U')
        ku = kd * is_upper
        kl = kd - ku
        # Construct the diagonal and kd super/sub diagonals of A with
        # the corresponding offsets.
        band_offsets = range(ku, -kl - 1, -1)
        band_widths = [n - abs(x) for x in band_offsets]
        bands = [generate_random_dtype_array((width,), dtype)
                 for width in band_widths]
        if diag == 'U':  # A must be unit triangular
            bands[ku] = np.ones(n, dtype=dtype)
        # Construct the diagonal banded matrix A from the bands and offsets.
        a = sps.diags(bands, band_offsets, format='dia')
        # Convert A into banded storage form
        ab = np.zeros((kd + 1, n), dtype)
        for row, k in enumerate(band_offsets):
            ab[row, max(k, 0):min(n+k, n)] = a.diagonal(k)
        # The RHS values.
        b = generate_random_dtype_array((n, nrhs), dtype)
        x, info = tbtrs(ab=ab, b=b, uplo=uplo, trans=trans, diag=diag)
        assert_equal(info, 0)
        # Verify by multiplying with A / A^T / A^H as requested.
        if trans == 'N':
            assert_allclose(a @ x, b, rtol=5e-5)
        elif trans == 'T':
            assert_allclose(a.T @ x, b, rtol=5e-5)
        elif trans == 'C':
            assert_allclose(a.H @ x, b, rtol=5e-5)
        else:
            raise ValueError('Invalid trans argument')

    @pytest.mark.parametrize('uplo,trans,diag',
                             [['U', 'N', 'Invalid'],
                              ['U', 'Invalid', 'N'],
                              ['Invalid', 'N', 'N']])
    def test_invalid_argument_raises_exception(self, uplo, trans, diag):
        """Test if invalid values of uplo, trans and diag raise exceptions"""
        # Argument checks occur independently of used datatype.
        # This means we need not parameterize all available datatypes.
        tbtrs = get_lapack_funcs('tbtrs', dtype=np.float64)
        ab = rand(4, 2)
        b = rand(2, 4)
        assert_raises(Exception, tbtrs, ab, b, uplo, trans, diag)

    def test_zero_element_in_diagonal(self):
        """Test if a matrix with a zero diagonal element is singular
        If the i-th diagonal of A is zero, ?tbtrs should return `i` in `info`
        indicating the provided matrix is singular.
        Note that ?tbtrs requires the matrix A to be stored in banded form.
        In this form the diagonal corresponds to the last row."""
        ab = np.ones((3, 4), dtype=float)
        b = np.ones(4, dtype=float)
        tbtrs = get_lapack_funcs('tbtrs', dtype=float)
        # Zero the 4th diagonal element -> info must report index 4.
        ab[-1, 3] = 0
        _, info = tbtrs(ab=ab, b=b, uplo='U')
        assert_equal(info, 4)

    @pytest.mark.parametrize('ldab,n,ldb,nrhs', [
        (5, 5, 0, 5),
        (5, 5, 3, 5)
    ])
    def test_invalid_matrix_shapes(self, ldab, n, ldb, nrhs):
        """Test ?tbtrs fails correctly if shapes are invalid."""
        ab = np.ones((ldab, n), dtype=float)
        b = np.ones((ldb, nrhs), dtype=float)
        tbtrs = get_lapack_funcs('tbtrs', dtype=float)
        assert_raises(Exception, tbtrs, ab, b)
def test_lartg():
    """?lartg builds a plane (Givens) rotation annihilating the 2nd entry."""
    for dtype in 'fdFD':
        lartg = get_lapack_funcs('lartg', dtype=dtype)
        f = np.array(3, dtype)
        g = np.array(4, dtype)
        complex_case = np.iscomplexobj(g)
        if complex_case:
            g *= 1j
        cs, sn, r = lartg(f, g)
        # For inputs (3, 4): cos = 3/5 and |r| = 5.
        assert_allclose(cs, 3.0/5.0)
        assert_allclose(r, 5.0)
        if complex_case:
            assert_allclose(sn, -4.0j/5.0)
            # r comes back complex, cs stays real.
            assert_(type(r) == complex)
            assert_(type(cs) == float)
        else:
            assert_allclose(sn, 4.0/5.0)
def test_rot():
    # srot, drot from blas and crot and zrot from lapack.
    # Applies the rotation (c, s) to the vector pair (u, v) for many
    # combinations of offsets, strides and lengths, comparing against
    # hand-computed expected pairs.
    for dtype in 'fdFD':
        c = 0.6
        s = 0.8
        u = np.full(4, 3, dtype)
        v = np.full(4, 4, dtype)
        atol = 10**-(np.finfo(dtype).precision-1)
        if dtype in 'fd':
            rot = get_blas_funcs('rot', dtype=dtype)
            f = 4
        else:
            rot = get_lapack_funcs('rot', dtype=dtype)
            s *= -1j
            v *= 1j
            f = 4j
        # Full-vector rotation.
        assert_allclose(rot(u, v, c, s), [[5, 5, 5, 5],
                                          [0, 0, 0, 0]], atol=atol)
        # Partial lengths, offsets and strides; untouched entries keep
        # their original values (3 in u, f in v).
        assert_allclose(rot(u, v, c, s, n=2), [[5, 5, 3, 3],
                                               [0, 0, f, f]], atol=atol)
        assert_allclose(rot(u, v, c, s, offx=2, offy=2),
                        [[3, 3, 5, 5], [f, f, 0, 0]], atol=atol)
        assert_allclose(rot(u, v, c, s, incx=2, offy=2, n=2),
                        [[5, 3, 5, 3], [f, f, 0, 0]], atol=atol)
        assert_allclose(rot(u, v, c, s, offx=2, incy=2, n=2),
                        [[3, 3, 5, 5], [0, f, 0, f]], atol=atol)
        assert_allclose(rot(u, v, c, s, offx=2, incx=2, offy=2, incy=2, n=1),
                        [[3, 3, 5, 3], [f, f, 0, f]], atol=atol)
        assert_allclose(rot(u, v, c, s, incx=-2, incy=-2, n=2),
                        [[5, 3, 5, 3], [0, f, 0, f]], atol=atol)
        # overwrite_x/overwrite_y=1 must mutate and return the same arrays.
        a, b = rot(u, v, c, s, overwrite_x=1, overwrite_y=1)
        assert_(a is u)
        assert_(b is v)
        assert_allclose(a, [5, 5, 5, 5], atol=atol)
        assert_allclose(b, [0, 0, 0, 0], atol=atol)
def test_larfg_larf():
    """Generate (?larfg) and apply (?larf) a Householder reflector.

    Performs one step of reducing a Hermitian matrix toward tridiagonal
    form and checks the first row/column take the expected pattern.
    """
    np.random.seed(1234)
    a0 = np.random.random((4, 4))
    a0 = a0.T.dot(a0)
    a0j = np.random.random((4, 4)) + 1j*np.random.random((4, 4))
    a0j = a0j.T.conj().dot(a0j)
    # our test here will be to do one step of reducing a hermetian matrix to
    # tridiagonal form using householder transforms.
    for dtype in 'fdFD':
        larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype)
        if dtype in 'FD':
            a = a0j.copy()
        else:
            a = a0.copy()
        # generate a householder transform to clear a[2:,0]
        alpha, x, tau = larfg(a.shape[0]-1, a[1, 0], a[2:, 0])
        # create expected output
        expected = np.zeros_like(a[:, 0])
        expected[0] = a[0, 0]
        expected[1] = alpha
        # assemble householder vector
        v = np.zeros_like(a[1:, 0])
        v[0] = 1.0
        v[1:] = x
        # apply transform from the left
        a[1:, :] = larf(v, tau.conjugate(), a[1:, :], np.zeros(a.shape[1]))
        # apply transform from the right
        a[:, 1:] = larf(v, tau, a[:, 1:], np.zeros(a.shape[0]), side='R')
        # First column and first row must both match the cleared pattern.
        assert_allclose(a[:, 0], expected, atol=1e-5)
        assert_allclose(a[0, :], expected, atol=1e-5)
def test_sgesdd_lwork_bug_workaround():
    """The SGESDD lwork work-around must yield a sufficiently large size.

    This checks that _compute_lwork() correctly works around a bug in
    LAPACK versions older than 3.10.1: the Fortran routine reports the
    optimal LWORK through a single-precision float, which cannot represent
    the exact integer for large n, so the reported value can be too small.
    """
    sgesdd_lwork = get_lapack_funcs('gesdd_lwork', dtype=np.float32,
                                    ilp64='preferred')
    n = 9537
    lwork = _compute_lwork(sgesdd_lwork, n, n,
                           compute_uv=True, full_matrices=True)
    # A raw buggy query returns 272929856 (too small).  _compute_lwork's
    # rounding work-around turns that into 272929888.  LAPACK >= 3.10.1
    # (e.g. OpenBLAS >= 0.3.21) already reports a correct value, which the
    # unconditional work-around bumps to 272929920.  Accept either.
    # These constants are a LAPACK implementation detail; if SGESDD's
    # workspace requirements ever change, they must be updated.
    assert lwork in (272929888, 272929920)
class TestSytrd:
    """Tests for ?sytrd: reduction of a real symmetric matrix to
    tridiagonal form Q^T * A * Q = T."""

    @pytest.mark.parametrize('dtype', REAL_DTYPES)
    def test_sytrd_with_zero_dim_array(self, dtype):
        # Assert that a 0x0 matrix raises an error
        A = np.zeros((0, 0), dtype=dtype)
        sytrd = get_lapack_funcs('sytrd', (A,))
        assert_raises(ValueError, sytrd, A)

    @pytest.mark.parametrize('dtype', REAL_DTYPES)
    @pytest.mark.parametrize('n', (1, 3))
    def test_sytrd(self, dtype, n):
        A = np.zeros((n, n), dtype=dtype)
        sytrd, sytrd_lwork = \
            get_lapack_funcs(('sytrd', 'sytrd_lwork'), (A,))
        # some upper triangular array
        A[np.triu_indices_from(A)] = \
            np.arange(1, n*(n+1)//2+1, dtype=dtype)
        # query lwork
        lwork, info = sytrd_lwork(n)
        assert_equal(info, 0)
        # check lower=1 behavior (shouldn't do much since the matrix is
        # upper triangular)
        data, d, e, tau, info = sytrd(A, lower=1, lwork=lwork)
        assert_equal(info, 0)
        assert_allclose(data, A, atol=5*np.finfo(dtype).eps, rtol=1.0)
        assert_allclose(d, np.diag(A))
        assert_allclose(e, 0.0)
        assert_allclose(tau, 0.0)
        # and now for the proper test (lower=0 is the default)
        data, d, e, tau, info = sytrd(A, lwork=lwork)
        assert_equal(info, 0)
        # assert Q^T*A*Q = tridiag(e, d, e)
        # build tridiagonal matrix
        T = np.zeros_like(A, dtype=dtype)
        k = np.arange(A.shape[0])
        T[k, k] = d
        k2 = np.arange(A.shape[0]-1)
        T[k2+1, k2] = e
        T[k2, k2+1] = e
        # build Q from the elementary reflectors returned in `data`/`tau`
        Q = np.eye(n, n, dtype=dtype)
        for i in range(n-1):
            v = np.zeros(n, dtype=dtype)
            v[:i] = data[:i, i+1]
            v[i] = 1.0
            H = np.eye(n, n, dtype=dtype) - tau[i] * np.outer(v, v)
            Q = np.dot(H, Q)
        # Make matrix fully symmetric
        i_lower = np.tril_indices(n, -1)
        A[i_lower] = A.T[i_lower]
        QTAQ = np.dot(Q.T, np.dot(A, Q))
        # disable rtol here since some values in QTAQ and T are very close
        # to 0.
        assert_allclose(QTAQ, T, atol=5*np.finfo(dtype).eps, rtol=1.0)
class TestHetrd:
    """Tests for ?hetrd: reduction of a complex Hermitian matrix to real
    tridiagonal form Q^H * A * Q = T."""

    @pytest.mark.parametrize('complex_dtype', COMPLEX_DTYPES)
    def test_hetrd_with_zero_dim_array(self, complex_dtype):
        # Assert that a 0x0 matrix raises an error
        A = np.zeros((0, 0), dtype=complex_dtype)
        hetrd = get_lapack_funcs('hetrd', (A,))
        assert_raises(ValueError, hetrd, A)

    @pytest.mark.parametrize('real_dtype,complex_dtype',
                             zip(REAL_DTYPES, COMPLEX_DTYPES))
    @pytest.mark.parametrize('n', (1, 3))
    def test_hetrd(self, n, real_dtype, complex_dtype):
        A = np.zeros((n, n), dtype=complex_dtype)
        hetrd, hetrd_lwork = \
            get_lapack_funcs(('hetrd', 'hetrd_lwork'), (A,))
        # some upper triangular array
        A[np.triu_indices_from(A)] = (
            np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
            + 1j * np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
            )
        # A Hermitian matrix must have a real diagonal.
        np.fill_diagonal(A, np.real(np.diag(A)))
        # test query lwork
        for x in [0, 1]:
            _, info = hetrd_lwork(n, lower=x)
            assert_equal(info, 0)
        # lwork returns complex which segfaults hetrd call (gh-10388)
        # use the safe and recommended option
        lwork = _compute_lwork(hetrd_lwork, n)
        # check lower=1 behavior (shouldn't do much since the matrix is
        # upper triangular)
        data, d, e, tau, info = hetrd(A, lower=1, lwork=lwork)
        assert_equal(info, 0)
        assert_allclose(data, A, atol=5*np.finfo(real_dtype).eps, rtol=1.0)
        assert_allclose(d, np.real(np.diag(A)))
        assert_allclose(e, 0.0)
        assert_allclose(tau, 0.0)
        # and now for the proper test (lower=0 is the default)
        data, d, e, tau, info = hetrd(A, lwork=lwork)
        assert_equal(info, 0)
        # assert Q^T*A*Q = tridiag(e, d, e)
        # build tridiagonal matrix
        T = np.zeros_like(A, dtype=real_dtype)
        k = np.arange(A.shape[0], dtype=int)
        T[k, k] = d
        k2 = np.arange(A.shape[0]-1, dtype=int)
        T[k2+1, k2] = e
        T[k2, k2+1] = e
        # build Q from the elementary reflectors returned in `data`/`tau`
        Q = np.eye(n, n, dtype=complex_dtype)
        for i in range(n-1):
            v = np.zeros(n, dtype=complex_dtype)
            v[:i] = data[:i, i+1]
            v[i] = 1.0
            H = np.eye(n, n, dtype=complex_dtype) \
                - tau[i] * np.outer(v, np.conj(v))
            Q = np.dot(H, Q)
        # Make matrix fully Hermitian
        i_lower = np.tril_indices(n, -1)
        A[i_lower] = np.conj(A.T[i_lower])
        QHAQ = np.dot(np.conj(Q.T), np.dot(A, Q))
        # disable rtol here since some values in QTAQ and T are very close
        # to 0.
        assert_allclose(
            QHAQ, T, atol=10*np.finfo(real_dtype).eps, rtol=1.0
            )
def test_gglse():
    """?gglse: equality-constrained least squares, checked against the
    NAG manual example for each dtype."""
    # Example data taken from NAG manual
    for ind, dtype in enumerate(DTYPES):
        # DTYPES = <s,d,c,z> gglse
        func, func_lwork = get_lapack_funcs(('gglse', 'gglse_lwork'),
                                            dtype=dtype)
        lwork = _compute_lwork(func_lwork, m=6, n=4, p=2)
        # For <s,d>gglse
        if ind < 2:
            a = np.array([[-0.57, -1.28, -0.39, 0.25],
                          [-1.93, 1.08, -0.31, -2.14],
                          [2.30, 0.24, 0.40, -0.35],
                          [-1.93, 0.64, -0.66, 0.08],
                          [0.15, 0.30, 0.15, -2.13],
                          [-0.02, 1.03, -1.43, 0.50]], dtype=dtype)
            c = np.array([-1.50, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype)
            d = np.array([0., 0.], dtype=dtype)
        # For <c,z>gglse
        else:
            a = np.array([[0.96-0.81j, -0.03+0.96j, -0.91+2.06j, -0.05+0.41j],
                          [-0.98+1.98j, -1.20+0.19j, -0.66+0.42j, -0.81+0.56j],
                          [0.62-0.46j, 1.01+0.02j, 0.63-0.17j, -1.11+0.60j],
                          [0.37+0.38j, 0.19-0.54j, -0.98-0.36j, 0.22-0.20j],
                          [0.83+0.51j, 0.20+0.01j, -0.17-0.46j, 1.47+1.59j],
                          [1.08-0.28j, 0.20-0.12j, -0.07+1.23j, 0.26+0.26j]])
            c = np.array([[-2.54+0.09j],
                          [1.65-2.26j],
                          [-2.11-3.96j],
                          [1.82+3.30j],
                          [-6.41+3.77j],
                          [2.07+0.66j]])
            d = np.zeros(2, dtype=dtype)
        b = np.array([[1., 0., -1., 0.], [0., 1., 0., -1.]], dtype=dtype)
        _, _, _, result, _ = func(a, b, c, d, lwork=lwork)
        if ind < 2:
            expected = np.array([0.48904455,
                                 0.99754786,
                                 0.48904455,
                                 0.99754786])
        else:
            expected = np.array([1.08742917-1.96205783j,
                                 -0.74093902+3.72973919j,
                                 1.08742917-1.96205759j,
                                 -0.74093896+3.72973895j])
        assert_array_almost_equal(result, expected, decimal=4)
def test_sycon_hecon():
    """Test ?sycon/?hecon: 1-norm reciprocal condition number estimate.

    Factorizes a well-conditioned symmetric/Hermitian matrix with
    ?sytrf/?hetrf and checks the estimated condition number against
    ``np.linalg.cond``.
    """
    seed(1234)
    for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
        # DTYPES + COMPLEX DTYPES = <s,d,c,z> sycon + <c,z>hecon
        n = 10
        # For <s,d,c,z>sycon
        if ind < 4:
            func_lwork = get_lapack_funcs('sytrf_lwork', dtype=dtype)
            funcon, functrf = get_lapack_funcs(('sycon', 'sytrf'), dtype=dtype)
            A = (rand(n, n)).astype(dtype)
        # For <c,z>hecon
        else:
            func_lwork = get_lapack_funcs('hetrf_lwork', dtype=dtype)
            funcon, functrf = get_lapack_funcs(('hecon', 'hetrf'), dtype=dtype)
            A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
        # Since sycon only refers to upper/lower part, conj() is safe here.
        A = (A + A.conj().T)/2 + 2*np.eye(n, dtype=dtype)
        anorm = norm(A, 1)
        lwork = _compute_lwork(func_lwork, n)
        ldu, ipiv, _ = functrf(A, lwork=lwork, lower=1)
        rcond, _ = funcon(a=ldu, ipiv=ipiv, anorm=anorm, lower=1)
        # The error is at most 1-fold
        assert_(abs(1/rcond - np.linalg.cond(A, p=1))*rcond < 1)
def test_sygst():
    """Test ?sygst: reduce a real symmetric-definite generalized eigenproblem
    to standard form, and verify the eigenvalues match ?sygvd's.
    """
    seed(1234)
    for ind, dtype in enumerate(REAL_DTYPES):
        # DTYPES = <s,d> sygst
        n = 10
        potrf, sygst, syevd, sygvd = get_lapack_funcs(('potrf', 'sygst',
                                                       'syevd', 'sygvd'),
                                                      dtype=dtype)
        A = rand(n, n).astype(dtype)
        A = (A + A.T)/2
        # B must be positive definite
        B = rand(n, n).astype(dtype)
        B = (B + B.T)/2 + 2 * np.eye(n, dtype=dtype)
        # Perform eig (sygvd)
        eig_gvd, _, info = sygvd(A, B)
        assert_(info == 0)
        # Convert to std problem potrf
        b, info = potrf(B)
        assert_(info == 0)
        a, info = sygst(A, b)
        assert_(info == 0)
        eig, _, info = syevd(a)
        assert_(info == 0)
        assert_allclose(eig, eig_gvd, rtol=1e-4)
def test_hegst():
    """Test ?hegst: reduce a complex Hermitian-definite generalized
    eigenproblem to standard form, and verify eigenvalues match ?hegvd's.
    """
    seed(1234)
    for ind, dtype in enumerate(COMPLEX_DTYPES):
        # DTYPES = <c,z> hegst
        n = 10
        potrf, hegst, heevd, hegvd = get_lapack_funcs(('potrf', 'hegst',
                                                       'heevd', 'hegvd'),
                                                      dtype=dtype)
        A = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype)
        A = (A + A.conj().T)/2
        # B must be positive definite
        B = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype)
        B = (B + B.conj().T)/2 + 2 * np.eye(n, dtype=dtype)
        # Perform eig (hegvd)
        eig_gvd, _, info = hegvd(A, B)
        assert_(info == 0)
        # Convert to std problem potrf
        b, info = potrf(B)
        assert_(info == 0)
        a, info = hegst(A, b)
        assert_(info == 0)
        eig, _, info = heevd(a)
        assert_(info == 0)
        assert_allclose(eig, eig_gvd, rtol=1e-4)
def test_tzrzf():
    """
    This test performs an RZ decomposition in which an m x n upper trapezoidal
    array M (m <= n) is factorized as M = [R 0] * Z where R is upper triangular
    and Z is unitary.
    """
    seed(1234)
    m, n = 10, 15
    for ind, dtype in enumerate(DTYPES):
        tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'),
                                           dtype=dtype)
        lwork = _compute_lwork(tzrzf_lw, m, n)
        if ind < 2:
            A = triu(rand(m, n).astype(dtype))
        else:
            A = triu((rand(m, n) + rand(m, n)*1j).astype(dtype))
        # assert wrong shape arg, f2py returns generic error
        assert_raises(Exception, tzrzf, A.T)
        rz, tau, info = tzrzf(A, lwork=lwork)
        # Check success
        assert_(info == 0)
        # Get Z manually for comparison
        R = np.hstack((rz[:, :m], np.zeros((m, n-m), dtype=dtype)))
        V = np.hstack((np.eye(m, dtype=dtype), rz[:, m:]))
        Id = np.eye(n, dtype=dtype)
        # Accumulate Z as the product of the elementary reflectors.
        ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(m)]
        Z = reduce(np.dot, ref)
        assert_allclose(R.dot(Z) - A, zeros_like(A, dtype=dtype),
                        atol=10*np.spacing(dtype(1.0).real), rtol=0.)
def test_tfsm():
    """
    Test ?tfsm: solve a triangular linear system whose coefficient matrix is
    stored in Rectangular Full Packed (RFP) format.
    """
    seed(1234)
    for ind, dtype in enumerate(DTYPES):
        n = 20
        if ind > 1:
            A = triu(rand(n, n) + rand(n, n)*1j + eye(n)).astype(dtype)
            trans = 'C'
        else:
            A = triu(rand(n, n) + eye(n)).astype(dtype)
            trans = 'T'
        trttf, tfttr, tfsm = get_lapack_funcs(('trttf', 'tfttr', 'tfsm'),
                                              dtype=dtype)
        Afp, _ = trttf(A)
        B = rand(n, 2).astype(dtype)
        # alpha = -1 scales the right-hand side, i.e. solves (-A) x = B.
        soln = tfsm(-1, Afp, B)
        assert_array_almost_equal(soln, solve(-A, B),
                                  decimal=4 if ind % 2 == 0 else 6)
        soln = tfsm(-1, Afp, B, trans=trans)
        assert_array_almost_equal(soln, solve(-A.conj().T, B),
                                  decimal=4 if ind % 2 == 0 else 6)
        # Make A, unit diagonal
        A[np.arange(n), np.arange(n)] = dtype(1.)
        soln = tfsm(-1, Afp, B, trans=trans, diag='U')
        assert_array_almost_equal(soln, solve(-A.conj().T, B),
                                  decimal=4 if ind % 2 == 0 else 6)
        # Change side
        B2 = rand(3, n).astype(dtype)
        soln = tfsm(-1, Afp, B2, trans=trans, diag='U', side='R')
        assert_array_almost_equal(soln, solve(-A, B2.T).conj().T,
                                  decimal=4 if ind % 2 == 0 else 6)
def test_ormrz_unmrz():
    """
    This test performs a matrix multiplication with an arbitrary m x n matrix C
    and a unitary matrix Q without explicitly forming the array. The array data
    is encoded in the rectangular part of A which is obtained from ?TZRZF. Q
    size is inferred by m, n, side keywords.
    """
    seed(1234)
    qm, qn, cn = 10, 15, 15
    for ind, dtype in enumerate(DTYPES):
        tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'),
                                           dtype=dtype)
        lwork_rz = _compute_lwork(tzrzf_lw, qm, qn)
        # Real dtypes use ormrz, complex dtypes use unmrz.
        if ind < 2:
            A = triu(rand(qm, qn).astype(dtype))
            C = rand(cn, cn).astype(dtype)
            orun_mrz, orun_mrz_lw = get_lapack_funcs(('ormrz', 'ormrz_lwork'),
                                                     dtype=dtype)
        else:
            A = triu((rand(qm, qn) + rand(qm, qn)*1j).astype(dtype))
            C = (rand(cn, cn) + rand(cn, cn)*1j).astype(dtype)
            orun_mrz, orun_mrz_lw = get_lapack_funcs(('unmrz', 'unmrz_lwork'),
                                                     dtype=dtype)
        lwork_mrz = _compute_lwork(orun_mrz_lw, cn, cn)
        rz, tau, info = tzrzf(A, lwork=lwork_rz)
        # Get Q manually for comparison
        V = np.hstack((np.eye(qm, dtype=dtype), rz[:, qm:]))
        Id = np.eye(qn, dtype=dtype)
        ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(qm)]
        Q = reduce(np.dot, ref)
        # Now that we have Q, we can test whether lapack results agree with
        # each case of CQ, CQ^H, QC, and QC^H
        trans = 'T' if ind < 2 else 'C'
        tol = 10*np.spacing(dtype(1.0).real)
        cq, info = orun_mrz(rz, tau, C, lwork=lwork_mrz)
        assert_(info == 0)
        assert_allclose(cq - Q.dot(C), zeros_like(C), atol=tol, rtol=0.)
        cq, info = orun_mrz(rz, tau, C, trans=trans, lwork=lwork_mrz)
        assert_(info == 0)
        assert_allclose(cq - Q.conj().T.dot(C), zeros_like(C), atol=tol,
                        rtol=0.)
        cq, info = orun_mrz(rz, tau, C, side='R', lwork=lwork_mrz)
        assert_(info == 0)
        assert_allclose(cq - C.dot(Q), zeros_like(C), atol=tol, rtol=0.)
        cq, info = orun_mrz(rz, tau, C, side='R', trans=trans, lwork=lwork_mrz)
        assert_(info == 0)
        assert_allclose(cq - C.dot(Q.conj().T), zeros_like(C), atol=tol,
                        rtol=0.)
def test_tfttr_trttf():
    """
    Test conversion routines between the Rectangular Full Packed (RFP) format
    and Standard Triangular Array (TR)
    """
    seed(1234)
    for ind, dtype in enumerate(DTYPES):
        n = 20
        if ind > 1:
            A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype)
            transr = 'C'
        else:
            A_full = (rand(n, n)).astype(dtype)
            transr = 'T'
        trttf, tfttr = get_lapack_funcs(('trttf', 'tfttr'), dtype=dtype)
        A_tf_U, info = trttf(A_full)
        assert_(info == 0)
        A_tf_L, info = trttf(A_full, uplo='L')
        assert_(info == 0)
        A_tf_U_T, info = trttf(A_full, transr=transr, uplo='U')
        assert_(info == 0)
        A_tf_L_T, info = trttf(A_full, transr=transr, uplo='L')
        assert_(info == 0)
        # Create the RFP array manually (n is even!)
        A_tf_U_m = zeros((n+1, n//2), dtype=dtype)
        A_tf_U_m[:-1, :] = triu(A_full)[:, n//2:]
        A_tf_U_m[n//2+1:, :] += triu(A_full)[:n//2, :n//2].conj().T
        A_tf_L_m = zeros((n+1, n//2), dtype=dtype)
        A_tf_L_m[1:, :] = tril(A_full)[:, :n//2]
        A_tf_L_m[:n//2, :] += tril(A_full)[n//2:, n//2:].conj().T
        assert_array_almost_equal(A_tf_U, A_tf_U_m.reshape(-1, order='F'))
        assert_array_almost_equal(A_tf_U_T,
                                  A_tf_U_m.conj().T.reshape(-1, order='F'))
        assert_array_almost_equal(A_tf_L, A_tf_L_m.reshape(-1, order='F'))
        assert_array_almost_equal(A_tf_L_T,
                                  A_tf_L_m.conj().T.reshape(-1, order='F'))
        # Get the original array from RFP
        A_tr_U, info = tfttr(n, A_tf_U)
        assert_(info == 0)
        A_tr_L, info = tfttr(n, A_tf_L, uplo='L')
        assert_(info == 0)
        A_tr_U_T, info = tfttr(n, A_tf_U_T, transr=transr, uplo='U')
        assert_(info == 0)
        A_tr_L_T, info = tfttr(n, A_tf_L_T, transr=transr, uplo='L')
        assert_(info == 0)
        assert_array_almost_equal(A_tr_U, triu(A_full))
        assert_array_almost_equal(A_tr_U_T, triu(A_full))
        assert_array_almost_equal(A_tr_L, tril(A_full))
        assert_array_almost_equal(A_tr_L_T, tril(A_full))
def test_tpttr_trttp():
    """
    Test conversion routines between the Standard Packed (TP) format
    and Standard Triangular Array (TR)
    """
    seed(1234)
    for ind, dtype in enumerate(DTYPES):
        n = 20
        if ind > 1:
            A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype)
        else:
            A_full = (rand(n, n)).astype(dtype)
        trttp, tpttr = get_lapack_funcs(('trttp', 'tpttr'), dtype=dtype)
        A_tp_U, info = trttp(A_full)
        assert_(info == 0)
        A_tp_L, info = trttp(A_full, uplo='L')
        assert_(info == 0)
        # Create the TP array manually
        inds = tril_indices(n)
        A_tp_U_m = zeros(n*(n+1)//2, dtype=dtype)
        A_tp_U_m[:] = (triu(A_full).T)[inds]
        inds = triu_indices(n)
        A_tp_L_m = zeros(n*(n+1)//2, dtype=dtype)
        A_tp_L_m[:] = (tril(A_full).T)[inds]
        assert_array_almost_equal(A_tp_U, A_tp_U_m)
        assert_array_almost_equal(A_tp_L, A_tp_L_m)
        # Get the original array from TP
        A_tr_U, info = tpttr(n, A_tp_U)
        assert_(info == 0)
        A_tr_L, info = tpttr(n, A_tp_L, uplo='L')
        assert_(info == 0)
        assert_array_almost_equal(A_tr_U, triu(A_full))
        assert_array_almost_equal(A_tr_L, tril(A_full))
def test_pftrf():
    """
    Test Cholesky factorization of a positive definite Rectangular Full
    Packed (RFP) format array
    """
    seed(1234)
    for ind, dtype in enumerate(DTYPES):
        n = 20
        if ind > 1:
            A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
            A = A + A.conj().T + n*eye(n)
        else:
            A = (rand(n, n)).astype(dtype)
            A = A + A.T + n*eye(n)
        pftrf, trttf, tfttr = get_lapack_funcs(('pftrf', 'trttf', 'tfttr'),
                                               dtype=dtype)
        # Convert to RFP, factorize, then convert the factor back to TR
        Afp, info = trttf(A)
        Achol_rfp, info = pftrf(n, Afp)
        assert_(info == 0)
        A_chol_r, _ = tfttr(n, Achol_rfp)
        Achol = cholesky(A)
        assert_array_almost_equal(A_chol_r, Achol)
def test_pftri():
    """
    Test Cholesky factorization of a positive definite Rectangular Full
    Packed (RFP) format array to find its inverse
    """
    seed(1234)
    for ind, dtype in enumerate(DTYPES):
        n = 20
        if ind > 1:
            A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
            A = A + A.conj().T + n*eye(n)
        else:
            A = (rand(n, n)).astype(dtype)
            A = A + A.T + n*eye(n)
        pftri, pftrf, trttf, tfttr = get_lapack_funcs(('pftri',
                                                       'pftrf',
                                                       'trttf',
                                                       'tfttr'),
                                                      dtype=dtype)
        # Convert to RFP, factorize, invert in RFP, convert back to TR
        Afp, info = trttf(A)
        A_chol_rfp, info = pftrf(n, Afp)
        A_inv_rfp, info = pftri(n, A_chol_rfp)
        assert_(info == 0)
        A_inv_r, _ = tfttr(n, A_inv_rfp)
        Ainv = inv(A)
        # pftri only fills the requested triangle; compare upper parts.
        assert_array_almost_equal(A_inv_r, triu(Ainv),
                                  decimal=4 if ind % 2 == 0 else 6)
def test_pftrs():
    """
    Test Cholesky factorization of a positive definite Rectangular Full
    Packed (RFP) format array and solve a linear system
    """
    seed(1234)
    for ind, dtype in enumerate(DTYPES):
        n = 20
        if ind > 1:
            A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
            A = A + A.conj().T + n*eye(n)
        else:
            A = (rand(n, n)).astype(dtype)
            A = A + A.T + n*eye(n)
        B = ones((n, 3), dtype=dtype)
        # Deliberately mis-sized right-hand sides for shape checking.
        Bf1 = ones((n+2, 3), dtype=dtype)
        Bf2 = ones((n-2, 3), dtype=dtype)
        pftrs, pftrf, trttf, tfttr = get_lapack_funcs(('pftrs',
                                                       'pftrf',
                                                       'trttf',
                                                       'tfttr'),
                                                      dtype=dtype)
        # Convert to RFP and factorize
        Afp, info = trttf(A)
        A_chol_rfp, info = pftrf(n, Afp)
        # larger B arrays shouldn't segfault
        soln, info = pftrs(n, A_chol_rfp, Bf1)
        assert_(info == 0)
        assert_raises(Exception, pftrs, n, A_chol_rfp, Bf2)
        soln, info = pftrs(n, A_chol_rfp, B)
        assert_(info == 0)
        assert_array_almost_equal(solve(A, B), soln,
                                  decimal=4 if ind % 2 == 0 else 6)
def test_sfrk_hfrk():
    """
    Test for performing a symmetric rank-k operation for matrix in RFP format.
    """
    seed(1234)
    for ind, dtype in enumerate(DTYPES):
        n = 20
        if ind > 1:
            A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
            A = A + A.conj().T + n*eye(n)
        else:
            A = (rand(n, n)).astype(dtype)
            A = A + A.T + n*eye(n)
        # Real dtypes use sfrk, complex dtypes use hfrk.
        prefix = 's'if ind < 2 else 'h'
        trttf, tfttr, shfrk = get_lapack_funcs(('trttf', 'tfttr', '{}frk'
                                                ''.format(prefix)),
                                               dtype=dtype)
        Afp, _ = trttf(A)
        C = np.random.rand(n, 2).astype(dtype)
        # Computes alpha*C*C^H + beta*A with alpha=-1, beta=2 in RFP form.
        Afp_out = shfrk(n, 2, -1, C, 2, Afp)
        A_out, _ = tfttr(n, Afp_out)
        assert_array_almost_equal(A_out, triu(-C.dot(C.conj().T) + 2*A),
                                  decimal=4 if ind % 2 == 0 else 6)
def test_syconv():
    """
    Test ?syconv: convert the packed output of ?sytrf back into separated
    L/U and D factors (plus pivots) and compare the strict triangle against
    the factorization produced by ``scipy.linalg.ldl``.
    """
    seed(1234)
    for ind, dtype in enumerate(DTYPES):
        n = 10
        # Random symmetric test matrix; real case is made diagonally dominant.
        if ind > 1:
            A = (randint(-30, 30, (n, n)) +
                 randint(-30, 30, (n, n))*1j).astype(dtype)
            A = A + A.conj().T
        else:
            A = randint(-30, 30, (n, n)).astype(dtype)
            A = A + A.T + n*eye(n)
        tol = 100*np.spacing(dtype(1.0).real)
        syconv, trf, trf_lwork = get_lapack_funcs(('syconv', 'sytrf',
                                                   'sytrf_lwork'), dtype=dtype)
        lw = _compute_lwork(trf_lwork, n, lower=1)
        # Reference factorization of the lower triangle (D factor unused).
        L, _, perm = ldl(A, lower=1, hermitian=False)
        ldu, ipiv, info = trf(A, lower=1, lwork=lw)
        a, e, info = syconv(ldu, ipiv, lower=1)
        assert_allclose(tril(a, -1), tril(L[perm, :], -1), atol=tol, rtol=0.)
        # Test also upper
        U, _, perm = ldl(A, lower=0, hermitian=False)
        ldu, ipiv, info = trf(A, lower=0)
        a, e, info = syconv(ldu, ipiv, lower=0)
        assert_allclose(triu(a, 1), triu(U[perm, :], 1), atol=tol, rtol=0.)
class TestBlockedQR:
    """
    Tests for the blocked QR factorization, namely through geqrt, gemqrt, tpqrt
    and tpmqrt.
    """
    def test_geqrt_gemqrt(self):
        # geqrt computes a blocked QR with the block reflector matrix T;
        # gemqrt applies the implicit Q (or Q^H) to a matrix C from either side.
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 20
            if ind > 1:
                A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
            else:
                A = (rand(n, n)).astype(dtype)
            tol = 100*np.spacing(dtype(1.0).real)
            geqrt, gemqrt = get_lapack_funcs(('geqrt', 'gemqrt'), dtype=dtype)
            a, t, info = geqrt(n, A)
            assert info == 0
            # Extract elementary reflectors from lower triangle, adding the
            # main diagonal of ones.
            v = np.tril(a, -1) + np.eye(n, dtype=dtype)
            # Generate the block Householder transform I - VTV^H
            Q = np.eye(n, dtype=dtype) - v @ t @ v.T.conj()
            R = np.triu(a)
            # Test columns of Q are orthogonal
            assert_allclose(Q.T.conj() @ Q, np.eye(n, dtype=dtype), atol=tol,
                            rtol=0.)
            assert_allclose(Q @ R, A, atol=tol, rtol=0.)
            if ind > 1:
                C = (rand(n, n) + rand(n, n)*1j).astype(dtype)
                transpose = 'C'
            else:
                C = (rand(n, n)).astype(dtype)
                transpose = 'T'
            # Compare gemqrt with the explicit products for all combinations
            # of side and (conjugate) transpose.
            for side in ('L', 'R'):
                for trans in ('N', transpose):
                    c, info = gemqrt(a, t, C, side=side, trans=trans)
                    assert info == 0
                    if trans == transpose:
                        q = Q.T.conj()
                    else:
                        q = Q
                    if side == 'L':
                        qC = q @ C
                    else:
                        qC = C @ q
                    assert_allclose(c, qC, atol=tol, rtol=0.)
                    # Test default arguments
                    if (side, trans) == ('L', 'N'):
                        c_default, info = gemqrt(a, t, C)
                        assert info == 0
                        assert_equal(c_default, c)
            # Test invalid side/trans
            assert_raises(Exception, gemqrt, a, t, C, side='A')
            assert_raises(Exception, gemqrt, a, t, C, trans='A')
    def test_tpqrt_tpmqrt(self):
        # tpqrt computes a blocked QR of a triangular-pentagonal pair (A, B);
        # tpmqrt applies the resulting implicit Q to a stacked pair (C, D).
        seed(1234)
        for ind, dtype in enumerate(DTYPES):
            n = 20
            if ind > 1:
                A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
                B = (rand(n, n) + rand(n, n)*1j).astype(dtype)
            else:
                A = (rand(n, n)).astype(dtype)
                B = (rand(n, n)).astype(dtype)
            tol = 100*np.spacing(dtype(1.0).real)
            tpqrt, tpmqrt = get_lapack_funcs(('tpqrt', 'tpmqrt'), dtype=dtype)
            # Test for the range of pentagonal B, from square to upper
            # triangular
            for l in (0, n // 2, n):
                a, b, t, info = tpqrt(l, n, A, B)
                assert info == 0
                # Check that lower triangular part of A has not been modified
                assert_equal(np.tril(a, -1), np.tril(A, -1))
                # Check that elements not part of the pentagonal portion of B
                # have not been modified.
                assert_equal(np.tril(b, l - n - 1), np.tril(B, l - n - 1))
                # Extract pentagonal portion of B
                B_pent, b_pent = np.triu(B, l - n), np.triu(b, l - n)
                # Generate elementary reflectors
                v = np.concatenate((np.eye(n, dtype=dtype), b_pent))
                # Generate the block Householder transform I - VTV^H
                Q = np.eye(2 * n, dtype=dtype) - v @ t @ v.T.conj()
                R = np.concatenate((np.triu(a), np.zeros_like(a)))
                # Test columns of Q are orthogonal
                assert_allclose(Q.T.conj() @ Q, np.eye(2 * n, dtype=dtype),
                                atol=tol, rtol=0.)
                assert_allclose(Q @ R, np.concatenate((np.triu(A), B_pent)),
                                atol=tol, rtol=0.)
                if ind > 1:
                    C = (rand(n, n) + rand(n, n)*1j).astype(dtype)
                    D = (rand(n, n) + rand(n, n)*1j).astype(dtype)
                    transpose = 'C'
                else:
                    C = (rand(n, n)).astype(dtype)
                    D = (rand(n, n)).astype(dtype)
                    transpose = 'T'
                for side in ('L', 'R'):
                    for trans in ('N', transpose):
                        c, d, info = tpmqrt(l, b, t, C, D, side=side,
                                            trans=trans)
                        assert info == 0
                        if trans == transpose:
                            q = Q.T.conj()
                        else:
                            q = Q
                        if side == 'L':
                            cd = np.concatenate((c, d), axis=0)
                            CD = np.concatenate((C, D), axis=0)
                            qCD = q @ CD
                        else:
                            cd = np.concatenate((c, d), axis=1)
                            CD = np.concatenate((C, D), axis=1)
                            qCD = CD @ q
                        assert_allclose(cd, qCD, atol=tol, rtol=0.)
                        # Default side/trans must match ('L', 'N').
                        if (side, trans) == ('L', 'N'):
                            c_default, d_default, info = tpmqrt(l, b, t, C, D)
                            assert info == 0
                            assert_equal(c_default, c)
                            assert_equal(d_default, d)
                # Test invalid side/trans
                assert_raises(Exception, tpmqrt, l, b, t, C, D, side='A')
                assert_raises(Exception, tpmqrt, l, b, t, C, D, trans='A')
def test_pstrf():
    """Test ?pstrf: pivoted Cholesky of a positive semidefinite matrix."""
    seed(1234)
    for ind, dtype in enumerate(DTYPES):
        # DTYPES = <s, d, c, z> pstrf
        n = 10
        r = 2
        pstrf = get_lapack_funcs('pstrf', dtype=dtype)
        # Create positive semidefinite A (rank n - r by construction)
        if ind > 1:
            A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype)
            A = A @ A.conj().T
        else:
            A = rand(n, n-r).astype(dtype)
            A = A @ A.T
        c, piv, r_c, info = pstrf(A)
        U = triu(c)
        # Zero out the trailing block beyond the computed rank r_c.
        U[r_c - n:, r_c - n:] = 0.
        # info == 1 signals a rank-deficient (semidefinite) input.
        assert_equal(info, 1)
        # python-dbg 3.5.2 runs cause trouble with the following assertion.
        # assert_equal(r_c, n - r)
        single_atol = 1000 * np.finfo(np.float32).eps
        double_atol = 1000 * np.finfo(np.float64).eps
        atol = single_atol if ind in [0, 2] else double_atol
        assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol)
        c, piv, r_c, info = pstrf(A, lower=1)
        L = tril(c)
        L[r_c - n:, r_c - n:] = 0.
        assert_equal(info, 1)
        # assert_equal(r_c, n - r)
        single_atol = 1000 * np.finfo(np.float32).eps
        double_atol = 1000 * np.finfo(np.float64).eps
        atol = single_atol if ind in [0, 2] else double_atol
        assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol)
def test_pstf2():
    """Test ?pstf2: unblocked pivoted Cholesky (same checks as test_pstrf)."""
    seed(1234)
    for ind, dtype in enumerate(DTYPES):
        # DTYPES = <s, d, c, z> pstf2
        n = 10
        r = 2
        pstf2 = get_lapack_funcs('pstf2', dtype=dtype)
        # Create positive semidefinite A (rank n - r by construction)
        if ind > 1:
            A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype)
            A = A @ A.conj().T
        else:
            A = rand(n, n-r).astype(dtype)
            A = A @ A.T
        c, piv, r_c, info = pstf2(A)
        U = triu(c)
        # Zero out the trailing block beyond the computed rank r_c.
        U[r_c - n:, r_c - n:] = 0.
        # info == 1 signals a rank-deficient (semidefinite) input.
        assert_equal(info, 1)
        # python-dbg 3.5.2 runs cause trouble with the commented assertions.
        # assert_equal(r_c, n - r)
        single_atol = 1000 * np.finfo(np.float32).eps
        double_atol = 1000 * np.finfo(np.float64).eps
        atol = single_atol if ind in [0, 2] else double_atol
        assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol)
        c, piv, r_c, info = pstf2(A, lower=1)
        L = tril(c)
        L[r_c - n:, r_c - n:] = 0.
        assert_equal(info, 1)
        # assert_equal(r_c, n - r)
        single_atol = 1000 * np.finfo(np.float32).eps
        double_atol = 1000 * np.finfo(np.float64).eps
        atol = single_atol if ind in [0, 2] else double_atol
        assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol)
def test_geequ():
    """Test ?geequ: row/column equilibration scalings for a general matrix.

    Checks that the scaled matrix r[:, None] * A * c matches the reference
    equilibrated matrices from the NAG documentation.
    """
    desired_real = np.array([[0.6250, 1.0000, 0.0393, -0.4269],
                             [1.0000, -0.5619, -1.0000, -1.0000],
                             [0.5874, -1.0000, -0.0596, -0.5341],
                             [-1.0000, -0.5946, -0.0294, 0.9957]])
    desired_cplx = np.array([[-0.2816+0.5359*1j,
                              0.0812+0.9188*1j,
                              -0.7439-0.2561*1j],
                             [-0.3562-0.2954*1j,
                              0.9566-0.0434*1j,
                              -0.0174+0.1555*1j],
                             [0.8607+0.1393*1j,
                              -0.2759+0.7241*1j,
                              -0.1642-0.1365*1j]])
    for ind, dtype in enumerate(DTYPES):
        if ind < 2:
            # Use examples from the NAG documentation
            A = np.array([[1.80e+10, 2.88e+10, 2.05e+00, -8.90e+09],
                          [5.25e+00, -2.95e+00, -9.50e-09, -3.80e+00],
                          [1.58e+00, -2.69e+00, -2.90e-10, -1.04e+00],
                          [-1.11e+00, -6.60e-01, -5.90e-11, 8.00e-01]])
            A = A.astype(dtype)
        else:
            A = np.array([[-1.34e+00, 0.28e+10, -6.39e+00],
                          [-1.70e+00, 3.31e+10, -0.15e+00],
                          [2.41e-10, -0.56e+00, -0.83e-10]], dtype=dtype)
            A += np.array([[2.55e+00, 3.17e+10, -2.20e+00],
                           [-1.41e+00, -0.15e+10, 1.34e+00],
                           [0.39e-10, 1.47e+00, -0.69e-10]])*1j
            A = A.astype(dtype)
        geequ = get_lapack_funcs('geequ', dtype=dtype)
        r, c, rowcnd, colcnd, amax, info = geequ(A)
        if ind < 2:
            assert_allclose(desired_real.astype(dtype), r[:, None]*A*c,
                            rtol=0, atol=1e-4)
        else:
            assert_allclose(desired_cplx.astype(dtype), r[:, None]*A*c,
                            rtol=0, atol=1e-4)
def test_syequb():
    """Test ?syequb: symmetric-matrix equilibration scale factors.

    Builds an identity plus an anti-diagonal of powers of two and checks
    that the returned scalings are the expected (integer) powers of two.
    """
    expected_log2 = np.array([0, 0, 0, 0, 0, 0, -1, -1, -2, -3])
    for ind, dtype in enumerate(DTYPES):
        # Purely imaginary anti-diagonal for the complex routines.
        scale = dtype(1. if ind < 2 else 1.j)
        anti_diag = np.array([scale * 2.**p for p in range(-5, 5)],
                             dtype=dtype)
        M = np.eye(10, dtype=dtype) + np.rot90(np.diag(anti_diag))
        syequb = get_lapack_funcs('syequb', dtype=dtype)
        s, scond, amax, info = syequb(M)
        assert_equal(np.log2(s).astype(int), expected_log2)
@pytest.mark.skipif(True,
                    reason="Failing on some OpenBLAS version, see gh-12276")
def test_heequb():
    """Test cheequb/zheequb: Hermitian-matrix equilibration scale factors."""
    # zheequb has a bug for versions <= LAPACK 3.9.0
    # See Reference-LAPACK gh-61 and gh-408
    # Hence the zheequb test is customized accordingly to avoid
    # work scaling.
    A = np.diag([2]*5 + [1002]*5) + np.diag(np.ones(9), k=1)*1j
    s, scond, amax, info = lapack.zheequb(A)
    assert_equal(info, 0)
    assert_allclose(np.log2(s), [0., -1.]*2 + [0.] + [-4]*5)
    A = np.diag(2**np.abs(np.arange(-5, 6)) + 0j)
    A[5, 5] = 1024
    A[5, 0] = 16j
    s, scond, amax, info = lapack.cheequb(A.astype(np.complex64), lower=1)
    assert_equal(info, 0)
    assert_allclose(np.log2(s), [-2, -1, -1, 0, 0, -5, 0, -1, -1, -2, -2])
def test_getc2_gesc2():
    """Test ?getc2/?gesc2: full-pivot LU factorization and solve.

    Factorizes a random matrix with complete pivoting (getc2), solves for a
    known right-hand side (gesc2) and checks the recovered solution.
    """
    np.random.seed(42)
    n = 10
    desired_real = np.random.rand(n)
    desired_cplx = np.random.rand(n) + np.random.rand(n)*1j
    for ind, dtype in enumerate(DTYPES):
        if ind < 2:
            A = np.random.rand(n, n)
            A = A.astype(dtype)
            b = A @ desired_real
            b = b.astype(dtype)
        else:
            A = np.random.rand(n, n) + np.random.rand(n, n)*1j
            A = A.astype(dtype)
            b = A @ desired_cplx
            b = b.astype(dtype)
        getc2 = get_lapack_funcs('getc2', dtype=dtype)
        gesc2 = get_lapack_funcs('gesc2', dtype=dtype)
        lu, ipiv, jpiv, info = getc2(A, overwrite_a=0)
        # gesc2 returns the solution scaled by `scale` to avoid overflow.
        x, scale = gesc2(lu, b, ipiv, jpiv, overwrite_rhs=0)
        if ind < 2:
            assert_array_almost_equal(desired_real.astype(dtype),
                                      x/scale, decimal=4)
        else:
            assert_array_almost_equal(desired_cplx.astype(dtype),
                                      x/scale, decimal=4)
@pytest.mark.parametrize('size', [(6, 5), (5, 5)])
@pytest.mark.parametrize('dtype', REAL_DTYPES)
@pytest.mark.parametrize('joba', range(6))  # 'C', 'E', 'F', 'G', 'A', 'R'
@pytest.mark.parametrize('jobu', range(4))  # 'U', 'F', 'W', 'N'
@pytest.mark.parametrize('jobv', range(4))  # 'V', 'J', 'W', 'N'
@pytest.mark.parametrize('jobr', [0, 1])
@pytest.mark.parametrize('jobp', [0, 1])
def test_gejsv_general(size, dtype, joba, jobu, jobv, jobr, jobp, jobt=0):
    """Test the lapack routine ?gejsv.
    This function tests that a singular value decomposition can be performed
    on the random M-by-N matrix A. The test performs the SVD using ?gejsv
    then performs the following checks:
    * ?gejsv exits successfully (info == 0)
    * The returned singular values are correct
    * `A` can be reconstructed from `u`, `SIGMA`, `v`
    * Ensure that u.T @ u is the identity matrix
    * Ensure that v.T @ v is the identity matrix
    * The reported matrix rank
    * The reported number of singular values
    * If denormalized floats are required
    Notes
    -----
    joba specifies several choices affecting the calculation's accuracy
    Although all arguments are tested, the tests only check that the correct
    solution is returned - NOT that the prescribed actions are performed
    internally.
    jobt is, as of v3.9.0, still experimental and removed to cut down number of
    test cases. However keyword itself is tested externally.
    """
    seed(42)
    # Define some constants for later use:
    m, n = size
    atol = 100 * np.finfo(dtype).eps
    A = generate_random_dtype_array(size, dtype)
    gejsv = get_lapack_funcs('gejsv', dtype=dtype)
    # Set up checks for invalid job? combinations
    # if an invalid combination occurs we set the appropriate
    # exit status.
    lsvec = jobu < 2  # Calculate left singular vectors
    rsvec = jobv < 2  # Calculate right singular vectors
    l2tran = (jobt == 1) and (m == n)
    is_complex = np.iscomplexobj(A)
    invalid_real_jobv = (jobv == 1) and (not lsvec) and (not is_complex)
    invalid_cplx_jobu = (jobu == 2) and not (rsvec and l2tran) and is_complex
    invalid_cplx_jobv = (jobv == 2) and not (lsvec and l2tran) and is_complex
    # Set the exit status to the expected value.
    # Here we only check for invalid combinations, not individual
    # parameters.
    if invalid_cplx_jobu:
        exit_status = -2
    elif invalid_real_jobv or invalid_cplx_jobv:
        exit_status = -3
    else:
        exit_status = 0
    if (jobu > 1) and (jobv == 1):
        assert_raises(Exception, gejsv, A, joba, jobu, jobv, jobr, jobt, jobp)
    else:
        sva, u, v, work, iwork, info = gejsv(A,
                                             joba=joba,
                                             jobu=jobu,
                                             jobv=jobv,
                                             jobr=jobr,
                                             jobt=jobt,
                                             jobp=jobp)
        # Check that ?gejsv exited successfully/as expected
        assert_equal(info, exit_status)
        # If exit_status is non-zero the combination of jobs is invalid.
        # We test this above but no calculations are performed.
        if not exit_status:
            # Check the returned singular values
            sigma = (work[0] / work[1]) * sva[:n]
            assert_allclose(sigma, svd(A, compute_uv=False), atol=atol)
            if jobu == 1:
                # If JOBU = 'F', then u contains the M-by-M matrix of
                # the left singular vectors, including an ONB of the orthogonal
                # complement of the Range(A)
                # However, to recalculate A we are concerned about the
                # first n singular values and so can ignore the latter.
                # TODO: Add a test for ONB?
                u = u[:, :n]
            if lsvec and rsvec:
                assert_allclose(u @ np.diag(sigma) @ v.conj().T, A, atol=atol)
            if lsvec:
                assert_allclose(u.conj().T @ u, np.identity(n), atol=atol)
            if rsvec:
                assert_allclose(v.conj().T @ v, np.identity(n), atol=atol)
            assert_equal(iwork[0], np.linalg.matrix_rank(A))
            assert_equal(iwork[1], np.count_nonzero(sigma))
            # iwork[2] is non-zero if requested accuracy is not warranted for
            # the data. This should never occur for these tests.
            assert_equal(iwork[2], 0)
@pytest.mark.parametrize('dtype', REAL_DTYPES)
def test_gejsv_edge_arguments(dtype):
    """Test edge arguments return expected status"""
    gejsv = get_lapack_funcs('gejsv', dtype=dtype)
    # scalar A
    sva, u, v, work, iwork, info = gejsv(1.)
    assert_equal(info, 0)
    assert_equal(u.shape, (1, 1))
    assert_equal(v.shape, (1, 1))
    assert_equal(sva, np.array([1.], dtype=dtype))
    # 1d A
    A = np.ones((1,), dtype=dtype)
    sva, u, v, work, iwork, info = gejsv(A)
    assert_equal(info, 0)
    assert_equal(u.shape, (1, 1))
    assert_equal(v.shape, (1, 1))
    assert_equal(sva, np.array([1.], dtype=dtype))
    # 2d empty A
    A = np.ones((1, 0), dtype=dtype)
    sva, u, v, work, iwork, info = gejsv(A)
    assert_equal(info, 0)
    assert_equal(u.shape, (1, 0))
    assert_equal(v.shape, (1, 0))
    assert_equal(sva, np.array([], dtype=dtype))
    # make sure "overwrite_a" is respected - user reported in gh-13191
    A = np.sin(np.arange(100).reshape(10, 10)).astype(dtype)
    A = np.asfortranarray(A + A.T)  # make it symmetric and column major
    Ac = A.copy('A')
    _ = gejsv(A)
    assert_allclose(A, Ac)
@pytest.mark.parametrize(('kwargs'),
                         ({'joba': 9},
                          {'jobu': 9},
                          {'jobv': 9},
                          {'jobr': 9},
                          {'jobt': 9},
                          {'jobp': 9})
                         )
def test_gejsv_invalid_job_arguments(kwargs):
    """Each out-of-range job* keyword must make ?gejsv raise."""
    gejsv = get_lapack_funcs('gejsv', dtype=float)
    mat = np.ones((2, 2), dtype=float)
    assert_raises(Exception, gejsv, mat, **kwargs)
@pytest.mark.parametrize("A,sva_expect,u_expect,v_expect",
[(np.array([[2.27, -1.54, 1.15, -1.94],
[0.28, -1.67, 0.94, -0.78],
[-0.48, -3.09, 0.99, -0.21],
[1.07, 1.22, 0.79, 0.63],
[-2.35, 2.93, -1.45, 2.30],
[0.62, -7.39, 1.03, -2.57]]),
np.array([9.9966, 3.6831, 1.3569, 0.5000]),
np.array([[0.2774, -0.6003, -0.1277, 0.1323],
[0.2020, -0.0301, 0.2805, 0.7034],
[0.2918, 0.3348, 0.6453, 0.1906],
[-0.0938, -0.3699, 0.6781, -0.5399],
[-0.4213, 0.5266, 0.0413, -0.0575],
[0.7816, 0.3353, -0.1645, -0.3957]]),
np.array([[0.1921, -0.8030, 0.0041, -0.5642],
[-0.8794, -0.3926, -0.0752, 0.2587],
[0.2140, -0.2980, 0.7827, 0.5027],
[-0.3795, 0.3351, 0.6178, -0.6017]]))])
def test_gejsv_NAG(A, sva_expect, u_expect, v_expect):
"""
This test implements the example found in the NAG manual, f08khf.
An example was not found for the complex case.
"""
# NAG manual provides accuracy up to 4 decimals
atol = 1e-4
gejsv = get_lapack_funcs('gejsv', dtype=A.dtype)
sva, u, v, work, iwork, info = gejsv(A)
assert_allclose(sva_expect, sva, atol=atol)
assert_allclose(u_expect, u, atol=atol)
assert_allclose(v_expect, v, atol=atol)
@pytest.mark.parametrize("dtype", DTYPES)
def test_gttrf_gttrs(dtype):
# The test uses ?gttrf and ?gttrs to solve a random system for each dtype,
# tests that the output of ?gttrf define LU matricies, that input
# parameters are unmodified, transposal options function correctly, that
# incompatible matrix shapes raise an error, and singular matrices return
# non zero info.
seed(42)
n = 10
atol = 100 * np.finfo(dtype).eps
# create the matrix in accordance with the data type
du = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
dl = generate_random_dtype_array((n-1,), dtype=dtype)
diag_cpy = [dl.copy(), d.copy(), du.copy()]
A = np.diag(d) + np.diag(dl, -1) + np.diag(du, 1)
x = np.random.rand(n)
b = A @ x
gttrf, gttrs = get_lapack_funcs(('gttrf', 'gttrs'), dtype=dtype)
_dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du)
# test to assure that the inputs of ?gttrf are unmodified
assert_array_equal(dl, diag_cpy[0])
assert_array_equal(d, diag_cpy[1])
assert_array_equal(du, diag_cpy[2])
# generate L and U factors from ?gttrf return values
# L/U are lower/upper triangular by construction (initially and at end)
U = np.diag(_d, 0) + np.diag(_du, 1) + np.diag(du2, 2)
L = np.eye(n, dtype=dtype)
for i, m in enumerate(_dl):
# L is given in a factored form.
# See
# www.hpcavf.uclan.ac.uk/softwaredoc/sgi_scsl_html/sgi_html/ch03.html
piv = ipiv[i] - 1
# right multiply by permutation matrix
L[:, [i, piv]] = L[:, [piv, i]]
# right multiply by Li, rank-one modification of identity
L[:, i] += L[:, i+1]*m
# one last permutation
i, piv = -1, ipiv[-1] - 1
# right multiply by final permutation matrix
L[:, [i, piv]] = L[:, [piv, i]]
# check that the outputs of ?gttrf define an LU decomposition of A
assert_allclose(A, L @ U, atol=atol)
b_cpy = b.copy()
x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b)
# test that the inputs of ?gttrs are unmodified
assert_array_equal(b, b_cpy)
# test that the result of ?gttrs matches the expected input
assert_allclose(x, x_gttrs, atol=atol)
# test that ?gttrf and ?gttrs work with transposal options
if dtype in REAL_DTYPES:
trans = "T"
b_trans = A.T @ x
else:
trans = "C"
b_trans = A.conj().T @ x
x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b_trans, trans=trans)
assert_allclose(x, x_gttrs, atol=atol)
# test that ValueError is raised with incompatible matrix shapes
with assert_raises(ValueError):
gttrf(dl[:-1], d, du)
with assert_raises(ValueError):
gttrf(dl, d[:-1], du)
with assert_raises(ValueError):
gttrf(dl, d, du[:-1])
# test that matrix of size n=2 raises exception
with assert_raises(Exception):
gttrf(dl[0], d[:1], du[0])
# test that singular (row of all zeroes) matrix fails via info
du[0] = 0
d[0] = 0
__dl, __d, __du, _du2, _ipiv, _info = gttrf(dl, d, du)
np.testing.assert_(__d[info - 1] == 0,
"?gttrf: _d[info-1] is {}, not the illegal value :0."
.format(__d[info - 1]))
@pytest.mark.parametrize("du, d, dl, du_exp, d_exp, du2_exp, ipiv_exp, b, x",
[(np.array([2.1, -1.0, 1.9, 8.0]),
np.array([3.0, 2.3, -5.0, -.9, 7.1]),
np.array([3.4, 3.6, 7.0, -6.0]),
np.array([2.3, -5, -.9, 7.1]),
np.array([3.4, 3.6, 7, -6, -1.015373]),
np.array([-1, 1.9, 8]),
np.array([2, 3, 4, 5, 5]),
np.array([[2.7, 6.6],
[-0.5, 10.8],
[2.6, -3.2],
[0.6, -11.2],
[2.7, 19.1]
]),
np.array([[-4, 5],
[7, -4],
[3, -3],
[-4, -2],
[-3, 1]])),
(
np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]),
np.array([-1.3 + 1.3j, -1.3 + 1.3j,
-1.3 + 3.3j, - .3 + 4.3j,
-3.3 + 1.3j]),
np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]),
# du exp
np.array([-1.3 + 1.3j, -1.3 + 3.3j,
-0.3 + 4.3j, -3.3 + 1.3j]),
np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j,
-1.3399 + 0.2875j]),
np.array([2 + 1j, -1 + 1j, 1 - 1j]),
np.array([2, 3, 4, 5, 5]),
np.array([[2.4 - 5j, 2.7 + 6.9j],
[3.4 + 18.2j, - 6.9 - 5.3j],
[-14.7 + 9.7j, - 6 - .6j],
[31.9 - 7.7j, -3.9 + 9.3j],
[-1 + 1.6j, -3 + 12.2j]]),
np.array([[1 + 1j, 2 - 1j],
[3 - 1j, 1 + 2j],
[4 + 5j, -1 + 1j],
[-1 - 2j, 2 + 1j],
[1 - 1j, 2 - 2j]])
)])
def test_gttrf_gttrs_NAG_f07cdf_f07cef_f07crf_f07csf(du, d, dl, du_exp, d_exp,
du2_exp, ipiv_exp, b, x):
# test to assure that wrapper is consistent with NAG Library Manual Mark 26
# example problems: f07cdf and f07cef (real)
# examples: f07crf and f07csf (complex)
# (Links may expire, so search for "NAG Library Manual Mark 26" online)
gttrf, gttrs = get_lapack_funcs(('gttrf', "gttrs"), (du[0], du[0]))
_dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du)
assert_allclose(du2, du2_exp)
assert_allclose(_du, du_exp)
assert_allclose(_d, d_exp, atol=1e-4) # NAG examples provide 4 decimals.
assert_allclose(ipiv, ipiv_exp)
x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b)
assert_allclose(x_gttrs, x)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('shape', [(3, 7), (7, 3), (2**18, 2**18)])
def test_geqrfp_lwork(dtype, shape):
    """The ?geqrfp workspace query succeeds for tall, wide and huge shapes."""
    rows, cols = shape
    # fetch the workspace-size helper for this dtype and query it
    lwork_query = get_lapack_funcs(('geqrfp_lwork'), dtype=dtype)
    lwork, info = lwork_query(m=rows, n=cols)
    # info == 0 signals a successful query
    assert_equal(info, 0)
@pytest.mark.parametrize("ddtype,dtype",
zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
def test_pttrf_pttrs(ddtype, dtype):
seed(42)
# set test tolerance appropriate for dtype
atol = 100*np.finfo(dtype).eps
# n is the length diagonal of A
n = 10
# create diagonals according to size and dtype
# diagonal d should always be real.
# add 4 to d so it will be dominant for all dtypes
d = generate_random_dtype_array((n,), ddtype) + 4
# diagonal e may be real or complex.
e = generate_random_dtype_array((n-1,), dtype)
# assemble diagonals together into matrix
A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
# store a copy of diagonals to later verify
diag_cpy = [d.copy(), e.copy()]
pttrf = get_lapack_funcs('pttrf', dtype=dtype)
_d, _e, info = pttrf(d, e)
# test to assure that the inputs of ?pttrf are unmodified
assert_array_equal(d, diag_cpy[0])
assert_array_equal(e, diag_cpy[1])
assert_equal(info, 0, err_msg=f"pttrf: info = {info}, should be 0")
# test that the factors from pttrf can be recombined to make A
L = np.diag(_e, -1) + np.diag(np.ones(n))
D = np.diag(_d)
assert_allclose(A, L@D@L.conjugate().T, atol=atol)
# generate random solution x
x = generate_random_dtype_array((n,), dtype)
# determine accompanying b to get soln x
b = A@x
# determine _x from pttrs
pttrs = get_lapack_funcs('pttrs', dtype=dtype)
_x, info = pttrs(_d, _e.conj(), b)
assert_equal(info, 0, err_msg=f"pttrs: info = {info}, should be 0")
# test that _x from pttrs matches the expected x
assert_allclose(x, _x, atol=atol)
@pytest.mark.parametrize("ddtype,dtype",
zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
def test_pttrf_pttrs_errors_incompatible_shape(ddtype, dtype):
n = 10
pttrf = get_lapack_funcs('pttrf', dtype=dtype)
d = generate_random_dtype_array((n,), ddtype) + 2
e = generate_random_dtype_array((n-1,), dtype)
# test that ValueError is raised with incompatible matrix shapes
assert_raises(ValueError, pttrf, d[:-1], e)
assert_raises(ValueError, pttrf, d, e[:-1])
@pytest.mark.parametrize("ddtype,dtype",
zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
def test_pttrf_pttrs_errors_singular_nonSPD(ddtype, dtype):
n = 10
pttrf = get_lapack_funcs('pttrf', dtype=dtype)
d = generate_random_dtype_array((n,), ddtype) + 2
e = generate_random_dtype_array((n-1,), dtype)
# test that singular (row of all zeroes) matrix fails via info
d[0] = 0
e[0] = 0
_d, _e, info = pttrf(d, e)
assert_equal(_d[info - 1], 0,
"?pttrf: _d[info-1] is {}, not the illegal value :0."
.format(_d[info - 1]))
# test with non-spd matrix
d = generate_random_dtype_array((n,), ddtype)
_d, _e, info = pttrf(d, e)
assert_(info != 0, "?pttrf should fail with non-spd matrix, but didn't")
@pytest.mark.parametrize(("d, e, d_expect, e_expect, b, x_expect"), [
(np.array([4, 10, 29, 25, 5]),
np.array([-2, -6, 15, 8]),
np.array([4, 9, 25, 16, 1]),
np.array([-.5, -.6667, .6, .5]),
np.array([[6, 10], [9, 4], [2, 9], [14, 65],
[7, 23]]),
np.array([[2.5, 2], [2, -1], [1, -3], [-1, 6],
[3, -5]])
), (
np.array([16, 41, 46, 21]),
np.array([16 + 16j, 18 - 9j, 1 - 4j]),
np.array([16, 9, 1, 4]),
np.array([1+1j, 2-1j, 1-4j]),
np.array([[64+16j, -16-32j], [93+62j, 61-66j],
[78-80j, 71-74j], [14-27j, 35+15j]]),
np.array([[2+1j, -3-2j], [1+1j, 1+1j], [1-2j, 1-2j],
[1-1j, 2+1j]])
)])
def test_pttrf_pttrs_NAG(d, e, d_expect, e_expect, b, x_expect):
# test to assure that wrapper is consistent with NAG Manual Mark 26
# example problems: f07jdf and f07jef (real)
# examples: f07jrf and f07csf (complex)
# NAG examples provide 4 decimals.
# (Links expire, so please search for "NAG Library Manual Mark 26" online)
atol = 1e-4
pttrf = get_lapack_funcs('pttrf', dtype=e[0])
_d, _e, info = pttrf(d, e)
assert_allclose(_d, d_expect, atol=atol)
assert_allclose(_e, e_expect, atol=atol)
pttrs = get_lapack_funcs('pttrs', dtype=e[0])
_x, info = pttrs(_d, _e.conj(), b)
assert_allclose(_x, x_expect, atol=atol)
# also test option `lower`
if e.dtype in COMPLEX_DTYPES:
_x, info = pttrs(_d, _e, b, lower=1)
assert_allclose(_x, x_expect, atol=atol)
def pteqr_get_d_e_A_z(dtype, realtype, n, compute_z):
    """Build the (d, e, A, z) inputs shared by the ?pteqr tests.

    For compute_z == 1 a Hermitian A with known orthogonal z is assembled
    from random tridiagonal factors; otherwise A is the tridiagonal matrix
    itself and z is an (unused) copy of it.
    """
    if compute_z == 1:
        # build Hermitian A from Q**T * tri * Q by creating Q and tri:
        # start from a random, diagonally dominant matrix and symmetrize it
        base = generate_random_dtype_array((n, n), dtype)
        base = base + np.diag(np.zeros(n) + 4*n)
        base = (base + base.conj().T) / 2
        # its right eigenvectors form an orthogonal basis
        q = eigh(base)[1]
        # random SPD tridiagonal factors (diagonal shifted to dominate)
        d = generate_random_dtype_array((n,), realtype) + 4
        e = generate_random_dtype_array((n-1,), realtype)
        tri = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
        # combine as sytrd would: Q**T * tri * Q = A
        A = q @ tri @ q.conj().T
        z = q
    else:
        # d and e are always real per lapack docs; shift d to make A SPD
        d = generate_random_dtype_array((n,), realtype)
        e = generate_random_dtype_array((n-1,), realtype)
        d = d + 4
        A = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
        z = np.diag(d) + np.diag(e, -1) + np.diag(e, 1)
    return (d, e, A, z)
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr(dtype, realtype, compute_z):
'''
Tests the ?pteqr lapack routine for all dtypes and compute_z parameters.
It generates random SPD matrix diagonals d and e, and then confirms
correct eigenvalues with scipy.linalg.eig. With applicable compute_z=2 it
tests that z can reform A.
'''
seed(42)
atol = 1000*np.finfo(dtype).eps
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
d_pteqr, e_pteqr, z_pteqr, info = pteqr(d=d, e=e, z=z, compute_z=compute_z)
assert_equal(info, 0, f"info = {info}, should be 0.")
# compare the routine's eigenvalues with scipy.linalg.eig's.
assert_allclose(np.sort(eigh(A)[0]), np.sort(d_pteqr), atol=atol)
if compute_z:
# verify z_pteqr as orthogonal
assert_allclose(z_pteqr @ np.conj(z_pteqr).T, np.identity(n),
atol=atol)
# verify that z_pteqr recombines to A
assert_allclose(z_pteqr @ np.diag(d_pteqr) @ np.conj(z_pteqr).T,
A, atol=atol)
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr_error_non_spd(dtype, realtype, compute_z):
seed(42)
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
# test with non-spd matrix
d_pteqr, e_pteqr, z_pteqr, info = pteqr(d - 4, e, z=z, compute_z=compute_z)
assert info > 0
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr_raise_error_wrong_shape(dtype, realtype, compute_z):
seed(42)
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
# test with incorrect/incompatible array sizes
assert_raises(ValueError, pteqr, d[:-1], e, z=z, compute_z=compute_z)
assert_raises(ValueError, pteqr, d, e[:-1], z=z, compute_z=compute_z)
if compute_z:
assert_raises(ValueError, pteqr, d, e, z=z[:-1], compute_z=compute_z)
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr_error_singular(dtype, realtype, compute_z):
seed(42)
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
# test with singular matrix
d[0] = 0
e[0] = 0
d_pteqr, e_pteqr, z_pteqr, info = pteqr(d, e, z=z, compute_z=compute_z)
assert info > 0
@pytest.mark.parametrize("compute_z,d,e,d_expect,z_expect",
[(2, # "I"
np.array([4.16, 5.25, 1.09, .62]),
np.array([3.17, -.97, .55]),
np.array([8.0023, 1.9926, 1.0014, 0.1237]),
np.array([[0.6326, 0.6245, -0.4191, 0.1847],
[0.7668, -0.4270, 0.4176, -0.2352],
[-0.1082, 0.6071, 0.4594, -0.6393],
[-0.0081, 0.2432, 0.6625, 0.7084]])),
])
def test_pteqr_NAG_f08jgf(compute_z, d, e, d_expect, z_expect):
'''
Implements real (f08jgf) example from NAG Manual Mark 26.
Tests for correct outputs.
'''
# the NAG manual has 4 decimals accuracy
atol = 1e-4
pteqr = get_lapack_funcs(('pteqr'), dtype=d.dtype)
z = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
_d, _e, _z, info = pteqr(d=d, e=e, z=z, compute_z=compute_z)
assert_allclose(_d, d_expect, atol=atol)
assert_allclose(np.abs(_z), np.abs(z_expect), atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('matrix_size', [(3, 4), (7, 6), (6, 6)])
def test_geqrfp(dtype, matrix_size):
# Tests for all dytpes, tall, wide, and square matrices.
# Using the routine with random matrix A, Q and R are obtained and then
# tested such that R is upper triangular and non-negative on the diagonal,
# and Q is an orthagonal matrix. Verifies that A=Q@R. It also
# tests against a matrix that for which the linalg.qr method returns
# negative diagonals, and for error messaging.
# set test tolerance appropriate for dtype
np.random.seed(42)
rtol = 250*np.finfo(dtype).eps
atol = 100*np.finfo(dtype).eps
# get appropriate ?geqrfp for dtype
geqrfp = get_lapack_funcs(('geqrfp'), dtype=dtype)
gqr = get_lapack_funcs(("orgqr"), dtype=dtype)
m, n = matrix_size
# create random matrix of dimentions m x n
A = generate_random_dtype_array((m, n), dtype=dtype)
# create qr matrix using geqrfp
qr_A, tau, info = geqrfp(A)
# obtain r from the upper triangular area
r = np.triu(qr_A)
# obtain q from the orgqr lapack routine
# based on linalg.qr's extraction strategy of q with orgqr
if m > n:
# this adds an extra column to the end of qr_A
# let qqr be an empty m x m matrix
qqr = np.zeros((m, m), dtype=dtype)
# set first n columns of qqr to qr_A
qqr[:, :n] = qr_A
# determine q from this qqr
# note that m is a sufficient for lwork based on LAPACK documentation
q = gqr(qqr, tau=tau, lwork=m)[0]
else:
q = gqr(qr_A[:, :m], tau=tau, lwork=m)[0]
# test that q and r still make A
assert_allclose(q@r, A, rtol=rtol)
# ensure that q is orthogonal (that q @ transposed q is the identity)
assert_allclose(np.eye(q.shape[0]), q@(q.conj().T), rtol=rtol,
atol=atol)
# ensure r is upper tri by comparing original r to r as upper triangular
assert_allclose(r, np.triu(r), rtol=rtol)
# make sure diagonals of r are positive for this random solution
assert_(np.all(np.diag(r) > np.zeros(len(np.diag(r)))))
# ensure that info is zero for this success
assert_(info == 0)
# test that this routine gives r diagonals that are positive for a
# matrix that returns negatives in the diagonal with scipy.linalg.rq
A_negative = generate_random_dtype_array((n, m), dtype=dtype) * -1
r_rq_neg, q_rq_neg = qr(A_negative)
rq_A_neg, tau_neg, info_neg = geqrfp(A_negative)
# assert that any of the entries on the diagonal from linalg.qr
# are negative and that all of geqrfp are positive.
assert_(np.any(np.diag(r_rq_neg) < 0) and
np.all(np.diag(r) > 0))
def test_geqrfp_errors_with_empty_array():
    """?geqrfp on a zero-sized input raises instead of crashing."""
    empty = np.array([])
    geqrfp = get_lapack_funcs('geqrfp', dtype=empty.dtype)
    # any exception with a clear message is acceptable here
    assert_raises(Exception, geqrfp, empty)
@pytest.mark.parametrize("driver", ['ev', 'evd', 'evr', 'evx'])
@pytest.mark.parametrize("pfx", ['sy', 'he'])
def test_standard_eigh_lworks(pfx, driver):
n = 1200 # Some sufficiently big arbitrary number
dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES
sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0])
dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1])
try:
_compute_lwork(sc_dlw, n, lower=1)
_compute_lwork(dz_dlw, n, lower=1)
except Exception as e:
pytest.fail("{}_lwork raised unexpected exception: {}"
"".format(pfx+driver, e))
@pytest.mark.parametrize("driver", ['gv', 'gvx'])
@pytest.mark.parametrize("pfx", ['sy', 'he'])
def test_generalized_eigh_lworks(pfx, driver):
n = 1200 # Some sufficiently big arbitrary number
dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES
sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0])
dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1])
# Shouldn't raise any exceptions
try:
_compute_lwork(sc_dlw, n, uplo="L")
_compute_lwork(dz_dlw, n, uplo="L")
except Exception as e:
pytest.fail("{}_lwork raised unexpected exception: {}"
"".format(pfx+driver, e))
@pytest.mark.parametrize("dtype_", DTYPES)
@pytest.mark.parametrize("m", [1, 10, 100, 1000])
def test_orcsd_uncsd_lwork(dtype_, m):
seed(1234)
p = randint(0, m)
q = m - p
pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
dlw = pfx + 'csd_lwork'
lw = get_lapack_funcs(dlw, dtype=dtype_)
lwval = _compute_lwork(lw, m, p, q)
lwval = lwval if pfx == 'un' else (lwval,)
assert all([x > 0 for x in lwval])
@pytest.mark.parametrize("dtype_", DTYPES)
def test_orcsd_uncsd(dtype_):
m, p, q = 250, 80, 170
pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
X = ortho_group.rvs(m) if pfx == 'or' else unitary_group.rvs(m)
drv, dlw = get_lapack_funcs((pfx + 'csd', pfx + 'csd_lwork'), dtype=dtype_)
lwval = _compute_lwork(dlw, m, p, q)
lwvals = {'lwork': lwval} if pfx == 'or' else dict(zip(['lwork',
'lrwork'], lwval))
cs11, cs12, cs21, cs22, theta, u1, u2, v1t, v2t, info =\
drv(X[:p, :q], X[:p, q:], X[p:, :q], X[p:, q:], **lwvals)
assert info == 0
U = block_diag(u1, u2)
VH = block_diag(v1t, v2t)
r = min(min(p, q), min(m-p, m-q))
n11 = min(p, q) - r
n12 = min(p, m-q) - r
n21 = min(m-p, q) - r
n22 = min(m-p, m-q) - r
S = np.zeros((m, m), dtype=dtype_)
one = dtype_(1.)
for i in range(n11):
S[i, i] = one
for i in range(n22):
S[p+i, q+i] = one
for i in range(n12):
S[i+n11+r, i+n11+r+n21+n22+r] = -one
for i in range(n21):
S[p+n22+r+i, n11+r+i] = one
for i in range(r):
S[i+n11, i+n11] = np.cos(theta[i])
S[p+n22+i, i+r+n21+n22] = np.cos(theta[i])
S[i+n11, i+n11+n21+n22+r] = -np.sin(theta[i])
S[p+n22+i, i+n11] = np.sin(theta[i])
Xc = U @ S @ VH
assert_allclose(X, Xc, rtol=0., atol=1e4*np.finfo(dtype_).eps)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("trans_bool", [False, True])
@pytest.mark.parametrize("fact", ["F", "N"])
def test_gtsvx(dtype, trans_bool, fact):
"""
These tests uses ?gtsvx to solve a random Ax=b system for each dtype.
It tests that the outputs define an LU matrix, that inputs are unmodified,
transposal options, incompatible shapes, singular matrices, and
singular factorizations. It parametrizes DTYPES and the 'fact' value along
with the fact related inputs.
"""
seed(42)
# set test tolerance appropriate for dtype
atol = 100 * np.finfo(dtype).eps
# obtain routine
gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
# Generate random tridiagonal matrix A
n = 10
dl = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
du = generate_random_dtype_array((n-1,), dtype=dtype)
A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
# generate random solution x
x = generate_random_dtype_array((n, 2), dtype=dtype)
# create b from x for equation Ax=b
trans = ("T" if dtype in REAL_DTYPES else "C") if trans_bool else "N"
b = (A.conj().T if trans_bool else A) @ x
# store a copy of the inputs to check they haven't been modified later
inputs_cpy = [dl.copy(), d.copy(), du.copy(), b.copy()]
# set these to None if fact = 'N', or the output of gttrf is fact = 'F'
dlf_, df_, duf_, du2f_, ipiv_, info_ = \
gttrf(dl, d, du) if fact == 'F' else [None]*6
gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
assert_(info == 0, f"?gtsvx info = {info}, should be zero")
# assure that inputs are unmodified
assert_array_equal(dl, inputs_cpy[0])
assert_array_equal(d, inputs_cpy[1])
assert_array_equal(du, inputs_cpy[2])
assert_array_equal(b, inputs_cpy[3])
# test that x_soln matches the expected x
assert_allclose(x, x_soln, atol=atol)
# assert that the outputs are of correct type or shape
# rcond should be a scalar
assert_(hasattr(rcond, "__len__") is not True,
f"rcond should be scalar but is {rcond}")
# ferr should be length of # of cols in x
assert_(ferr.shape[0] == b.shape[1], "ferr.shape is {} but shoud be {},"
.format(ferr.shape[0], b.shape[1]))
# berr should be length of # of cols in x
assert_(berr.shape[0] == b.shape[1], "berr.shape is {} but shoud be {},"
.format(berr.shape[0], b.shape[1]))
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("trans_bool", [0, 1])
@pytest.mark.parametrize("fact", ["F", "N"])
def test_gtsvx_error_singular(dtype, trans_bool, fact):
seed(42)
# obtain routine
gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
# Generate random tridiagonal matrix A
n = 10
dl = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
du = generate_random_dtype_array((n-1,), dtype=dtype)
A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
# generate random solution x
x = generate_random_dtype_array((n, 2), dtype=dtype)
# create b from x for equation Ax=b
trans = "T" if dtype in REAL_DTYPES else "C"
b = (A.conj().T if trans_bool else A) @ x
# set these to None if fact = 'N', or the output of gttrf is fact = 'F'
dlf_, df_, duf_, du2f_, ipiv_, info_ = \
gttrf(dl, d, du) if fact == 'F' else [None]*6
gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
# test with singular matrix
# no need to test inputs with fact "F" since ?gttrf already does.
if fact == "N":
# Construct a singular example manually
d[-1] = 0
dl[-1] = 0
# solve using routine
gtsvx_out = gtsvx(dl, d, du, b)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
# test for the singular matrix.
assert info > 0, "info should be > 0 for singular matrix"
elif fact == 'F':
# assuming that a singular factorization is input
df_[-1] = 0
duf_[-1] = 0
du2f_[-1] = 0
gtsvx_out = gtsvx(dl, d, du, b, fact=fact, dlf=dlf_, df=df_, duf=duf_,
du2=du2f_, ipiv=ipiv_)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
# info should not be zero and should provide index of illegal value
assert info > 0, "info should be > 0 for singular matrix"
@pytest.mark.parametrize("dtype", DTYPES*2)
@pytest.mark.parametrize("trans_bool", [False, True])
@pytest.mark.parametrize("fact", ["F", "N"])
def test_gtsvx_error_incompatible_size(dtype, trans_bool, fact):
seed(42)
# obtain routine
gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
# Generate random tridiagonal matrix A
n = 10
dl = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
du = generate_random_dtype_array((n-1,), dtype=dtype)
A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
# generate random solution x
x = generate_random_dtype_array((n, 2), dtype=dtype)
# create b from x for equation Ax=b
trans = "T" if dtype in REAL_DTYPES else "C"
b = (A.conj().T if trans_bool else A) @ x
# set these to None if fact = 'N', or the output of gttrf is fact = 'F'
dlf_, df_, duf_, du2f_, ipiv_, info_ = \
gttrf(dl, d, du) if fact == 'F' else [None]*6
if fact == "N":
assert_raises(ValueError, gtsvx, dl[:-1], d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d[:-1], du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du[:-1], b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(Exception, gtsvx, dl, d, du, b[:-1],
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
else:
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_[:-1], df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_[:-1],
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_[:-1], du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_[:-1], ipiv=ipiv_)
@pytest.mark.parametrize("du,d,dl,b,x",
[(np.array([2.1, -1.0, 1.9, 8.0]),
np.array([3.0, 2.3, -5.0, -0.9, 7.1]),
np.array([3.4, 3.6, 7.0, -6.0]),
np.array([[2.7, 6.6], [-.5, 10.8], [2.6, -3.2],
[.6, -11.2], [2.7, 19.1]]),
np.array([[-4, 5], [7, -4], [3, -3], [-4, -2],
[-3, 1]])),
(np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]),
np.array([-1.3 + 1.3j, -1.3 + 1.3j, -1.3 + 3.3j,
-.3 + 4.3j, -3.3 + 1.3j]),
np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]),
np.array([[2.4 - 5j, 2.7 + 6.9j],
[3.4 + 18.2j, -6.9 - 5.3j],
[-14.7 + 9.7j, -6 - .6j],
[31.9 - 7.7j, -3.9 + 9.3j],
[-1 + 1.6j, -3 + 12.2j]]),
np.array([[1 + 1j, 2 - 1j], [3 - 1j, 1 + 2j],
[4 + 5j, -1 + 1j], [-1 - 2j, 2 + 1j],
[1 - 1j, 2 - 2j]]))])
def test_gtsvx_NAG(du, d, dl, b, x):
# Test to ensure wrapper is consistent with NAG Manual Mark 26
# example problems: real (f07cbf) and complex (f07cpf)
gtsvx = get_lapack_funcs('gtsvx', dtype=d.dtype)
gtsvx_out = gtsvx(dl, d, du, b)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
assert_array_almost_equal(x, x_soln)
@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+ REAL_DTYPES))
@pytest.mark.parametrize("fact,df_de_lambda",
[("F",
lambda d, e:get_lapack_funcs('pttrf',
dtype=e.dtype)(d, e)),
("N", lambda d, e: (None, None, None))])
def test_ptsvx(dtype, realtype, fact, df_de_lambda):
'''
This tests the ?ptsvx lapack routine wrapper to solve a random system
Ax = b for all dtypes and input variations. Tests for: unmodified
input parameters, fact options, incompatible matrix shapes raise an error,
and singular matrices return info of illegal value.
'''
seed(42)
# set test tolerance appropriate for dtype
atol = 100 * np.finfo(dtype).eps
ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
n = 5
# create diagonals according to size and dtype
d = generate_random_dtype_array((n,), realtype) + 4
e = generate_random_dtype_array((n-1,), dtype)
A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
b = A @ x_soln
# use lambda to determine what df, ef are
df, ef, info = df_de_lambda(d, e)
# create copy to later test that they are unmodified
diag_cpy = [d.copy(), e.copy(), b.copy()]
# solve using routine
df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b, fact=fact,
df=df, ef=ef)
# d, e, and b should be unmodified
assert_array_equal(d, diag_cpy[0])
assert_array_equal(e, diag_cpy[1])
assert_array_equal(b, diag_cpy[2])
assert_(info == 0, f"info should be 0 but is {info}.")
assert_array_almost_equal(x_soln, x)
# test that the factors from ptsvx can be recombined to make A
L = np.diag(ef, -1) + np.diag(np.ones(n))
D = np.diag(df)
assert_allclose(A, L@D@(np.conj(L).T), atol=atol)
# assert that the outputs are of correct type or shape
# rcond should be a scalar
assert not hasattr(rcond, "__len__"), \
f"rcond should be scalar but is {rcond}"
# ferr should be length of # of cols in x
assert_(ferr.shape == (2,), "ferr.shape is {} but shoud be ({},)"
.format(ferr.shape, x_soln.shape[1]))
# berr should be length of # of cols in x
assert_(berr.shape == (2,), "berr.shape is {} but shoud be ({},)"
.format(berr.shape, x_soln.shape[1]))
@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+ REAL_DTYPES))
@pytest.mark.parametrize("fact,df_de_lambda",
[("F",
lambda d, e:get_lapack_funcs('pttrf',
dtype=e.dtype)(d, e)),
("N", lambda d, e: (None, None, None))])
def test_ptsvx_error_raise_errors(dtype, realtype, fact, df_de_lambda):
seed(42)
ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
n = 5
# create diagonals according to size and dtype
d = generate_random_dtype_array((n,), realtype) + 4
e = generate_random_dtype_array((n-1,), dtype)
A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
b = A @ x_soln
# use lambda to determine what df, ef are
df, ef, info = df_de_lambda(d, e)
# test with malformatted array sizes
assert_raises(ValueError, ptsvx, d[:-1], e, b, fact=fact, df=df, ef=ef)
assert_raises(ValueError, ptsvx, d, e[:-1], b, fact=fact, df=df, ef=ef)
assert_raises(Exception, ptsvx, d, e, b[:-1], fact=fact, df=df, ef=ef)
@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+ REAL_DTYPES))
@pytest.mark.parametrize("fact,df_de_lambda",
[("F",
lambda d, e:get_lapack_funcs('pttrf',
dtype=e.dtype)(d, e)),
("N", lambda d, e: (None, None, None))])
def test_ptsvx_non_SPD_singular(dtype, realtype, fact, df_de_lambda):
seed(42)
ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
n = 5
# create diagonals according to size and dtype
d = generate_random_dtype_array((n,), realtype) + 4
e = generate_random_dtype_array((n-1,), dtype)
A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
b = A @ x_soln
# use lambda to determine what df, ef are
df, ef, info = df_de_lambda(d, e)
if fact == "N":
d[3] = 0
# obtain new df, ef
df, ef, info = df_de_lambda(d, e)
# solve using routine
df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b)
# test for the singular matrix.
assert info > 0 and info <= n
# non SPD matrix
d = generate_random_dtype_array((n,), realtype)
df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b)
assert info > 0 and info <= n
else:
# assuming that someone is using a singular factorization
df, ef, info = df_de_lambda(d, e)
df[0] = 0
ef[0] = 0
df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b, fact=fact,
df=df, ef=ef)
assert info > 0
@pytest.mark.parametrize('d,e,b,x',
[(np.array([4, 10, 29, 25, 5]),
np.array([-2, -6, 15, 8]),
np.array([[6, 10], [9, 4], [2, 9], [14, 65],
[7, 23]]),
np.array([[2.5, 2], [2, -1], [1, -3],
[-1, 6], [3, -5]])),
(np.array([16, 41, 46, 21]),
np.array([16 + 16j, 18 - 9j, 1 - 4j]),
np.array([[64 + 16j, -16 - 32j],
[93 + 62j, 61 - 66j],
[78 - 80j, 71 - 74j],
[14 - 27j, 35 + 15j]]),
np.array([[2 + 1j, -3 - 2j],
[1 + 1j, 1 + 1j],
[1 - 2j, 1 - 2j],
[1 - 1j, 2 + 1j]]))])
def test_ptsvx_NAG(d, e, b, x):
# test to assure that wrapper is consistent with NAG Manual Mark 26
# example problemss: f07jbf, f07jpf
# (Links expire, so please search for "NAG Library Manual Mark 26" online)
# obtain routine with correct type based on e.dtype
ptsvx = get_lapack_funcs('ptsvx', dtype=e.dtype)
# solve using routine
df, ef, x_ptsvx, rcond, ferr, berr, info = ptsvx(d, e, b)
# determine ptsvx's solution and x are the same.
assert_array_almost_equal(x, x_ptsvx)
@pytest.mark.parametrize('lower', [False, True])
@pytest.mark.parametrize('dtype', DTYPES)
def test_pptrs_pptri_pptrf_ppsv_ppcon(dtype, lower):
    """Round-trip the packed-storage Cholesky family against the dense
    cholesky/inv/solve results on a random Hermitian positive definite A."""
    seed(1234)
    atol = np.finfo(dtype).eps*100
    # Manual conversion to/from packed format is feasible here.
    n, nrhs = 10, 4
    a = generate_random_dtype_array([n, n], dtype=dtype)
    b = generate_random_dtype_array([n, nrhs], dtype=dtype)

    # make a Hermitian and diagonally dominant (hence positive definite)
    a = a.conj().T + a + np.eye(n, dtype=dtype) * dtype(5.)
    if lower:
        # (row, col) indices of the lower triangle, column by column —
        # the order used by LAPACK packed storage with uplo='L'
        inds = ([x for y in range(n) for x in range(y, n)],
                [y for y in range(n) for x in range(y, n)])
    else:
        # (row, col) indices of the upper triangle, column by column
        # (packed storage with uplo='U')
        inds = ([x for y in range(1, n+1) for x in range(y)],
                [y-1 for y in range(1, n+1) for x in range(y)])
    ap = a[inds]
    ppsv, pptrf, pptrs, pptri, ppcon = get_lapack_funcs(
        ('ppsv', 'pptrf', 'pptrs', 'pptri', 'ppcon'),
        dtype=dtype,
        ilp64="preferred")

    # packed Cholesky factor must match the dense one (packed the same way)
    ul, info = pptrf(n, ap, lower=lower)
    assert_equal(info, 0)
    aul = cholesky(a, lower=lower)[inds]
    assert_allclose(ul, aul, rtol=0, atol=atol)

    # packed inverse against dense inv
    uli, info = pptri(n, ul, lower=lower)
    assert_equal(info, 0)
    auli = inv(a)[inds]
    assert_allclose(uli, auli, rtol=0, atol=atol)

    # triangular solve with the packed factor against dense solve
    x, info = pptrs(n, ul, b, lower=lower)
    assert_equal(info, 0)
    bx = solve(a, b)
    assert_allclose(x, bx, rtol=0, atol=atol)

    # one-shot packed solver
    xv, info = ppsv(n, ap, b, lower=lower)
    assert_equal(info, 0)
    assert_allclose(xv, bx, rtol=0, atol=atol)

    # reciprocal condition estimate versus numpy's 1-norm condition number
    anorm = np.linalg.norm(a, 1)
    rcond, info = ppcon(n, ap, anorm=anorm, lower=lower)
    assert_equal(info, 0)
    assert_(abs(1/rcond - np.linalg.cond(a, p=1))*rcond < 1)
@pytest.mark.parametrize('dtype', DTYPES)
def test_gees_trexc(dtype):
    """?trexc moves a chosen eigenvalue of a Schur form to the leading
    position while preserving the decomposition z @ t @ z^H == a."""
    seed(1234)
    atol = np.finfo(dtype).eps*100

    n = 10
    a = generate_random_dtype_array([n, n], dtype=dtype)

    gees, trexc = get_lapack_funcs(('gees', 'trexc'), dtype=dtype)

    # Schur decomposition; the select callback is unused (no sorting)
    result = gees(lambda x: None, a, overwrite_a=False)
    assert_equal(result[-1], 0)

    t = result[0]
    z = result[-3]

    # remember the eigenvalue currently at (1-based) position 7
    d2 = t[6, 6]

    # complex Schur form is triangular; real form may have 2x2 blocks
    if dtype in COMPLEX_DTYPES:
        assert_allclose(t, np.triu(t), rtol=0, atol=atol)

    assert_allclose(z @ t @ z.conj().T, a, rtol=0, atol=atol)

    # move the 7th diagonal entry to the first position
    result = trexc(t, z, 7, 1)
    assert_equal(result[-1], 0)

    t = result[0]
    z = result[-2]

    if dtype in COMPLEX_DTYPES:
        assert_allclose(t, np.triu(t), rtol=0, atol=atol)

    assert_allclose(z @ t @ z.conj().T, a, rtol=0, atol=atol)

    # the reordered Schur form now leads with the remembered eigenvalue
    assert_allclose(t[0, 0], d2, rtol=0, atol=atol)
@pytest.mark.parametrize(
    "t, expect, ifst, ilst",
    [(np.array([[0.80, -0.11, 0.01, 0.03],
                [0.00, -0.10, 0.25, 0.35],
                [0.00, -0.65, -0.10, 0.20],
                [0.00, 0.00, 0.00, -0.10]]),
      np.array([[-0.1000, -0.6463, 0.0874, 0.2010],
                [0.2514, -0.1000, 0.0927, 0.3505],
                [0.0000, 0.0000, 0.8000, -0.0117],
                [0.0000, 0.0000, 0.0000, -0.1000]]),
      2, 1),
     (np.array([[-6.00 - 7.00j, 0.36 - 0.36j, -0.19 + 0.48j, 0.88 - 0.25j],
                [0.00 + 0.00j, -5.00 + 2.00j, -0.03 - 0.72j, -0.23 + 0.13j],
                [0.00 + 0.00j, 0.00 + 0.00j, 8.00 - 1.00j, 0.94 + 0.53j],
                [0.00 + 0.00j, 0.00 + 0.00j, 0.00 + 0.00j, 3.00 - 4.00j]]),
      np.array([[-5.0000 + 2.0000j, -0.1574 + 0.7143j,
                 0.1781 - 0.1913j, 0.3950 + 0.3861j],
                [0.0000 + 0.0000j, 8.0000 - 1.0000j,
                 1.0742 + 0.1447j, 0.2515 - 0.3397j],
                [0.0000 + 0.0000j, 0.0000 + 0.0000j,
                 3.0000 - 4.0000j, 0.2264 + 0.8962j],
                [0.0000 + 0.0000j, 0.0000 + 0.0000j,
                 0.0000 + 0.0000j, -6.0000 - 7.0000j]]),
      1, 4)])
def test_trexc_NAG(t, ifst, ilst, expect):
    """
    This test implements the example found in the NAG manual,
    f08qfc, f08qtc, f08qgc, f08quc.
    """
    # NAG manual provides accuracy up to 4 decimals
    atol = 1e-4
    trexc = get_lapack_funcs('trexc', dtype=t.dtype)

    # wantq=0: q is not accumulated, so t is passed again as a placeholder
    # for the q argument (its value is presumably ignored in this mode)
    result = trexc(t, t, ifst, ilst, wantq=0)
    assert_equal(result[-1], 0)

    t = result[0]
    assert_allclose(expect, t, atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
def test_gges_tgexc(dtype):
    """?tgexc swaps generalized eigenvalues of a (s, t) QZ pair while
    preserving both decompositions q @ s @ z^H == a and q @ t @ z^H == b."""
    if (
        dtype == np.float32 and
        sys.platform == 'darwin' and
        blas_provider == 'openblas' and
        blas_version < '0.3.21.dev'
    ):
        pytest.xfail("gges[float32] broken for OpenBLAS on macOS, see gh-16949")

    seed(1234)
    atol = np.finfo(dtype).eps*100

    n = 10
    a = generate_random_dtype_array([n, n], dtype=dtype)
    b = generate_random_dtype_array([n, n], dtype=dtype)

    gges, tgexc = get_lapack_funcs(('gges', 'tgexc'), dtype=dtype)

    # generalized Schur (QZ) decomposition; select callback unused
    result = gges(lambda x: None, a, b, overwrite_a=False, overwrite_b=False)
    assert_equal(result[-1], 0)

    s = result[0]
    t = result[1]
    q = result[-4]
    z = result[-3]

    # generalized eigenvalues at (1-based) positions 1 and 7
    d1 = s[0, 0] / t[0, 0]
    d2 = s[6, 6] / t[6, 6]

    # complex QZ factors are triangular; real ones may have 2x2 blocks
    if dtype in COMPLEX_DTYPES:
        assert_allclose(s, np.triu(s), rtol=0, atol=atol)
        assert_allclose(t, np.triu(t), rtol=0, atol=atol)

    assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol)
    assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol)

    # move the 7th generalized eigenvalue to the leading position
    result = tgexc(s, t, q, z, 7, 1)
    assert_equal(result[-1], 0)

    s = result[0]
    t = result[1]
    q = result[2]
    z = result[3]

    if dtype in COMPLEX_DTYPES:
        assert_allclose(s, np.triu(s), rtol=0, atol=atol)
        assert_allclose(t, np.triu(t), rtol=0, atol=atol)

    assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol)
    assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol)

    # eigenvalue order after the exchange: d2 leads, d1 follows
    assert_allclose(s[0, 0] / t[0, 0], d2, rtol=0, atol=atol)
    assert_allclose(s[1, 1] / t[1, 1], d1, rtol=0, atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
def test_gees_trsen(dtype):
    """Compute a Schur decomposition with ``gees``, then use ``trsen`` to
    reorder the eigenvalue at position 7 to the top and verify it."""
    seed(1234)
    atol = np.finfo(dtype).eps*100
    n = 10
    a = generate_random_dtype_array([n, n], dtype=dtype)
    gees, trsen, trsen_lwork = get_lapack_funcs(
        ('gees', 'trsen', 'trsen_lwork'), dtype=dtype)
    # dummy select callable: no eigenvalue sorting is requested
    result = gees(lambda x: None, a, overwrite_a=False)
    assert_equal(result[-1], 0)
    t = result[0]
    z = result[-3]
    # eigenvalue at (1-based) position 7 before reordering
    d2 = t[6, 6]
    if dtype in COMPLEX_DTYPES:
        assert_allclose(t, np.triu(t), rtol=0, atol=atol)
    assert_allclose(z @ t @ z.conj().T, a, rtol=0, atol=atol)
    # select only the 7th eigenvalue to be moved to the leading position
    select = np.zeros(n)
    select[6] = 1
    lwork = _compute_lwork(trsen_lwork, select, t)
    # the real flavors additionally take an integer workspace size
    if dtype in COMPLEX_DTYPES:
        result = trsen(select, t, z, lwork=lwork)
    else:
        result = trsen(select, t, z, lwork=lwork, liwork=lwork[1])
    assert_equal(result[-1], 0)
    t = result[0]
    z = result[1]
    if dtype in COMPLEX_DTYPES:
        assert_allclose(t, np.triu(t), rtol=0, atol=atol)
    assert_allclose(z @ t @ z.conj().T, a, rtol=0, atol=atol)
    # the selected eigenvalue must now be in the leading position
    assert_allclose(t[0, 0], d2, rtol=0, atol=atol)
@pytest.mark.parametrize(
"t, q, expect, select, expect_s, expect_sep",
[(np.array([[0.7995, -0.1144, 0.0060, 0.0336],
[0.0000, -0.0994, 0.2478, 0.3474],
[0.0000, -0.6483, -0.0994, 0.2026],
[0.0000, 0.0000, 0.0000, -0.1007]]),
np.array([[0.6551, 0.1037, 0.3450, 0.6641],
[0.5236, -0.5807, -0.6141, -0.1068],
[-0.5362, -0.3073, -0.2935, 0.7293],
[0.0956, 0.7467, -0.6463, 0.1249]]),
np.array([[0.3500, 0.4500, -0.1400, -0.1700],
[0.0900, 0.0700, -0.5399, 0.3500],
[-0.4400, -0.3300, -0.0300, 0.1700],
[0.2500, -0.3200, -0.1300, 0.1100]]),
np.array([1, 0, 0, 1]),
1.75e+00, 3.22e+00),
(np.array([[-6.0004 - 6.9999j, 0.3637 - 0.3656j,
-0.1880 + 0.4787j, 0.8785 - 0.2539j],
[0.0000 + 0.0000j, -5.0000 + 2.0060j,
-0.0307 - 0.7217j, -0.2290 + 0.1313j],
[0.0000 + 0.0000j, 0.0000 + 0.0000j,
7.9982 - 0.9964j, 0.9357 + 0.5359j],
[0.0000 + 0.0000j, 0.0000 + 0.0000j,
0.0000 + 0.0000j, 3.0023 - 3.9998j]]),
np.array([[-0.8347 - 0.1364j, -0.0628 + 0.3806j,
0.2765 - 0.0846j, 0.0633 - 0.2199j],
[0.0664 - 0.2968j, 0.2365 + 0.5240j,
-0.5877 - 0.4208j, 0.0835 + 0.2183j],
[-0.0362 - 0.3215j, 0.3143 - 0.5473j,
0.0576 - 0.5736j, 0.0057 - 0.4058j],
[0.0086 + 0.2958j, -0.3416 - 0.0757j,
-0.1900 - 0.1600j, 0.8327 - 0.1868j]]),
np.array([[-3.9702 - 5.0406j, -4.1108 + 3.7002j,
-0.3403 + 1.0098j, 1.2899 - 0.8590j],
[0.3397 - 1.5006j, 1.5201 - 0.4301j,
1.8797 - 5.3804j, 3.3606 + 0.6498j],
[3.3101 - 3.8506j, 2.4996 + 3.4504j,
0.8802 - 1.0802j, 0.6401 - 1.4800j],
[-1.0999 + 0.8199j, 1.8103 - 1.5905j,
3.2502 + 1.3297j, 1.5701 - 3.4397j]]),
np.array([1, 0, 0, 1]),
1.02e+00, 1.82e-01)])
def test_trsen_NAG(t, q, select, expect, expect_s, expect_sep):
    """
    This test implements the example found in the NAG manual,
    f08qgc, f08quc.
    """
    # NAG manual provides accuracy up to 4 and 2 decimals
    atol = 1e-4
    atol2 = 1e-2
    trsen, trsen_lwork = get_lapack_funcs(
        ('trsen', 'trsen_lwork'), dtype=t.dtype)
    lwork = _compute_lwork(trsen_lwork, select, t)
    # real flavors additionally take an integer workspace size
    if t.dtype in COMPLEX_DTYPES:
        result = trsen(select, t, q, lwork=lwork)
    else:
        result = trsen(select, t, q, lwork=lwork, liwork=lwork[1])
    assert_equal(result[-1], 0)
    t = result[0]
    q = result[1]
    # the result tuples of the real and complex flavors differ in length,
    # so the condition estimates s and sep sit at different indices
    if t.dtype in COMPLEX_DTYPES:
        s = result[4]
        sep = result[5]
    else:
        s = result[5]
        sep = result[6]
    # reconstructed matrix and reciprocal condition numbers must match
    # the NAG reference values
    assert_allclose(expect, q @ t @ q.conj().T, atol=atol)
    assert_allclose(expect_s, 1 / s, atol=atol2)
    assert_allclose(expect_sep, 1 / sep, atol=atol2)
@pytest.mark.parametrize('dtype', DTYPES)
def test_gges_tgsen(dtype):
    """Compute a generalized Schur decomposition with ``gges``, then use
    ``tgsen`` to move the eigenvalue at position 7 to the top and verify
    the factorization and eigenvalue positions."""
    if (
        dtype == np.float32 and
        sys.platform == 'darwin' and
        blas_provider == 'openblas' and
        blas_version < '0.3.21.dev'
    ):
        pytest.xfail("gges[float32] broken for OpenBLAS on macOS, see gh-16949")
    seed(1234)
    atol = np.finfo(dtype).eps*100
    n = 10
    a = generate_random_dtype_array([n, n], dtype=dtype)
    b = generate_random_dtype_array([n, n], dtype=dtype)
    gges, tgsen, tgsen_lwork = get_lapack_funcs(
        ('gges', 'tgsen', 'tgsen_lwork'), dtype=dtype)
    # dummy select callable: no eigenvalue sorting is requested
    result = gges(lambda x: None, a, b, overwrite_a=False, overwrite_b=False)
    assert_equal(result[-1], 0)
    s = result[0]
    t = result[1]
    q = result[-4]
    z = result[-3]
    # generalized eigenvalues at positions 1 and 7 before the reordering
    d1 = s[0, 0] / t[0, 0]
    d2 = s[6, 6] / t[6, 6]
    if dtype in COMPLEX_DTYPES:
        assert_allclose(s, np.triu(s), rtol=0, atol=atol)
        assert_allclose(t, np.triu(t), rtol=0, atol=atol)
    assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol)
    assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol)
    # select only the 7th eigenvalue to be moved to the leading position
    select = np.zeros(n)
    select[6] = 1
    lwork = _compute_lwork(tgsen_lwork, select, s, t)
    # off-by-one error in LAPACK, see gh-issue #13397
    lwork = (lwork[0]+1, lwork[1])
    result = tgsen(select, s, t, q, z, lwork=lwork)
    assert_equal(result[-1], 0)
    s = result[0]
    t = result[1]
    q = result[-7]
    z = result[-6]
    if dtype in COMPLEX_DTYPES:
        assert_allclose(s, np.triu(s), rtol=0, atol=atol)
        assert_allclose(t, np.triu(t), rtol=0, atol=atol)
    assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol)
    assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol)
    # the selected eigenvalue must now lead, the former leader follows
    assert_allclose(s[0, 0] / t[0, 0], d2, rtol=0, atol=atol)
    assert_allclose(s[1, 1] / t[1, 1], d1, rtol=0, atol=atol)
@pytest.mark.parametrize(
"a, b, c, d, e, f, rans, lans",
[(np.array([[4.0, 1.0, 1.0, 2.0],
[0.0, 3.0, 4.0, 1.0],
[0.0, 1.0, 3.0, 1.0],
[0.0, 0.0, 0.0, 6.0]]),
np.array([[1.0, 1.0, 1.0, 1.0],
[0.0, 3.0, 4.0, 1.0],
[0.0, 1.0, 3.0, 1.0],
[0.0, 0.0, 0.0, 4.0]]),
np.array([[-4.0, 7.0, 1.0, 12.0],
[-9.0, 2.0, -2.0, -2.0],
[-4.0, 2.0, -2.0, 8.0],
[-7.0, 7.0, -6.0, 19.0]]),
np.array([[2.0, 1.0, 1.0, 3.0],
[0.0, 1.0, 2.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 2.0]]),
np.array([[1.0, 1.0, 1.0, 2.0],
[0.0, 1.0, 4.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0]]),
np.array([[-7.0, 5.0, 0.0, 7.0],
[-5.0, 1.0, -8.0, 0.0],
[-1.0, 2.0, -3.0, 5.0],
[-3.0, 2.0, 0.0, 5.0]]),
np.array([[1.0, 1.0, 1.0, 1.0],
[-1.0, 2.0, -1.0, -1.0],
[-1.0, 1.0, 3.0, 1.0],
[-1.0, 1.0, -1.0, 4.0]]),
np.array([[4.0, -1.0, 1.0, -1.0],
[1.0, 3.0, -1.0, 1.0],
[-1.0, 1.0, 2.0, -1.0],
[1.0, -1.0, 1.0, 1.0]]))])
@pytest.mark.parametrize('dtype', REAL_DTYPES)
def test_tgsyl_NAG(a, b, c, d, e, f, rans, lans, dtype):
    """Solve the generalized Sylvester equation with ``tgsyl`` and check
    the solution pair (R, L) against the precomputed reference values
    (rans, lans) from the parametrization."""
    # reference solution is given to 4 decimals
    atol = 1e-4
    tgsyl = get_lapack_funcs(('tgsyl'), dtype=dtype)
    rout, lout, scale, dif, info = tgsyl(a, b, c, d, e, f)
    assert_equal(info, 0)
    assert_allclose(scale, 1.0, rtol=0, atol=np.finfo(dtype).eps*100,
                    err_msg="SCALE must be 1.0")
    assert_allclose(dif, 0.0, rtol=0, atol=np.finfo(dtype).eps*100,
                    err_msg="DIF must be nearly 0")
    assert_allclose(rout, rans, atol=atol,
                    err_msg="Solution for R is incorrect")
    assert_allclose(lout, lans, atol=atol,
                    err_msg="Solution for L is incorrect")
@pytest.mark.parametrize('dtype', REAL_DTYPES)
@pytest.mark.parametrize('trans', ('N', 'T'))
@pytest.mark.parametrize('ijob', [0, 1, 2, 3, 4])
def test_tgsyl(dtype, trans, ijob):
    """Exercise ``tgsyl`` on random quasi-triangular pencils built with
    ``qz`` and verify the defining equations of the generalized Sylvester
    problem for every supported trans/ijob combination."""
    atol = 1e-3 if dtype == np.float32 else 1e-10
    rng = np.random.default_rng(1685779866898198)
    m, n = 10, 15
    # tgsyl requires (a, d) and (b, e) in generalized Schur form
    a, d, *_ = qz(rng.uniform(-10, 10, [m, m]).astype(dtype),
                  rng.uniform(-10, 10, [m, m]).astype(dtype),
                  output='real')
    b, e, *_ = qz(rng.uniform(-10, 10, [n, n]).astype(dtype),
                  rng.uniform(-10, 10, [n, n]).astype(dtype),
                  output='real')
    c = rng.uniform(-2, 2, [m, n]).astype(dtype)
    f = rng.uniform(-2, 2, [m, n]).astype(dtype)
    tgsyl = get_lapack_funcs(('tgsyl'), dtype=dtype)
    rout, lout, scale, dif, info = tgsyl(a, b, c, d, e, f,
                                         trans=trans, ijob=ijob)
    assert info == 0, "INFO is non-zero"
    assert scale >= 0.0, "SCALE must be non-negative"
    if ijob == 0:
        assert_allclose(dif, 0.0, rtol=0, atol=np.finfo(dtype).eps*100,
                        err_msg="DIF must be 0 for ijob =0")
    else:
        assert dif >= 0.0, "DIF must be non-negative"
    # Only DIF is calculated for ijob = 3/4
    if ijob <= 2:
        # check the defining equations of the (transposed) generalized
        # Sylvester equation against the returned solution pair
        if trans == 'N':
            lhs1 = a @ rout - lout @ b
            rhs1 = scale*c
            lhs2 = d @ rout - lout @ e
            rhs2 = scale*f
        elif trans == 'T':
            lhs1 = np.transpose(a) @ rout + np.transpose(d) @ lout
            rhs1 = scale*c
            lhs2 = rout @ np.transpose(b) + lout @ np.transpose(e)
            rhs2 = -1.0*scale*f
        assert_allclose(lhs1, rhs1, atol=atol, rtol=0.,
                        err_msg='lhs1 and rhs1 do not match')
        assert_allclose(lhs2, rhs2, atol=atol, rtol=0.,
                        err_msg='lhs2 and rhs2 do not match')
| 129,487
| 37.039953
| 80
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/linalg/tests/test_matmul_toeplitz.py
|
"""Test functions for linalg.matmul_toeplitz function
"""
import numpy as np
from scipy.linalg import toeplitz, matmul_toeplitz
from pytest import raises as assert_raises
from numpy.testing import assert_allclose
class TestMatmulToeplitz:
def setup_method(self):
self.rng = np.random.RandomState(42)
self.tolerance = 1.5e-13
def test_real(self):
cases = []
n = 1
c = self.rng.normal(size=n)
r = self.rng.normal(size=n)
x = self.rng.normal(size=(n, 1))
cases.append((x, c, r, False))
n = 2
c = self.rng.normal(size=n)
r = self.rng.normal(size=n)
x = self.rng.normal(size=(n, 1))
cases.append((x, c, r, False))
n = 101
c = self.rng.normal(size=n)
r = self.rng.normal(size=n)
x = self.rng.normal(size=(n, 1))
cases.append((x, c, r, True))
n = 1000
c = self.rng.normal(size=n)
r = self.rng.normal(size=n)
x = self.rng.normal(size=(n, 1))
cases.append((x, c, r, False))
n = 100
c = self.rng.normal(size=n)
r = self.rng.normal(size=n)
x = self.rng.normal(size=(n, self.rng.randint(1, 10)))
cases.append((x, c, r, False))
n = 100
c = self.rng.normal(size=(n, 1))
r = self.rng.normal(size=(n, 1))
x = self.rng.normal(size=(n, self.rng.randint(1, 10)))
cases.append((x, c, r, True))
n = 100
c = self.rng.normal(size=(n, 1))
r = None
x = self.rng.normal(size=(n, self.rng.randint(1, 10)))
cases.append((x, c, r, True, -1))
n = 100
c = self.rng.normal(size=(n, 1))
r = None
x = self.rng.normal(size=n)
cases.append((x, c, r, False))
n = 101
c = self.rng.normal(size=n)
r = self.rng.normal(size=n-27)
x = self.rng.normal(size=(n-27, 1))
cases.append((x, c, r, True))
n = 100
c = self.rng.normal(size=n)
r = self.rng.normal(size=n//4)
x = self.rng.normal(size=(n//4, self.rng.randint(1, 10)))
cases.append((x, c, r, True))
[self.do(*i) for i in cases]
def test_complex(self):
n = 127
c = self.rng.normal(size=(n, 1)) + self.rng.normal(size=(n, 1))*1j
r = self.rng.normal(size=(n, 1)) + self.rng.normal(size=(n, 1))*1j
x = self.rng.normal(size=(n, 3)) + self.rng.normal(size=(n, 3))*1j
self.do(x, c, r, False)
n = 100
c = self.rng.normal(size=(n, 1)) + self.rng.normal(size=(n, 1))*1j
r = self.rng.normal(size=(n//2, 1)) +\
self.rng.normal(size=(n//2, 1))*1j
x = self.rng.normal(size=(n//2, 3)) +\
self.rng.normal(size=(n//2, 3))*1j
self.do(x, c, r, False)
def test_exceptions(self):
n = 100
c = self.rng.normal(size=n)
r = self.rng.normal(size=2*n)
x = self.rng.normal(size=n)
assert_raises(ValueError, matmul_toeplitz, (c, r), x, True)
n = 100
c = self.rng.normal(size=n)
r = self.rng.normal(size=n)
x = self.rng.normal(size=n-1)
assert_raises(ValueError, matmul_toeplitz, (c, r), x, True)
n = 100
c = self.rng.normal(size=n)
r = self.rng.normal(size=n//2)
x = self.rng.normal(size=n//2-1)
assert_raises(ValueError, matmul_toeplitz, (c, r), x, True)
# For toeplitz matrices, matmul_toeplitz() should be equivalent to @.
def do(self, x, c, r=None, check_finite=False, workers=None):
if r is None:
actual = matmul_toeplitz(c, x, check_finite, workers)
else:
actual = matmul_toeplitz((c, r), x, check_finite)
desired = toeplitz(c, r) @ x
assert_allclose(actual, desired,
rtol=self.tolerance, atol=self.tolerance)
| 3,870
| 29.722222
| 74
|
py
|
scipy
|
scipy-main/scipy/linalg/tests/test_decomp_update.py
|
import itertools
import numpy as np
from numpy.testing import assert_, assert_allclose, assert_equal
from pytest import raises as assert_raises
from scipy import linalg
import scipy.linalg._decomp_update as _decomp_update
from scipy.linalg._decomp_update import qr_delete, qr_update, qr_insert
def assert_unitary(a, rtol=None, atol=None, assert_sqr=True):
    """Assert that ``a`` has orthonormal columns (and is square if requested).

    Default tolerances are derived from the precision of ``a``'s dtype.
    """
    finfo = np.finfo(a.dtype)
    if rtol is None:
        rtol = 10.0 ** -(finfo.precision - 2)
    if atol is None:
        atol = 10 * finfo.eps
    if assert_sqr:
        assert_(a.shape[0] == a.shape[1], 'unitary matrices must be square')
    gram = a.conj().T @ a
    assert_allclose(gram, np.eye(a.shape[1]), rtol=rtol, atol=atol)
def assert_upper_tri(a, rtol=None, atol=None):
    """Assert that everything strictly below the diagonal of ``a`` is ~0."""
    finfo = np.finfo(a.dtype)
    if rtol is None:
        rtol = 10.0 ** -(finfo.precision - 2)
    if atol is None:
        atol = 2 * finfo.eps
    below_diag = np.tri(a.shape[0], a.shape[1], -1, np.bool_)
    assert_allclose(a[below_diag], 0.0, rtol=rtol, atol=atol)
def check_qr(q, r, a, rtol, atol, assert_sqr=True):
    """Assert that ``(q, r)`` is a valid QR factorization of ``a``."""
    assert_unitary(q, rtol, atol, assert_sqr)
    assert_upper_tri(r, rtol, atol)
    assert_allclose(q @ r, a, rtol=rtol, atol=atol)
def make_strided(arrs):
    """Return views of the input arrays that have non-unit positive strides.

    Each array is copied into an oversized zero buffer; the returned view
    covers the original data via a (step, offset) slice, cycling through a
    fixed list of stride patterns.
    """
    strides = [(3, 7), (2, 2), (3, 4), (4, 2), (5, 4), (2, 3), (2, 1), (4, 5)]
    kmax = len(strides)
    k = 0
    views = []
    for a in arrs:
        if a.ndim == 1:
            step, off = strides[k % kmax]
            k += 1
            base = np.zeros(step * a.shape[0] + off, a.dtype)
            view = base[off::step]
            view[...] = a
        elif a.ndim == 2:
            step0, off0 = strides[k % kmax]
            step1, off1 = strides[(k + 1) % kmax]
            k += 2
            base = np.zeros((step0 * a.shape[0] + off0,
                             step1 * a.shape[1] + off1), a.dtype)
            view = base[off0::step0, off1::step1]
            view[...] = a
        else:
            raise ValueError('make_strided only works for ndim = 1 or'
                             ' 2 arrays')
        views.append(view)
    return views
def negate_strides(arrs):
    """Return copies of the input arrays stored with negative strides."""
    out = []
    for a in arrs:
        rev = np.zeros_like(a)
        # reverse every axis so the writable view walks memory backwards
        if rev.ndim == 2:
            rev = rev[::-1, ::-1]
        elif rev.ndim == 1:
            rev = rev[::-1]
        else:
            raise ValueError('negate_strides only works for ndim = 1 or'
                             ' 2 arrays')
        rev[...] = a
        out.append(rev)
    return out
def nonitemsize_strides(arrs):
    """Return copies whose strides are not a multiple of the itemsize.

    Each array is embedded in a record array carrying one byte of junk
    padding per element; a view of the data field is returned.
    """
    views = []
    for a in arrs:
        padded = np.zeros(a.shape, [('a', a.dtype), ('junk', 'S1')])
        field = padded.getfield(a.dtype)
        field[...] = a
        views.append(field)
    return views
def make_nonnative(arrs):
    """Return copies of the inputs with byte-swapped (non-native) dtypes."""
    out = []
    for a in arrs:
        out.append(a.astype(a.dtype.newbyteorder()))
    return out
class BaseQRdeltas:
    """Shared fixture base for the qr_delete/qr_insert/qr_update tests.

    Subclasses set ``dtype``; ``generate`` builds a deterministic random
    matrix of the requested shape along with its QR factorization.
    """
    def setup_method(self):
        self.rtol = 10.0 ** -(np.finfo(self.dtype).precision-2)
        self.atol = 10 * np.finfo(self.dtype).eps

    def generate(self, type, mode='full'):
        # fixed seed keeps each shape's test matrix reproducible
        np.random.seed(29382)
        shapes = {'sqr': (8, 8), 'tall': (12, 7), 'fat': (7, 12),
                  'Mx1': (8, 1), '1xN': (1, 8), '1x1': (1, 1)}
        shape = shapes[type]
        a = np.random.random(shape)
        if np.iscomplexobj(self.dtype.type(1)):
            a = a + 1j * np.random.random(shape)
        a = a.astype(self.dtype)
        q, r = linalg.qr(a, mode=mode)
        return a, q, r
class BaseQRdelete(BaseQRdeltas):
def test_sqr_1_row(self):
a, q, r = self.generate('sqr')
for row in range(r.shape[0]):
q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
a1 = np.delete(a, row, 0)
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_sqr_p_row(self):
a, q, r = self.generate('sqr')
for ndel in range(2, 6):
for row in range(a.shape[0]-ndel):
q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
a1 = np.delete(a, slice(row, row+ndel), 0)
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_sqr_1_col(self):
a, q, r = self.generate('sqr')
for col in range(r.shape[1]):
q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
a1 = np.delete(a, col, 1)
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_sqr_p_col(self):
a, q, r = self.generate('sqr')
for ndel in range(2, 6):
for col in range(r.shape[1]-ndel):
q1, r1 = qr_delete(q, r, col, ndel, which='col',
overwrite_qr=False)
a1 = np.delete(a, slice(col, col+ndel), 1)
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_tall_1_row(self):
a, q, r = self.generate('tall')
for row in range(r.shape[0]):
q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
a1 = np.delete(a, row, 0)
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_tall_p_row(self):
a, q, r = self.generate('tall')
for ndel in range(2, 6):
for row in range(a.shape[0]-ndel):
q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
a1 = np.delete(a, slice(row, row+ndel), 0)
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_tall_1_col(self):
a, q, r = self.generate('tall')
for col in range(r.shape[1]):
q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
a1 = np.delete(a, col, 1)
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_tall_p_col(self):
a, q, r = self.generate('tall')
for ndel in range(2, 6):
for col in range(r.shape[1]-ndel):
q1, r1 = qr_delete(q, r, col, ndel, which='col',
overwrite_qr=False)
a1 = np.delete(a, slice(col, col+ndel), 1)
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_fat_1_row(self):
a, q, r = self.generate('fat')
for row in range(r.shape[0]):
q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
a1 = np.delete(a, row, 0)
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_fat_p_row(self):
a, q, r = self.generate('fat')
for ndel in range(2, 6):
for row in range(a.shape[0]-ndel):
q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
a1 = np.delete(a, slice(row, row+ndel), 0)
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_fat_1_col(self):
a, q, r = self.generate('fat')
for col in range(r.shape[1]):
q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
a1 = np.delete(a, col, 1)
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_fat_p_col(self):
a, q, r = self.generate('fat')
for ndel in range(2, 6):
for col in range(r.shape[1]-ndel):
q1, r1 = qr_delete(q, r, col, ndel, which='col',
overwrite_qr=False)
a1 = np.delete(a, slice(col, col+ndel), 1)
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_economic_1_row(self):
# this test always starts and ends with an economic decomp.
a, q, r = self.generate('tall', 'economic')
for row in range(r.shape[0]):
q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
a1 = np.delete(a, row, 0)
check_qr(q1, r1, a1, self.rtol, self.atol, False)
# for economic row deletes
# eco - prow = eco
# eco - prow = sqr
# eco - prow = fat
def base_economic_p_row_xxx(self, ndel):
a, q, r = self.generate('tall', 'economic')
for row in range(a.shape[0]-ndel):
q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
a1 = np.delete(a, slice(row, row+ndel), 0)
check_qr(q1, r1, a1, self.rtol, self.atol, False)
def test_economic_p_row_economic(self):
# (12, 7) - (3, 7) = (9,7) --> stays economic
self.base_economic_p_row_xxx(3)
def test_economic_p_row_sqr(self):
# (12, 7) - (5, 7) = (7, 7) --> becomes square
self.base_economic_p_row_xxx(5)
def test_economic_p_row_fat(self):
# (12, 7) - (7,7) = (5, 7) --> becomes fat
self.base_economic_p_row_xxx(7)
def test_economic_1_col(self):
a, q, r = self.generate('tall', 'economic')
for col in range(r.shape[1]):
q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
a1 = np.delete(a, col, 1)
check_qr(q1, r1, a1, self.rtol, self.atol, False)
def test_economic_p_col(self):
a, q, r = self.generate('tall', 'economic')
for ndel in range(2, 6):
for col in range(r.shape[1]-ndel):
q1, r1 = qr_delete(q, r, col, ndel, which='col',
overwrite_qr=False)
a1 = np.delete(a, slice(col, col+ndel), 1)
check_qr(q1, r1, a1, self.rtol, self.atol, False)
def test_Mx1_1_row(self):
a, q, r = self.generate('Mx1')
for row in range(r.shape[0]):
q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
a1 = np.delete(a, row, 0)
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_Mx1_p_row(self):
a, q, r = self.generate('Mx1')
for ndel in range(2, 6):
for row in range(a.shape[0]-ndel):
q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
a1 = np.delete(a, slice(row, row+ndel), 0)
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_1xN_1_col(self):
a, q, r = self.generate('1xN')
for col in range(r.shape[1]):
q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
a1 = np.delete(a, col, 1)
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_1xN_p_col(self):
a, q, r = self.generate('1xN')
for ndel in range(2, 6):
for col in range(r.shape[1]-ndel):
q1, r1 = qr_delete(q, r, col, ndel, which='col',
overwrite_qr=False)
a1 = np.delete(a, slice(col, col+ndel), 1)
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_Mx1_economic_1_row(self):
a, q, r = self.generate('Mx1', 'economic')
for row in range(r.shape[0]):
q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
a1 = np.delete(a, row, 0)
check_qr(q1, r1, a1, self.rtol, self.atol, False)
def test_Mx1_economic_p_row(self):
a, q, r = self.generate('Mx1', 'economic')
for ndel in range(2, 6):
for row in range(a.shape[0]-ndel):
q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
a1 = np.delete(a, slice(row, row+ndel), 0)
check_qr(q1, r1, a1, self.rtol, self.atol, False)
def test_delete_last_1_row(self):
# full and eco are the same for 1xN
a, q, r = self.generate('1xN')
q1, r1 = qr_delete(q, r, 0, 1, 'row')
assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
def test_delete_last_p_row(self):
a, q, r = self.generate('tall', 'full')
q1, r1 = qr_delete(q, r, 0, a.shape[0], 'row')
assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
a, q, r = self.generate('tall', 'economic')
q1, r1 = qr_delete(q, r, 0, a.shape[0], 'row')
assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
def test_delete_last_1_col(self):
a, q, r = self.generate('Mx1', 'economic')
q1, r1 = qr_delete(q, r, 0, 1, 'col')
assert_equal(q1, np.ndarray(shape=(q.shape[0], 0), dtype=q.dtype))
assert_equal(r1, np.ndarray(shape=(0, 0), dtype=r.dtype))
a, q, r = self.generate('Mx1', 'full')
q1, r1 = qr_delete(q, r, 0, 1, 'col')
assert_unitary(q1)
assert_(q1.dtype == q.dtype)
assert_(q1.shape == q.shape)
assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype))
def test_delete_last_p_col(self):
a, q, r = self.generate('tall', 'full')
q1, r1 = qr_delete(q, r, 0, a.shape[1], 'col')
assert_unitary(q1)
assert_(q1.dtype == q.dtype)
assert_(q1.shape == q.shape)
assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype))
a, q, r = self.generate('tall', 'economic')
q1, r1 = qr_delete(q, r, 0, a.shape[1], 'col')
assert_equal(q1, np.ndarray(shape=(q.shape[0], 0), dtype=q.dtype))
assert_equal(r1, np.ndarray(shape=(0, 0), dtype=r.dtype))
def test_delete_1x1_row_col(self):
a, q, r = self.generate('1x1')
q1, r1 = qr_delete(q, r, 0, 1, 'row')
assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
a, q, r = self.generate('1x1')
q1, r1 = qr_delete(q, r, 0, 1, 'col')
assert_unitary(q1)
assert_(q1.dtype == q.dtype)
assert_(q1.shape == q.shape)
assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype))
# all full qr, row deletes and single column deletes should be able to
# handle any non negative strides. (only row and column vector
# operations are used.) p column delete require fortran ordered
# Q and R and will make a copy as necessary. Economic qr row deletes
# requre a contigous q.
def base_non_simple_strides(self, adjust_strides, ks, p, which,
overwriteable):
if which == 'row':
qind = (slice(p,None), slice(p,None))
rind = (slice(p,None), slice(None))
else:
qind = (slice(None), slice(None))
rind = (slice(None), slice(None,-p))
for type, k in itertools.product(['sqr', 'tall', 'fat'], ks):
a, q0, r0, = self.generate(type)
qs, rs = adjust_strides((q0, r0))
if p == 1:
a1 = np.delete(a, k, 0 if which == 'row' else 1)
else:
s = slice(k,k+p)
if k < 0:
s = slice(k, k + p +
(a.shape[0] if which == 'row' else a.shape[1]))
a1 = np.delete(a, s, 0 if which == 'row' else 1)
# for each variable, q, r we try with it strided and
# overwrite=False. Then we try with overwrite=True, and make
# sure that q and r are still overwritten.
q = q0.copy('F')
r = r0.copy('F')
q1, r1 = qr_delete(qs, r, k, p, which, False)
check_qr(q1, r1, a1, self.rtol, self.atol)
q1o, r1o = qr_delete(qs, r, k, p, which, True)
check_qr(q1o, r1o, a1, self.rtol, self.atol)
if overwriteable:
assert_allclose(q1o, qs[qind], rtol=self.rtol, atol=self.atol)
assert_allclose(r1o, r[rind], rtol=self.rtol, atol=self.atol)
q = q0.copy('F')
r = r0.copy('F')
q2, r2 = qr_delete(q, rs, k, p, which, False)
check_qr(q2, r2, a1, self.rtol, self.atol)
q2o, r2o = qr_delete(q, rs, k, p, which, True)
check_qr(q2o, r2o, a1, self.rtol, self.atol)
if overwriteable:
assert_allclose(q2o, q[qind], rtol=self.rtol, atol=self.atol)
assert_allclose(r2o, rs[rind], rtol=self.rtol, atol=self.atol)
q = q0.copy('F')
r = r0.copy('F')
# since some of these were consumed above
qs, rs = adjust_strides((q, r))
q3, r3 = qr_delete(qs, rs, k, p, which, False)
check_qr(q3, r3, a1, self.rtol, self.atol)
q3o, r3o = qr_delete(qs, rs, k, p, which, True)
check_qr(q3o, r3o, a1, self.rtol, self.atol)
if overwriteable:
assert_allclose(q2o, qs[qind], rtol=self.rtol, atol=self.atol)
assert_allclose(r3o, rs[rind], rtol=self.rtol, atol=self.atol)
def test_non_unit_strides_1_row(self):
self.base_non_simple_strides(make_strided, [0], 1, 'row', True)
def test_non_unit_strides_p_row(self):
self.base_non_simple_strides(make_strided, [0], 3, 'row', True)
def test_non_unit_strides_1_col(self):
self.base_non_simple_strides(make_strided, [0], 1, 'col', True)
def test_non_unit_strides_p_col(self):
self.base_non_simple_strides(make_strided, [0], 3, 'col', False)
def test_neg_strides_1_row(self):
self.base_non_simple_strides(negate_strides, [0], 1, 'row', False)
def test_neg_strides_p_row(self):
self.base_non_simple_strides(negate_strides, [0], 3, 'row', False)
def test_neg_strides_1_col(self):
self.base_non_simple_strides(negate_strides, [0], 1, 'col', False)
def test_neg_strides_p_col(self):
self.base_non_simple_strides(negate_strides, [0], 3, 'col', False)
def test_non_itemize_strides_1_row(self):
self.base_non_simple_strides(nonitemsize_strides, [0], 1, 'row', False)
def test_non_itemize_strides_p_row(self):
self.base_non_simple_strides(nonitemsize_strides, [0], 3, 'row', False)
def test_non_itemize_strides_1_col(self):
self.base_non_simple_strides(nonitemsize_strides, [0], 1, 'col', False)
def test_non_itemize_strides_p_col(self):
self.base_non_simple_strides(nonitemsize_strides, [0], 3, 'col', False)
def test_non_native_byte_order_1_row(self):
self.base_non_simple_strides(make_nonnative, [0], 1, 'row', False)
def test_non_native_byte_order_p_row(self):
self.base_non_simple_strides(make_nonnative, [0], 3, 'row', False)
def test_non_native_byte_order_1_col(self):
self.base_non_simple_strides(make_nonnative, [0], 1, 'col', False)
def test_non_native_byte_order_p_col(self):
self.base_non_simple_strides(make_nonnative, [0], 3, 'col', False)
def test_neg_k(self):
a, q, r = self.generate('sqr')
for k, p, w in itertools.product([-3, -7], [1, 3], ['row', 'col']):
q1, r1 = qr_delete(q, r, k, p, w, overwrite_qr=False)
if w == 'row':
a1 = np.delete(a, slice(k+a.shape[0], k+p+a.shape[0]), 0)
else:
a1 = np.delete(a, slice(k+a.shape[0], k+p+a.shape[1]), 1)
check_qr(q1, r1, a1, self.rtol, self.atol)
    def base_overwrite_qr(self, which, p, test_C, test_F, mode='full'):
        """Check overwrite_qr behavior of qr_delete for C/F ordered inputs.

        ``which`` selects 'row' or 'col' deletion, ``p`` the count deleted;
        ``test_C``/``test_F`` choose which memory orders must support
        in-place overwriting for the given ``mode`` ('full' or 'economic').
        """
        assert_sqr = True if mode == 'full' else False
        # index slices locating the post-delete result inside the
        # (overwritten) input arrays
        if which == 'row':
            qind = (slice(p,None), slice(p,None))
            rind = (slice(p,None), slice(None))
        else:
            qind = (slice(None), slice(None))
            rind = (slice(None), slice(None,-p))
        a, q0, r0 = self.generate('sqr', mode)
        if p == 1:
            a1 = np.delete(a, 3, 0 if which == 'row' else 1)
        else:
            a1 = np.delete(a, slice(3, 3+p), 0 if which == 'row' else 1)
        # don't overwrite
        q = q0.copy('F')
        r = r0.copy('F')
        q1, r1 = qr_delete(q, r, 3, p, which, False)
        check_qr(q1, r1, a1, self.rtol, self.atol, assert_sqr)
        # the inputs must be untouched when overwrite_qr=False
        check_qr(q, r, a, self.rtol, self.atol, assert_sqr)
        if test_F:
            q = q0.copy('F')
            r = r0.copy('F')
            q2, r2 = qr_delete(q, r, 3, p, which, True)
            check_qr(q2, r2, a1, self.rtol, self.atol, assert_sqr)
            # verify the overwriting
            assert_allclose(q2, q[qind], rtol=self.rtol, atol=self.atol)
            assert_allclose(r2, r[rind], rtol=self.rtol, atol=self.atol)
        if test_C:
            q = q0.copy('C')
            r = r0.copy('C')
            q3, r3 = qr_delete(q, r, 3, p, which, True)
            check_qr(q3, r3, a1, self.rtol, self.atol, assert_sqr)
            assert_allclose(q3, q[qind], rtol=self.rtol, atol=self.atol)
            assert_allclose(r3, r[rind], rtol=self.rtol, atol=self.atol)
def test_overwrite_qr_1_row(self):
# any positively strided q and r.
self.base_overwrite_qr('row', 1, True, True)
def test_overwrite_economic_qr_1_row(self):
# Any contiguous q and positively strided r.
self.base_overwrite_qr('row', 1, True, True, 'economic')
def test_overwrite_qr_1_col(self):
# any positively strided q and r.
# full and eco share code paths
self.base_overwrite_qr('col', 1, True, True)
def test_overwrite_qr_p_row(self):
# any positively strided q and r.
self.base_overwrite_qr('row', 3, True, True)
def test_overwrite_economic_qr_p_row(self):
# any contiguous q and positively strided r
self.base_overwrite_qr('row', 3, True, True, 'economic')
def test_overwrite_qr_p_col(self):
# only F orderd q and r can be overwritten for cols
# full and eco share code paths
self.base_overwrite_qr('col', 3, False, True)
def test_bad_which(self):
a, q, r = self.generate('sqr')
assert_raises(ValueError, qr_delete, q, r, 0, which='foo')
def test_bad_k(self):
a, q, r = self.generate('tall')
assert_raises(ValueError, qr_delete, q, r, q.shape[0], 1)
assert_raises(ValueError, qr_delete, q, r, -q.shape[0]-1, 1)
assert_raises(ValueError, qr_delete, q, r, r.shape[0], 1, 'col')
assert_raises(ValueError, qr_delete, q, r, -r.shape[0]-1, 1, 'col')
def test_bad_p(self):
a, q, r = self.generate('tall')
# p must be positive
assert_raises(ValueError, qr_delete, q, r, 0, -1)
assert_raises(ValueError, qr_delete, q, r, 0, -1, 'col')
# and nonzero
assert_raises(ValueError, qr_delete, q, r, 0, 0)
assert_raises(ValueError, qr_delete, q, r, 0, 0, 'col')
# must have at least k+p rows or cols, depending.
assert_raises(ValueError, qr_delete, q, r, 3, q.shape[0]-2)
assert_raises(ValueError, qr_delete, q, r, 3, r.shape[1]-2, 'col')
def test_empty_q(self):
a, q, r = self.generate('tall')
# same code path for 'row' and 'col'
assert_raises(ValueError, qr_delete, np.array([]), r, 0, 1)
def test_empty_r(self):
a, q, r = self.generate('tall')
# same code path for 'row' and 'col'
assert_raises(ValueError, qr_delete, q, np.array([]), 0, 1)
def test_mismatched_q_and_r(self):
a, q, r = self.generate('tall')
r = r[1:]
assert_raises(ValueError, qr_delete, q, r, 0, 1)
def test_unsupported_dtypes(self):
dts = ['int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float16', 'longdouble', 'longcomplex',
'bool']
a, q0, r0 = self.generate('tall')
for dtype in dts:
q = q0.real.astype(dtype)
with np.errstate(invalid="ignore"):
r = r0.real.astype(dtype)
assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'row')
assert_raises(ValueError, qr_delete, q, r0, 0, 2, 'row')
assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'col')
assert_raises(ValueError, qr_delete, q, r0, 0, 2, 'col')
assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'row')
assert_raises(ValueError, qr_delete, q0, r, 0, 2, 'row')
assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'col')
assert_raises(ValueError, qr_delete, q0, r, 0, 2, 'col')
def test_check_finite(self):
a0, q0, r0 = self.generate('tall')
q = q0.copy('F')
q[1,1] = np.nan
assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'row')
assert_raises(ValueError, qr_delete, q, r0, 0, 3, 'row')
assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'col')
assert_raises(ValueError, qr_delete, q, r0, 0, 3, 'col')
r = r0.copy('F')
r[1,1] = np.nan
assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'row')
assert_raises(ValueError, qr_delete, q0, r, 0, 3, 'row')
assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'col')
assert_raises(ValueError, qr_delete, q0, r, 0, 3, 'col')
def test_qr_scalar(self):
a, q, r = self.generate('1x1')
assert_raises(ValueError, qr_delete, q[0, 0], r, 0, 1, 'row')
assert_raises(ValueError, qr_delete, q, r[0, 0], 0, 1, 'row')
assert_raises(ValueError, qr_delete, q[0, 0], r, 0, 1, 'col')
assert_raises(ValueError, qr_delete, q, r[0, 0], 0, 1, 'col')
# Concrete qr_delete test classes, one per LAPACK type code:
# 'f' = float32, 'F' = complex64, 'd' = float64, 'D' = complex128.
class TestQRdelete_f(BaseQRdelete):
    dtype = np.dtype('f')
class TestQRdelete_F(BaseQRdelete):
    dtype = np.dtype('F')
class TestQRdelete_d(BaseQRdelete):
    dtype = np.dtype('d')
class TestQRdelete_D(BaseQRdelete):
    dtype = np.dtype('D')
class BaseQRinsert(BaseQRdeltas):
    """Shared tests for ``qr_insert``.

    For each matrix shape (square, tall, fat, Mx1, 1xN, 1x1, economic)
    rows or columns are inserted into A and the updated (Q, R) pair is
    checked against a fresh factorization of the modified matrix.
    """

    def generate(self, type, mode='full', which='row', p=1):
        """Return ``(a, q, r, u)``.

        ``u`` holds ``p`` rows or columns (selected by *which*) to be
        inserted into ``a``; it matches the dtype of the decomposition.
        Raises ValueError for an unrecognized *which*.
        """
        a, q, r = super().generate(type, mode)
        assert_(p > 0)
        # super call set the seed...
        if which == 'row':
            if p == 1:
                u = np.random.random(a.shape[1])
            else:
                u = np.random.random((p, a.shape[1]))
        elif which == 'col':
            if p == 1:
                u = np.random.random(a.shape[0])
            else:
                u = np.random.random((a.shape[0], p))
        else:
            # BUG FIX: previously the ValueError was constructed but never
            # raised, so a bad `which` silently fell through and later
            # crashed with an unrelated UnboundLocalError on `u`.
            raise ValueError('which should be either "row" or "col"')
        if np.iscomplexobj(self.dtype.type(1)):
            b = np.random.random(u.shape)
            u = u + 1j * b
        u = u.astype(self.dtype)
        return a, q, r, u

    def test_sqr_1_row(self):
        a, q, r, u = self.generate('sqr', which='row')
        for row in range(r.shape[0] + 1):
            q1, r1 = qr_insert(q, r, u, row)
            a1 = np.insert(a, row, u, 0)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_sqr_p_row(self):
        # sqr + rows --> fat always
        a, q, r, u = self.generate('sqr', which='row', p=3)
        for row in range(r.shape[0] + 1):
            q1, r1 = qr_insert(q, r, u, row)
            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_sqr_1_col(self):
        a, q, r, u = self.generate('sqr', which='col')
        for col in range(r.shape[1] + 1):
            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
            a1 = np.insert(a, col, u, 1)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_sqr_p_col(self):
        # sqr + cols --> fat always
        a, q, r, u = self.generate('sqr', which='col', p=3)
        for col in range(r.shape[1] + 1):
            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
            a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_tall_1_row(self):
        a, q, r, u = self.generate('tall', which='row')
        for row in range(r.shape[0] + 1):
            q1, r1 = qr_insert(q, r, u, row)
            a1 = np.insert(a, row, u, 0)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_tall_p_row(self):
        # tall + rows --> tall always
        a, q, r, u = self.generate('tall', which='row', p=3)
        for row in range(r.shape[0] + 1):
            q1, r1 = qr_insert(q, r, u, row)
            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_tall_1_col(self):
        a, q, r, u = self.generate('tall', which='col')
        for col in range(r.shape[1] + 1):
            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
            a1 = np.insert(a, col, u, 1)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    # for column adds to tall matrices there are three cases to test
    # tall + pcol --> tall
    # tall + pcol --> sqr
    # tall + pcol --> fat
    def base_tall_p_col_xxx(self, p):
        """Insert p columns into a tall matrix at every position."""
        a, q, r, u = self.generate('tall', which='col', p=p)
        for col in range(r.shape[1] + 1):
            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
            a1 = np.insert(a, np.full(p, col, np.intp), u, 1)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_tall_p_col_tall(self):
        # 12x7 + 12x3 = 12x10 --> stays tall
        self.base_tall_p_col_xxx(3)

    def test_tall_p_col_sqr(self):
        # 12x7 + 12x5 = 12x12 --> becomes sqr
        self.base_tall_p_col_xxx(5)

    def test_tall_p_col_fat(self):
        # 12x7 + 12x7 = 12x14 --> becomes fat
        self.base_tall_p_col_xxx(7)

    def test_fat_1_row(self):
        a, q, r, u = self.generate('fat', which='row')
        for row in range(r.shape[0] + 1):
            q1, r1 = qr_insert(q, r, u, row)
            a1 = np.insert(a, row, u, 0)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    # for row adds to fat matrices there are three cases to test
    # fat + prow --> fat
    # fat + prow --> sqr
    # fat + prow --> tall
    def base_fat_p_row_xxx(self, p):
        """Insert p rows into a fat matrix at every position."""
        a, q, r, u = self.generate('fat', which='row', p=p)
        for row in range(r.shape[0] + 1):
            q1, r1 = qr_insert(q, r, u, row)
            a1 = np.insert(a, np.full(p, row, np.intp), u, 0)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_fat_p_row_fat(self):
        # 7x12 + 3x12 = 10x12 --> stays fat
        self.base_fat_p_row_xxx(3)

    def test_fat_p_row_sqr(self):
        # 7x12 + 5x12 = 12x12 --> becomes sqr
        self.base_fat_p_row_xxx(5)

    def test_fat_p_row_tall(self):
        # 7x12 + 7x12 = 14x12 --> becomes tall
        self.base_fat_p_row_xxx(7)

    def test_fat_1_col(self):
        a, q, r, u = self.generate('fat', which='col')
        for col in range(r.shape[1] + 1):
            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
            a1 = np.insert(a, col, u, 1)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_fat_p_col(self):
        # fat + cols --> fat always
        a, q, r, u = self.generate('fat', which='col', p=3)
        for col in range(r.shape[1] + 1):
            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
            a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_economic_1_row(self):
        a, q, r, u = self.generate('tall', 'economic', 'row')
        for row in range(r.shape[0] + 1):
            q1, r1 = qr_insert(q, r, u, row, overwrite_qru=False)
            a1 = np.insert(a, row, u, 0)
            check_qr(q1, r1, a1, self.rtol, self.atol, False)

    def test_economic_p_row(self):
        # tall + rows --> tall always
        a, q, r, u = self.generate('tall', 'economic', 'row', 3)
        for row in range(r.shape[0] + 1):
            q1, r1 = qr_insert(q, r, u, row, overwrite_qru=False)
            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
            check_qr(q1, r1, a1, self.rtol, self.atol, False)

    def test_economic_1_col(self):
        a, q, r, u = self.generate('tall', 'economic', which='col')
        for col in range(r.shape[1] + 1):
            q1, r1 = qr_insert(q, r, u.copy(), col, 'col', overwrite_qru=False)
            a1 = np.insert(a, col, u, 1)
            check_qr(q1, r1, a1, self.rtol, self.atol, False)

    def test_economic_1_col_bad_update(self):
        # When the column to be added lies in the span of Q, the update is
        # not meaningful. This is detected, and a LinAlgError is issued.
        q = np.eye(5, 3, dtype=self.dtype)
        r = np.eye(3, dtype=self.dtype)
        u = np.array([1, 0, 0, 0, 0], self.dtype)
        assert_raises(linalg.LinAlgError, qr_insert, q, r, u, 0, 'col')

    # for column adds to economic matrices there are three cases to test
    # eco + pcol --> eco
    # eco + pcol --> sqr
    # eco + pcol --> fat
    def base_economic_p_col_xxx(self, p):
        """Insert p columns into an economic decomposition at every position."""
        a, q, r, u = self.generate('tall', 'economic', which='col', p=p)
        for col in range(r.shape[1] + 1):
            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
            a1 = np.insert(a, np.full(p, col, np.intp), u, 1)
            check_qr(q1, r1, a1, self.rtol, self.atol, False)

    def test_economic_p_col_eco(self):
        # 12x7 + 12x3 = 12x10 --> stays eco
        self.base_economic_p_col_xxx(3)

    def test_economic_p_col_sqr(self):
        # 12x7 + 12x5 = 12x12 --> becomes sqr
        self.base_economic_p_col_xxx(5)

    def test_economic_p_col_fat(self):
        # 12x7 + 12x7 = 12x14 --> becomes fat
        self.base_economic_p_col_xxx(7)

    def test_Mx1_1_row(self):
        a, q, r, u = self.generate('Mx1', which='row')
        for row in range(r.shape[0] + 1):
            q1, r1 = qr_insert(q, r, u, row)
            a1 = np.insert(a, row, u, 0)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_Mx1_p_row(self):
        a, q, r, u = self.generate('Mx1', which='row', p=3)
        for row in range(r.shape[0] + 1):
            q1, r1 = qr_insert(q, r, u, row)
            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_Mx1_1_col(self):
        a, q, r, u = self.generate('Mx1', which='col')
        for col in range(r.shape[1] + 1):
            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
            a1 = np.insert(a, col, u, 1)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_Mx1_p_col(self):
        a, q, r, u = self.generate('Mx1', which='col', p=3)
        for col in range(r.shape[1] + 1):
            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
            a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_Mx1_economic_1_row(self):
        a, q, r, u = self.generate('Mx1', 'economic', 'row')
        for row in range(r.shape[0] + 1):
            q1, r1 = qr_insert(q, r, u, row)
            a1 = np.insert(a, row, u, 0)
            check_qr(q1, r1, a1, self.rtol, self.atol, False)

    def test_Mx1_economic_p_row(self):
        a, q, r, u = self.generate('Mx1', 'economic', 'row', 3)
        for row in range(r.shape[0] + 1):
            q1, r1 = qr_insert(q, r, u, row)
            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
            check_qr(q1, r1, a1, self.rtol, self.atol, False)

    def test_Mx1_economic_1_col(self):
        a, q, r, u = self.generate('Mx1', 'economic', 'col')
        for col in range(r.shape[1] + 1):
            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
            a1 = np.insert(a, col, u, 1)
            check_qr(q1, r1, a1, self.rtol, self.atol, False)

    def test_Mx1_economic_p_col(self):
        a, q, r, u = self.generate('Mx1', 'economic', 'col', 3)
        for col in range(r.shape[1] + 1):
            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
            a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
            check_qr(q1, r1, a1, self.rtol, self.atol, False)

    def test_1xN_1_row(self):
        a, q, r, u = self.generate('1xN', which='row')
        for row in range(r.shape[0] + 1):
            q1, r1 = qr_insert(q, r, u, row)
            a1 = np.insert(a, row, u, 0)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_1xN_p_row(self):
        a, q, r, u = self.generate('1xN', which='row', p=3)
        for row in range(r.shape[0] + 1):
            q1, r1 = qr_insert(q, r, u, row)
            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_1xN_1_col(self):
        a, q, r, u = self.generate('1xN', which='col')
        for col in range(r.shape[1] + 1):
            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
            a1 = np.insert(a, col, u, 1)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_1xN_p_col(self):
        a, q, r, u = self.generate('1xN', which='col', p=3)
        for col in range(r.shape[1] + 1):
            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
            a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_1x1_1_row(self):
        a, q, r, u = self.generate('1x1', which='row')
        for row in range(r.shape[0] + 1):
            q1, r1 = qr_insert(q, r, u, row)
            a1 = np.insert(a, row, u, 0)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_1x1_p_row(self):
        a, q, r, u = self.generate('1x1', which='row', p=3)
        for row in range(r.shape[0] + 1):
            q1, r1 = qr_insert(q, r, u, row)
            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_1x1_1_col(self):
        a, q, r, u = self.generate('1x1', which='col')
        for col in range(r.shape[1] + 1):
            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
            a1 = np.insert(a, col, u, 1)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_1x1_p_col(self):
        a, q, r, u = self.generate('1x1', which='col', p=3)
        for col in range(r.shape[1] + 1):
            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
            a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_1x1_1_scalar(self):
        a, q, r, u = self.generate('1x1', which='row')
        assert_raises(ValueError, qr_insert, q[0, 0], r, u, 0, 'row')
        assert_raises(ValueError, qr_insert, q, r[0, 0], u, 0, 'row')
        assert_raises(ValueError, qr_insert, q, r, u[0], 0, 'row')
        assert_raises(ValueError, qr_insert, q[0, 0], r, u, 0, 'col')
        assert_raises(ValueError, qr_insert, q, r[0, 0], u, 0, 'col')
        assert_raises(ValueError, qr_insert, q, r, u[0], 0, 'col')

    def base_non_simple_strides(self, adjust_strides, k, p, which):
        """Exercise qr_insert with non-contiguous q, r and u buffers."""
        for type in ['sqr', 'tall', 'fat']:
            a, q0, r0, u0 = self.generate(type, which=which, p=p)
            qs, rs, us = adjust_strides((q0, r0, u0))
            if p == 1:
                ai = np.insert(a, k, u0, 0 if which == 'row' else 1)
            else:
                # (The insert values are u0 for both axes; only the axis
                # differs between row and column inserts.)
                ai = np.insert(a, np.full(p, k, np.intp), u0,
                               0 if which == 'row' else 1)

            # for each variable, q, r, u we try with it strided and
            # overwrite=False. Then we try with overwrite=True. Nothing
            # is checked to see if it can be overwritten, since only
            # F ordered Q can be overwritten when adding columns.
            q = q0.copy('F')
            r = r0.copy('F')
            u = u0.copy('F')
            q1, r1 = qr_insert(qs, r, u, k, which, overwrite_qru=False)
            check_qr(q1, r1, ai, self.rtol, self.atol)
            q1o, r1o = qr_insert(qs, r, u, k, which, overwrite_qru=True)
            check_qr(q1o, r1o, ai, self.rtol, self.atol)

            q = q0.copy('F')
            r = r0.copy('F')
            u = u0.copy('F')
            q2, r2 = qr_insert(q, rs, u, k, which, overwrite_qru=False)
            check_qr(q2, r2, ai, self.rtol, self.atol)
            q2o, r2o = qr_insert(q, rs, u, k, which, overwrite_qru=True)
            check_qr(q2o, r2o, ai, self.rtol, self.atol)

            q = q0.copy('F')
            r = r0.copy('F')
            u = u0.copy('F')
            q3, r3 = qr_insert(q, r, us, k, which, overwrite_qru=False)
            check_qr(q3, r3, ai, self.rtol, self.atol)
            q3o, r3o = qr_insert(q, r, us, k, which, overwrite_qru=True)
            check_qr(q3o, r3o, ai, self.rtol, self.atol)

            q = q0.copy('F')
            r = r0.copy('F')
            u = u0.copy('F')
            # since some of these were consumed above
            qs, rs, us = adjust_strides((q, r, u))
            q5, r5 = qr_insert(qs, rs, us, k, which, overwrite_qru=False)
            check_qr(q5, r5, ai, self.rtol, self.atol)
            q5o, r5o = qr_insert(qs, rs, us, k, which, overwrite_qru=True)
            check_qr(q5o, r5o, ai, self.rtol, self.atol)

    def test_non_unit_strides_1_row(self):
        self.base_non_simple_strides(make_strided, 0, 1, 'row')

    def test_non_unit_strides_p_row(self):
        self.base_non_simple_strides(make_strided, 0, 3, 'row')

    def test_non_unit_strides_1_col(self):
        self.base_non_simple_strides(make_strided, 0, 1, 'col')

    def test_non_unit_strides_p_col(self):
        self.base_non_simple_strides(make_strided, 0, 3, 'col')

    def test_neg_strides_1_row(self):
        self.base_non_simple_strides(negate_strides, 0, 1, 'row')

    def test_neg_strides_p_row(self):
        self.base_non_simple_strides(negate_strides, 0, 3, 'row')

    def test_neg_strides_1_col(self):
        self.base_non_simple_strides(negate_strides, 0, 1, 'col')

    def test_neg_strides_p_col(self):
        self.base_non_simple_strides(negate_strides, 0, 3, 'col')

    def test_non_itemsize_strides_1_row(self):
        self.base_non_simple_strides(nonitemsize_strides, 0, 1, 'row')

    def test_non_itemsize_strides_p_row(self):
        self.base_non_simple_strides(nonitemsize_strides, 0, 3, 'row')

    def test_non_itemsize_strides_1_col(self):
        self.base_non_simple_strides(nonitemsize_strides, 0, 1, 'col')

    def test_non_itemsize_strides_p_col(self):
        self.base_non_simple_strides(nonitemsize_strides, 0, 3, 'col')

    def test_non_native_byte_order_1_row(self):
        self.base_non_simple_strides(make_nonnative, 0, 1, 'row')

    def test_non_native_byte_order_p_row(self):
        self.base_non_simple_strides(make_nonnative, 0, 3, 'row')

    def test_non_native_byte_order_1_col(self):
        self.base_non_simple_strides(make_nonnative, 0, 1, 'col')

    def test_non_native_byte_order_p_col(self):
        self.base_non_simple_strides(make_nonnative, 0, 3, 'col')

    def test_overwrite_qu_rank_1(self):
        # when inserting rows, the size of both Q and R change, so only
        # column inserts can overwrite q. Only complex column inserts
        # with C ordered Q overwrite u. Any contiguous Q is overwritten
        # when inserting 1 column
        a, q0, r, u, = self.generate('sqr', which='col', p=1)
        q = q0.copy('C')
        u0 = u.copy()
        # don't overwrite
        q1, r1 = qr_insert(q, r, u, 0, 'col', overwrite_qru=False)
        a1 = np.insert(a, 0, u0, 1)
        check_qr(q1, r1, a1, self.rtol, self.atol)
        check_qr(q, r, a, self.rtol, self.atol)

        # try overwriting
        q2, r2 = qr_insert(q, r, u, 0, 'col', overwrite_qru=True)
        check_qr(q2, r2, a1, self.rtol, self.atol)
        # verify the overwriting
        assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
        assert_allclose(u, u0.conj(), self.rtol, self.atol)

        # now try with a fortran ordered Q
        qF = q0.copy('F')
        u1 = u0.copy()
        q3, r3 = qr_insert(qF, r, u1, 0, 'col', overwrite_qru=False)
        check_qr(q3, r3, a1, self.rtol, self.atol)
        check_qr(qF, r, a, self.rtol, self.atol)

        # try overwriting
        q4, r4 = qr_insert(qF, r, u1, 0, 'col', overwrite_qru=True)
        check_qr(q4, r4, a1, self.rtol, self.atol)
        assert_allclose(q4, qF, rtol=self.rtol, atol=self.atol)

    def test_overwrite_qu_rank_p(self):
        # when inserting rows, the size of both Q and R change, so only
        # column inserts can potentially overwrite Q. In practice, only
        # F ordered Q are overwritten with a rank p update.
        a, q0, r, u, = self.generate('sqr', which='col', p=3)
        q = q0.copy('F')
        a1 = np.insert(a, np.zeros(3, np.intp), u, 1)

        # don't overwrite
        q1, r1 = qr_insert(q, r, u, 0, 'col', overwrite_qru=False)
        check_qr(q1, r1, a1, self.rtol, self.atol)
        check_qr(q, r, a, self.rtol, self.atol)

        # try overwriting
        q2, r2 = qr_insert(q, r, u, 0, 'col', overwrite_qru=True)
        check_qr(q2, r2, a1, self.rtol, self.atol)
        assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)

    def test_empty_inputs(self):
        a, q, r, u = self.generate('sqr', which='row')
        assert_raises(ValueError, qr_insert, np.array([]), r, u, 0, 'row')
        assert_raises(ValueError, qr_insert, q, np.array([]), u, 0, 'row')
        assert_raises(ValueError, qr_insert, q, r, np.array([]), 0, 'row')
        assert_raises(ValueError, qr_insert, np.array([]), r, u, 0, 'col')
        assert_raises(ValueError, qr_insert, q, np.array([]), u, 0, 'col')
        assert_raises(ValueError, qr_insert, q, r, np.array([]), 0, 'col')

    def test_mismatched_shapes(self):
        a, q, r, u = self.generate('tall', which='row')
        assert_raises(ValueError, qr_insert, q, r[1:], u, 0, 'row')
        assert_raises(ValueError, qr_insert, q[:-2], r, u, 0, 'row')
        assert_raises(ValueError, qr_insert, q, r, u[1:], 0, 'row')
        assert_raises(ValueError, qr_insert, q, r[1:], u, 0, 'col')
        assert_raises(ValueError, qr_insert, q[:-2], r, u, 0, 'col')
        assert_raises(ValueError, qr_insert, q, r, u[1:], 0, 'col')

    def test_unsupported_dtypes(self):
        dts = ['int8', 'int16', 'int32', 'int64',
               'uint8', 'uint16', 'uint32', 'uint64',
               'float16', 'longdouble', 'longcomplex',
               'bool']
        a, q0, r0, u0 = self.generate('sqr', which='row')
        for dtype in dts:
            q = q0.real.astype(dtype)
            with np.errstate(invalid="ignore"):
                r = r0.real.astype(dtype)
            u = u0.real.astype(dtype)
            assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'row')
            assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'col')
            assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'row')
            assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'col')
            assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'row')
            assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'col')

    def test_check_finite(self):
        a0, q0, r0, u0 = self.generate('sqr', which='row', p=3)

        q = q0.copy('F')
        q[1,1] = np.nan
        assert_raises(ValueError, qr_insert, q, r0, u0[:,0], 0, 'row')
        assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'row')
        assert_raises(ValueError, qr_insert, q, r0, u0[:,0], 0, 'col')
        assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'col')

        r = r0.copy('F')
        r[1,1] = np.nan
        assert_raises(ValueError, qr_insert, q0, r, u0[:,0], 0, 'row')
        assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'row')
        assert_raises(ValueError, qr_insert, q0, r, u0[:,0], 0, 'col')
        assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'col')

        u = u0.copy('F')
        u[0,0] = np.nan
        assert_raises(ValueError, qr_insert, q0, r0, u[:,0], 0, 'row')
        assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'row')
        assert_raises(ValueError, qr_insert, q0, r0, u[:,0], 0, 'col')
        assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'col')
class TestQRinsert_f(BaseQRinsert):
    """qr_insert tests with single-precision real (float32) arrays."""

    dtype = np.dtype(np.float32)
class TestQRinsert_F(BaseQRinsert):
    """qr_insert tests with single-precision complex (complex64) arrays."""

    dtype = np.dtype(np.complex64)
class TestQRinsert_d(BaseQRinsert):
    """qr_insert tests with double-precision real (float64) arrays."""

    dtype = np.dtype(np.float64)
class TestQRinsert_D(BaseQRinsert):
    """qr_insert tests with double-precision complex (complex128) arrays."""

    dtype = np.dtype(np.complex128)
class BaseQRupdate(BaseQRdeltas):
    """Shared tests for ``qr_update``.

    A rank-1 (``A + u v*``) or rank-p (``A + U V*``) update is applied and
    the returned (Q, R) pair is checked against a fresh factorization of
    the updated matrix, for a variety of shapes, orderings and strides.
    """

    def generate(self, type, mode='full', p=1):
        """Return ``(a, q, r, u, v)`` where ``u``/``v`` define a rank-p
        update; 1-d for ``p == 1``, otherwise 2-d with p columns."""
        a, q, r = super().generate(type, mode)
        # super call set the seed...
        if p == 1:
            u = np.random.random(q.shape[0])
            v = np.random.random(r.shape[1])
        else:
            u = np.random.random((q.shape[0], p))
            v = np.random.random((r.shape[1], p))
        if np.iscomplexobj(self.dtype.type(1)):
            b = np.random.random(u.shape)
            u = u + 1j * b
            c = np.random.random(v.shape)
            v = v + 1j * c
        u = u.astype(self.dtype)
        v = v.astype(self.dtype)
        return a, q, r, u, v

    def test_sqr_rank_1(self):
        a, q, r, u, v = self.generate('sqr')
        q1, r1 = qr_update(q, r, u, v, False)
        a1 = a + np.outer(u, v.conj())
        check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_sqr_rank_p(self):
        # test ndim = 2, rank 1 updates here too
        for p in [1, 2, 3, 5]:
            a, q, r, u, v = self.generate('sqr', p=p)
            if p == 1:
                u = u.reshape(u.size, 1)
                v = v.reshape(v.size, 1)
            q1, r1 = qr_update(q, r, u, v, False)
            a1 = a + np.dot(u, v.T.conj())
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_tall_rank_1(self):
        a, q, r, u, v = self.generate('tall')
        q1, r1 = qr_update(q, r, u, v, False)
        a1 = a + np.outer(u, v.conj())
        check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_tall_rank_p(self):
        for p in [1, 2, 3, 5]:
            a, q, r, u, v = self.generate('tall', p=p)
            if p == 1:
                u = u.reshape(u.size, 1)
                v = v.reshape(v.size, 1)
            q1, r1 = qr_update(q, r, u, v, False)
            a1 = a + np.dot(u, v.T.conj())
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_fat_rank_1(self):
        a, q, r, u, v = self.generate('fat')
        q1, r1 = qr_update(q, r, u, v, False)
        a1 = a + np.outer(u, v.conj())
        check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_fat_rank_p(self):
        for p in [1, 2, 3, 5]:
            a, q, r, u, v = self.generate('fat', p=p)
            if p == 1:
                u = u.reshape(u.size, 1)
                v = v.reshape(v.size, 1)
            q1, r1 = qr_update(q, r, u, v, False)
            a1 = a + np.dot(u, v.T.conj())
            check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_economic_rank_1(self):
        a, q, r, u, v = self.generate('tall', 'economic')
        q1, r1 = qr_update(q, r, u, v, False)
        a1 = a + np.outer(u, v.conj())
        check_qr(q1, r1, a1, self.rtol, self.atol, False)

    def test_economic_rank_p(self):
        for p in [1, 2, 3, 5]:
            a, q, r, u, v = self.generate('tall', 'economic', p)
            if p == 1:
                u = u.reshape(u.size, 1)
                v = v.reshape(v.size, 1)
            q1, r1 = qr_update(q, r, u, v, False)
            a1 = a + np.dot(u, v.T.conj())
            check_qr(q1, r1, a1, self.rtol, self.atol, False)

    def test_Mx1_rank_1(self):
        a, q, r, u, v = self.generate('Mx1')
        q1, r1 = qr_update(q, r, u, v, False)
        a1 = a + np.outer(u, v.conj())
        check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_Mx1_rank_p(self):
        # when M or N == 1, only a rank 1 update is allowed. This isn't
        # fundamental limitation, but the code does not support it.
        a, q, r, u, v = self.generate('Mx1', p=1)
        u = u.reshape(u.size, 1)
        v = v.reshape(v.size, 1)
        q1, r1 = qr_update(q, r, u, v, False)
        a1 = a + np.dot(u, v.T.conj())
        check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_Mx1_economic_rank_1(self):
        a, q, r, u, v = self.generate('Mx1', 'economic')
        q1, r1 = qr_update(q, r, u, v, False)
        a1 = a + np.outer(u, v.conj())
        check_qr(q1, r1, a1, self.rtol, self.atol, False)

    def test_Mx1_economic_rank_p(self):
        # when M or N == 1, only a rank 1 update is allowed. This isn't
        # fundamental limitation, but the code does not support it.
        a, q, r, u, v = self.generate('Mx1', 'economic', p=1)
        u = u.reshape(u.size, 1)
        v = v.reshape(v.size, 1)
        q1, r1 = qr_update(q, r, u, v, False)
        a1 = a + np.dot(u, v.T.conj())
        check_qr(q1, r1, a1, self.rtol, self.atol, False)

    def test_1xN_rank_1(self):
        a, q, r, u, v = self.generate('1xN')
        q1, r1 = qr_update(q, r, u, v, False)
        a1 = a + np.outer(u, v.conj())
        check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_1xN_rank_p(self):
        # when M or N == 1, only a rank 1 update is allowed. This isn't
        # fundamental limitation, but the code does not support it.
        a, q, r, u, v = self.generate('1xN', p=1)
        u = u.reshape(u.size, 1)
        v = v.reshape(v.size, 1)
        q1, r1 = qr_update(q, r, u, v, False)
        a1 = a + np.dot(u, v.T.conj())
        check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_1x1_rank_1(self):
        a, q, r, u, v = self.generate('1x1')
        q1, r1 = qr_update(q, r, u, v, False)
        a1 = a + np.outer(u, v.conj())
        check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_1x1_rank_p(self):
        # when M or N == 1, only a rank 1 update is allowed. This isn't
        # fundamental limitation, but the code does not support it.
        a, q, r, u, v = self.generate('1x1', p=1)
        u = u.reshape(u.size, 1)
        v = v.reshape(v.size, 1)
        q1, r1 = qr_update(q, r, u, v, False)
        a1 = a + np.dot(u, v.T.conj())
        check_qr(q1, r1, a1, self.rtol, self.atol)

    def test_1x1_rank_1_scalar(self):
        a, q, r, u, v = self.generate('1x1')
        assert_raises(ValueError, qr_update, q[0, 0], r, u, v)
        assert_raises(ValueError, qr_update, q, r[0, 0], u, v)
        assert_raises(ValueError, qr_update, q, r, u[0], v)
        assert_raises(ValueError, qr_update, q, r, u, v[0])

    def base_non_simple_strides(self, adjust_strides, mode, p, overwriteable):
        """Exercise qr_update with non-contiguous q, r, u and v buffers."""
        assert_sqr = False if mode == 'economic' else True
        for type in ['sqr', 'tall', 'fat']:
            a, q0, r0, u0, v0 = self.generate(type, mode, p)
            qs, rs, us, vs = adjust_strides((q0, r0, u0, v0))
            if p == 1:
                aup = a + np.outer(u0, v0.conj())
            else:
                aup = a + np.dot(u0, v0.T.conj())

            # for each variable, q, r, u, v we try with it strided and
            # overwrite=False. Then we try with overwrite=True, and make
            # sure that if p == 1, r and v are still overwritten.
            # a strided q and u must always be copied.
            q = q0.copy('F')
            r = r0.copy('F')
            u = u0.copy('F')
            v = v0.copy('C')
            q1, r1 = qr_update(qs, r, u, v, False)
            check_qr(q1, r1, aup, self.rtol, self.atol, assert_sqr)
            q1o, r1o = qr_update(qs, r, u, v, True)
            check_qr(q1o, r1o, aup, self.rtol, self.atol, assert_sqr)
            if overwriteable:
                assert_allclose(r1o, r, rtol=self.rtol, atol=self.atol)
                assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol)

            q = q0.copy('F')
            r = r0.copy('F')
            u = u0.copy('F')
            v = v0.copy('C')
            q2, r2 = qr_update(q, rs, u, v, False)
            check_qr(q2, r2, aup, self.rtol, self.atol, assert_sqr)
            q2o, r2o = qr_update(q, rs, u, v, True)
            check_qr(q2o, r2o, aup, self.rtol, self.atol, assert_sqr)
            if overwriteable:
                assert_allclose(r2o, rs, rtol=self.rtol, atol=self.atol)
                assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol)

            q = q0.copy('F')
            r = r0.copy('F')
            u = u0.copy('F')
            v = v0.copy('C')
            q3, r3 = qr_update(q, r, us, v, False)
            check_qr(q3, r3, aup, self.rtol, self.atol, assert_sqr)
            q3o, r3o = qr_update(q, r, us, v, True)
            check_qr(q3o, r3o, aup, self.rtol, self.atol, assert_sqr)
            if overwriteable:
                assert_allclose(r3o, r, rtol=self.rtol, atol=self.atol)
                assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol)

            q = q0.copy('F')
            r = r0.copy('F')
            u = u0.copy('F')
            v = v0.copy('C')
            q4, r4 = qr_update(q, r, u, vs, False)
            check_qr(q4, r4, aup, self.rtol, self.atol, assert_sqr)
            q4o, r4o = qr_update(q, r, u, vs, True)
            check_qr(q4o, r4o, aup, self.rtol, self.atol, assert_sqr)
            if overwriteable:
                assert_allclose(r4o, r, rtol=self.rtol, atol=self.atol)
                assert_allclose(vs, v0.conj(), rtol=self.rtol, atol=self.atol)

            q = q0.copy('F')
            r = r0.copy('F')
            u = u0.copy('F')
            v = v0.copy('C')
            # since some of these were consumed above
            qs, rs, us, vs = adjust_strides((q, r, u, v))
            q5, r5 = qr_update(qs, rs, us, vs, False)
            check_qr(q5, r5, aup, self.rtol, self.atol, assert_sqr)
            q5o, r5o = qr_update(qs, rs, us, vs, True)
            check_qr(q5o, r5o, aup, self.rtol, self.atol, assert_sqr)
            if overwriteable:
                assert_allclose(r5o, rs, rtol=self.rtol, atol=self.atol)
                assert_allclose(vs, v0.conj(), rtol=self.rtol, atol=self.atol)

    def test_non_unit_strides_rank_1(self):
        self.base_non_simple_strides(make_strided, 'full', 1, True)

    def test_non_unit_strides_economic_rank_1(self):
        self.base_non_simple_strides(make_strided, 'economic', 1, True)

    def test_non_unit_strides_rank_p(self):
        self.base_non_simple_strides(make_strided, 'full', 3, False)

    def test_non_unit_strides_economic_rank_p(self):
        self.base_non_simple_strides(make_strided, 'economic', 3, False)

    def test_neg_strides_rank_1(self):
        self.base_non_simple_strides(negate_strides, 'full', 1, False)

    def test_neg_strides_economic_rank_1(self):
        self.base_non_simple_strides(negate_strides, 'economic', 1, False)

    def test_neg_strides_rank_p(self):
        self.base_non_simple_strides(negate_strides, 'full', 3, False)

    def test_neg_strides_economic_rank_p(self):
        self.base_non_simple_strides(negate_strides, 'economic', 3, False)

    def test_non_itemsize_strides_rank_1(self):
        self.base_non_simple_strides(nonitemsize_strides, 'full', 1, False)

    def test_non_itemsize_strides_economic_rank_1(self):
        self.base_non_simple_strides(nonitemsize_strides, 'economic', 1, False)

    def test_non_itemsize_strides_rank_p(self):
        self.base_non_simple_strides(nonitemsize_strides, 'full', 3, False)

    def test_non_itemsize_strides_economic_rank_p(self):
        self.base_non_simple_strides(nonitemsize_strides, 'economic', 3, False)

    def test_non_native_byte_order_rank_1(self):
        self.base_non_simple_strides(make_nonnative, 'full', 1, False)

    def test_non_native_byte_order_economic_rank_1(self):
        self.base_non_simple_strides(make_nonnative, 'economic', 1, False)

    def test_non_native_byte_order_rank_p(self):
        self.base_non_simple_strides(make_nonnative, 'full', 3, False)

    def test_non_native_byte_order_economic_rank_p(self):
        self.base_non_simple_strides(make_nonnative, 'economic', 3, False)

    def test_overwrite_qruv_rank_1(self):
        # Any positive strided q, r, u, and v can be overwritten for a rank 1
        # update, only checking C and F contiguous.
        a, q0, r0, u0, v0 = self.generate('sqr')
        a1 = a + np.outer(u0, v0.conj())
        q = q0.copy('F')
        r = r0.copy('F')
        u = u0.copy('F')
        v = v0.copy('F')

        # don't overwrite
        q1, r1 = qr_update(q, r, u, v, False)
        check_qr(q1, r1, a1, self.rtol, self.atol)
        check_qr(q, r, a, self.rtol, self.atol)

        q2, r2 = qr_update(q, r, u, v, True)
        check_qr(q2, r2, a1, self.rtol, self.atol)
        # verify the overwriting, no good way to check u and v.
        assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
        assert_allclose(r2, r, rtol=self.rtol, atol=self.atol)

        q = q0.copy('C')
        r = r0.copy('C')
        u = u0.copy('C')
        v = v0.copy('C')
        q3, r3 = qr_update(q, r, u, v, True)
        check_qr(q3, r3, a1, self.rtol, self.atol)
        assert_allclose(q3, q, rtol=self.rtol, atol=self.atol)
        assert_allclose(r3, r, rtol=self.rtol, atol=self.atol)

    def test_overwrite_qruv_rank_1_economic(self):
        # updating economic decompositions can overwrite any contigous r,
        # and positively strided r and u. V is only ever read.
        # only checking C and F contiguous.
        a, q0, r0, u0, v0 = self.generate('tall', 'economic')
        a1 = a + np.outer(u0, v0.conj())
        q = q0.copy('F')
        r = r0.copy('F')
        u = u0.copy('F')
        v = v0.copy('F')

        # don't overwrite
        q1, r1 = qr_update(q, r, u, v, False)
        check_qr(q1, r1, a1, self.rtol, self.atol, False)
        check_qr(q, r, a, self.rtol, self.atol, False)

        q2, r2 = qr_update(q, r, u, v, True)
        check_qr(q2, r2, a1, self.rtol, self.atol, False)
        # verify the overwriting, no good way to check u and v.
        assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
        assert_allclose(r2, r, rtol=self.rtol, atol=self.atol)

        q = q0.copy('C')
        r = r0.copy('C')
        u = u0.copy('C')
        v = v0.copy('C')
        q3, r3 = qr_update(q, r, u, v, True)
        check_qr(q3, r3, a1, self.rtol, self.atol, False)
        assert_allclose(q3, q, rtol=self.rtol, atol=self.atol)
        assert_allclose(r3, r, rtol=self.rtol, atol=self.atol)

    def test_overwrite_qruv_rank_p(self):
        # for rank p updates, q r must be F contiguous, v must be C (v.T --> F)
        # and u can be C or F, but is only overwritten if Q is C and complex
        a, q0, r0, u0, v0 = self.generate('sqr', p=3)
        a1 = a + np.dot(u0, v0.T.conj())
        q = q0.copy('F')
        r = r0.copy('F')
        u = u0.copy('F')
        v = v0.copy('C')

        # don't overwrite
        q1, r1 = qr_update(q, r, u, v, False)
        check_qr(q1, r1, a1, self.rtol, self.atol)
        check_qr(q, r, a, self.rtol, self.atol)

        q2, r2 = qr_update(q, r, u, v, True)
        check_qr(q2, r2, a1, self.rtol, self.atol)
        # verify the overwriting, no good way to check u and v.
        assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
        assert_allclose(r2, r, rtol=self.rtol, atol=self.atol)

    def test_empty_inputs(self):
        a, q, r, u, v = self.generate('tall')
        assert_raises(ValueError, qr_update, np.array([]), r, u, v)
        assert_raises(ValueError, qr_update, q, np.array([]), u, v)
        assert_raises(ValueError, qr_update, q, r, np.array([]), v)
        assert_raises(ValueError, qr_update, q, r, u, np.array([]))

    def test_mismatched_shapes(self):
        a, q, r, u, v = self.generate('tall')
        assert_raises(ValueError, qr_update, q, r[1:], u, v)
        assert_raises(ValueError, qr_update, q[:-2], r, u, v)
        assert_raises(ValueError, qr_update, q, r, u[1:], v)
        assert_raises(ValueError, qr_update, q, r, u, v[1:])

    def test_unsupported_dtypes(self):
        dts = ['int8', 'int16', 'int32', 'int64',
               'uint8', 'uint16', 'uint32', 'uint64',
               'float16', 'longdouble', 'longcomplex',
               'bool']
        a, q0, r0, u0, v0 = self.generate('tall')
        for dtype in dts:
            q = q0.real.astype(dtype)
            with np.errstate(invalid="ignore"):
                r = r0.real.astype(dtype)
            u = u0.real.astype(dtype)
            v = v0.real.astype(dtype)
            assert_raises(ValueError, qr_update, q, r0, u0, v0)
            assert_raises(ValueError, qr_update, q0, r, u0, v0)
            assert_raises(ValueError, qr_update, q0, r0, u, v0)
            assert_raises(ValueError, qr_update, q0, r0, u0, v)

    def test_integer_input(self):
        q = np.arange(16).reshape(4, 4)
        r = q.copy()  # doesn't matter
        u = q[:, 0].copy()
        v = r[0, :].copy()
        assert_raises(ValueError, qr_update, q, r, u, v)

    def test_check_finite(self):
        a0, q0, r0, u0, v0 = self.generate('tall', p=3)

        q = q0.copy('F')
        q[1,1] = np.nan
        assert_raises(ValueError, qr_update, q, r0, u0[:,0], v0[:,0])
        assert_raises(ValueError, qr_update, q, r0, u0, v0)

        r = r0.copy('F')
        r[1,1] = np.nan
        assert_raises(ValueError, qr_update, q0, r, u0[:,0], v0[:,0])
        assert_raises(ValueError, qr_update, q0, r, u0, v0)

        u = u0.copy('F')
        u[0,0] = np.nan
        assert_raises(ValueError, qr_update, q0, r0, u[:,0], v0[:,0])
        assert_raises(ValueError, qr_update, q0, r0, u, v0)

        v = v0.copy('F')
        v[0,0] = np.nan
        # BUG FIX: previously the NaN-contaminated `u` was passed here, so
        # these assertions never actually exercised the finiteness check
        # on `v`; use the clean `u0` so only `v` can trigger the error.
        assert_raises(ValueError, qr_update, q0, r0, u0[:,0], v[:,0])
        assert_raises(ValueError, qr_update, q0, r0, u0, v)

    def test_economic_check_finite(self):
        a0, q0, r0, u0, v0 = self.generate('tall', mode='economic', p=3)

        q = q0.copy('F')
        q[1,1] = np.nan
        assert_raises(ValueError, qr_update, q, r0, u0[:,0], v0[:,0])
        assert_raises(ValueError, qr_update, q, r0, u0, v0)

        r = r0.copy('F')
        r[1,1] = np.nan
        assert_raises(ValueError, qr_update, q0, r, u0[:,0], v0[:,0])
        assert_raises(ValueError, qr_update, q0, r, u0, v0)

        u = u0.copy('F')
        u[0,0] = np.nan
        assert_raises(ValueError, qr_update, q0, r0, u[:,0], v0[:,0])
        assert_raises(ValueError, qr_update, q0, r0, u, v0)

        v = v0.copy('F')
        v[0,0] = np.nan
        # BUG FIX: use the clean `u0` so only `v` triggers the error
        # (previously the NaN-contaminated `u` masked the `v` check).
        assert_raises(ValueError, qr_update, q0, r0, u0[:,0], v[:,0])
        assert_raises(ValueError, qr_update, q0, r0, u0, v)

    def test_u_exactly_in_span_q(self):
        q = np.array([[0, 0], [0, 0], [1, 0], [0, 1]], self.dtype)
        r = np.array([[1, 0], [0, 1]], self.dtype)
        u = np.array([0, 0, 0, -1], self.dtype)
        v = np.array([1, 2], self.dtype)
        q1, r1 = qr_update(q, r, u, v)
        a1 = np.dot(q, r) + np.outer(u, v.conj())
        check_qr(q1, r1, a1, self.rtol, self.atol, False)
# Run the full qr_update suite in single precision (float32).
class TestQRupdate_f(BaseQRupdate):
    dtype = np.dtype('f')
# Run the full qr_update suite in single-precision complex (complex64).
class TestQRupdate_F(BaseQRupdate):
    dtype = np.dtype('F')
# Run the full qr_update suite in double precision (float64).
class TestQRupdate_d(BaseQRupdate):
    dtype = np.dtype('d')
# Run the full qr_update suite in double-precision complex (complex128).
class TestQRupdate_D(BaseQRupdate):
    dtype = np.dtype('D')
def test_form_qTu():
    # Exercise every code path of _form_qTu explicitly (most are also hit
    # elsewhere in the suite).  The helper expects Q to be a square C- or
    # F-contiguous matrix; economic-mode (rectangular Q) decompositions
    # never reach it.  u may have any positive strides.  Some combinations
    # repeat, since a contiguous 1-D array is both C- and F-ordered.
    q_orders = ['F', 'C']
    q_shapes = [(8, 8)]
    u_orders = ['F', 'C', 'A']   # 'A' stands for neither F nor C
    u_widths = [1, 3]
    dtypes = ['f', 'd', 'F', 'D']
    for qo, qs, uo, us, d in itertools.product(q_orders, q_shapes,
                                               u_orders, u_widths, dtypes):
        if us == 1:
            # A single column may be passed as either a 1-D or 2-D array.
            check_form_qTu(qo, qs, uo, us, 1, d)
        check_form_qTu(qo, qs, uo, us, 2, d)
def check_form_qTu(q_order, q_shape, u_order, u_shape, u_ndim, dtype):
    """Compare ``_form_qTu(q, u)`` against an explicit ``Q.conj().T @ u``.

    Builds random q/u with the requested memory order, shape, ndim and
    dtype, then checks the result to a dtype-dependent tolerance.
    Raises ValueError for dtypes _form_qTu does not support.
    """
    np.random.seed(47)
    if u_shape == 1 and u_ndim == 1:
        u_shape = (q_shape[0],)
    else:
        u_shape = (q_shape[0], u_shape)
    dtype = np.dtype(dtype)
    if dtype.char in 'fd':
        q = np.random.random(q_shape)
        u = np.random.random(u_shape)
    elif dtype.char in 'FD':
        q = np.random.random(q_shape) + 1j*np.random.random(q_shape)
        u = np.random.random(u_shape) + 1j*np.random.random(u_shape)
    else:
        # Bug fix: the exception was constructed but never raised, so an
        # unsupported dtype fell through and died later with a confusing
        # NameError (q/u unbound).
        raise ValueError("form_qTu doesn't support this dtype")
    q = np.require(q, dtype, q_order)
    if u_order != 'A':
        u = np.require(u, dtype, u_order)
    else:
        # 'A' means "neither C nor F": give u artificial strides.
        u, = make_strided((u.astype(dtype),))
    rtol = 10.0 ** -(np.finfo(dtype).precision - 2)
    atol = 2 * np.finfo(dtype).eps
    expected = np.dot(q.T.conj(), u)
    res = _decomp_update._form_qTu(q, u)
    assert_allclose(res, expected, rtol=rtol, atol=atol)
| 68,486
| 39.262787
| 79
|
py
|
scipy
|
scipy-main/scipy/fftpack/pseudo_diffs.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.fftpack` namespace for importing the functions
# included below.
import warnings
from . import _pseudo_diffs
# Deprecated public names; attribute access is forwarded (with a
# DeprecationWarning) to the private _pseudo_diffs module by __getattr__.
__all__ = [  # noqa: F822
    'diff',
    'tilbert', 'itilbert', 'hilbert', 'ihilbert',
    'cs_diff', 'cc_diff', 'sc_diff', 'ss_diff',
    'shift', 'iscomplexobj', 'convolve'
]
def __dir__():
    """Expose only the deprecated public names via dir()."""
    return __all__
def __getattr__(name):
    """Forward deprecated attribute access to ``scipy.fftpack``,
    emitting a DeprecationWarning for known names."""
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.fftpack` namespace, "
                      "the `scipy.fftpack.pseudo_diffs` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_pseudo_diffs, name)
    raise AttributeError(
        "scipy.fftpack.pseudo_diffs is deprecated and has no attribute "
        f"{name}. Try looking in scipy.fftpack instead.")
| 901
| 28.096774
| 78
|
py
|
scipy
|
scipy-main/scipy/fftpack/_realtransforms.py
|
"""
Real spectrum transforms (DCT, DST, MDCT)
"""
__all__ = ['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn']
from scipy.fft import _pocketfft
from ._helper import _good_shape
# Maps each DCT/DST type to the type of its inverse transform: types 1 and
# 4 are self-inverse, while types 2 and 3 invert each other.
_inverse_typemap = {1: 1, 2: 3, 3: 2, 4: 4}
def dctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
    """
    Return multidimensional Discrete Cosine Transform along the specified axes.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT (see `dct` for definitions). Default is 2.
    shape : int or array_like of ints or None, optional
        Shape of the result.  Defaults to ``x.shape`` (or
        ``numpy.take(x.shape, axes, axis=0)`` when `axes` is given).
        Larger entries zero-pad, smaller ones truncate, and a -1 entry
        keeps the corresponding dimension of `x`.
    axes : int or array_like of ints or None, optional
        Axes over which the transform is computed; all axes by default.
    norm : {None, 'ortho'}, optional
        Normalization mode (see `dct` Notes). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; default is False.

    Returns
    -------
    y : ndarray of real
        The transformed input array.

    See Also
    --------
    idctn : Inverse multidimensional DCT

    Notes
    -----
    For full details of the DCT types and normalization modes, as well as
    references, see `dct`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.fftpack import dctn, idctn
    >>> rng = np.random.default_rng()
    >>> y = rng.standard_normal((16, 16))
    >>> np.allclose(y, idctn(dctn(y, norm='ortho'), norm='ortho'))
    True
    """
    # Resolve the shape/axes defaults, then delegate to the pocketfft backend.
    full_shape = _good_shape(x, shape, axes)
    return _pocketfft.dctn(x, type, full_shape, axes, norm, overwrite_x)
def idctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
"""
Return multidimensional Discrete Cosine Transform along the specified axes.
Parameters
----------
x : array_like
The input array.
type : {1, 2, 3, 4}, optional
Type of the DCT (see Notes). Default type is 2.
shape : int or array_like of ints or None, optional
The shape of the result. If both `shape` and `axes` (see below) are
None, `shape` is ``x.shape``; if `shape` is None but `axes` is
not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``.
If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros.
If ``shape[i] < x.shape[i]``, the ith dimension is truncated to
length ``shape[i]``.
If any element of `shape` is -1, the size of the corresponding
dimension of `x` is used.
axes : int or array_like of ints or None, optional
Axes along which the IDCT is computed.
The default is over all axes.
norm : {None, 'ortho'}, optional
Normalization mode (see Notes). Default is None.
overwrite_x : bool, optional
If True, the contents of `x` can be destroyed; the default is False.
Returns
-------
y : ndarray of real
The transformed input array.
See Also
--------
dctn : multidimensional DCT
Notes
-----
For full details of the IDCT types and normalization modes, as well as
references, see `idct`.
Examples
--------
>>> import numpy as np
>>> from scipy.fftpack import dctn, idctn
>>> rng = np.random.default_rng()
>>> y = rng.standard_normal((16, 16))
>>> np.allclose(y, idctn(dctn(y, norm='ortho'), norm='ortho'))
True
"""
type = _inverse_typemap[type]
shape = _good_shape(x, shape, axes)
return _pocketfft.dctn(x, type, shape, axes, norm, overwrite_x)
def dstn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
"""
Return multidimensional Discrete Sine Transform along the specified axes.
Parameters
----------
x : array_like
The input array.
type : {1, 2, 3, 4}, optional
Type of the DST (see Notes). Default type is 2.
shape : int or array_like of ints or None, optional
The shape of the result. If both `shape` and `axes` (see below) are
None, `shape` is ``x.shape``; if `shape` is None but `axes` is
not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``.
If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros.
If ``shape[i] < x.shape[i]``, the ith dimension is truncated to
length ``shape[i]``.
If any element of `shape` is -1, the size of the corresponding
dimension of `x` is used.
axes : int or array_like of ints or None, optional
Axes along which the DCT is computed.
The default is over all axes.
norm : {None, 'ortho'}, optional
Normalization mode (see Notes). Default is None.
overwrite_x : bool, optional
If True, the contents of `x` can be destroyed; the default is False.
Returns
-------
y : ndarray of real
The transformed input array.
See Also
--------
idstn : Inverse multidimensional DST
Notes
-----
For full details of the DST types and normalization modes, as well as
references, see `dst`.
Examples
--------
>>> import numpy as np
>>> from scipy.fftpack import dstn, idstn
>>> rng = np.random.default_rng()
>>> y = rng.standard_normal((16, 16))
>>> np.allclose(y, idstn(dstn(y, norm='ortho'), norm='ortho'))
True
"""
shape = _good_shape(x, shape, axes)
return _pocketfft.dstn(x, type, shape, axes, norm, overwrite_x)
def idstn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
"""
Return multidimensional Discrete Sine Transform along the specified axes.
Parameters
----------
x : array_like
The input array.
type : {1, 2, 3, 4}, optional
Type of the DST (see Notes). Default type is 2.
shape : int or array_like of ints or None, optional
The shape of the result. If both `shape` and `axes` (see below) are
None, `shape` is ``x.shape``; if `shape` is None but `axes` is
not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``.
If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros.
If ``shape[i] < x.shape[i]``, the ith dimension is truncated to
length ``shape[i]``.
If any element of `shape` is -1, the size of the corresponding
dimension of `x` is used.
axes : int or array_like of ints or None, optional
Axes along which the IDST is computed.
The default is over all axes.
norm : {None, 'ortho'}, optional
Normalization mode (see Notes). Default is None.
overwrite_x : bool, optional
If True, the contents of `x` can be destroyed; the default is False.
Returns
-------
y : ndarray of real
The transformed input array.
See Also
--------
dstn : multidimensional DST
Notes
-----
For full details of the IDST types and normalization modes, as well as
references, see `idst`.
Examples
--------
>>> import numpy as np
>>> from scipy.fftpack import dstn, idstn
>>> rng = np.random.default_rng()
>>> y = rng.standard_normal((16, 16))
>>> np.allclose(y, idstn(dstn(y, norm='ortho'), norm='ortho'))
True
"""
type = _inverse_typemap[type]
shape = _good_shape(x, shape, axes)
return _pocketfft.dstn(x, type, shape, axes, norm, overwrite_x)
def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
    r"""
    Return the Discrete Cosine Transform of arbitrary type sequence x.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT (see Notes). Default type is 2.
    n : int, optional
        Length of the transform.  `x` is truncated or zero-padded to
        length `n` along `axis`; the default is ``x.shape[axis]``.
    axis : int, optional
        Axis along which the dct is computed; default is the last axis.
    norm : {None, 'ortho'}, optional
        Normalization mode (see Notes). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; default is False.

    Returns
    -------
    y : ndarray of real
        The transformed input array.

    See Also
    --------
    idct : Inverse DCT

    Notes
    -----
    Of the eight theoretical DCT variants only the first four are
    implemented.  'The' DCT generally refers to type 2 and 'the' inverse
    DCT to type 3.  For a 1-D array, ``dct(x, norm='ortho')`` matches
    MATLAB's ``dct(x)``.  With ``norm=None`` the unnormalized definitions
    are used; e.g. for type II

    .. math::

       y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi k(2n+1)}{2N}\right)

    and with ``norm='ortho'`` a scaling is applied that makes the
    transform matrix orthonormal.  See `scipy.fft.dct` for the full
    definitions, normalization factors and references [1]_ [2]_ of all
    four types.  The DCT-I requires an input of size > 1; orthonormalized
    DCT-I and DCT-IV were added in SciPy 1.2.0.

    References
    ----------
    .. [1] 'A Fast Cosine Transform in One and Two Dimensions', by J.
           Makhoul, `IEEE Transactions on acoustics, speech and signal
           processing` vol. 28(1), pp. 27-34,
           :doi:`10.1109/TASSP.1980.1163351` (1980).
    .. [2] Wikipedia, "Discrete cosine transform",
           https://en.wikipedia.org/wiki/Discrete_cosine_transform

    Examples
    --------
    The Type 1 DCT is equivalent to the FFT (though faster) for real,
    even-symmetrical inputs.  The output is also real and even-symmetrical.
    Half of the FFT input is used to generate half of the FFT output:

    >>> from scipy.fftpack import fft, dct
    >>> import numpy as np
    >>> fft(np.array([4., 3., 5., 10., 5., 3.])).real
    array([ 30.,  -8.,   6.,  -2.,   6.,  -8.])
    >>> dct(np.array([4., 3., 5., 10.]), 1)
    array([ 30.,  -8.,   6.,  -2.])
    """
    return _pocketfft.dct(x, type, n, axis, norm, overwrite_x)
def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
    """
    Return the Inverse Discrete Cosine Transform of an arbitrary type sequence.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT to invert (see Notes). Default type is 2.
    n : int, optional
        Length of the transform.  `x` is truncated or zero-padded to
        length `n` along `axis`; the default is ``x.shape[axis]``.
    axis : int, optional
        Axis along which the idct is computed; default is the last axis.
    norm : {None, 'ortho'}, optional
        Normalization mode (see Notes). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; default is False.

    Returns
    -------
    idct : ndarray of real
        The transformed input array.

    See Also
    --------
    dct : Forward DCT

    Notes
    -----
    For a 1-D array, ``idct(x, norm='ortho')`` matches MATLAB's
    ``idct(x)``.  'The' IDCT is the IDCT of type 2, which is the same as
    the DCT of type 3.  IDCT of type 1 is the DCT of type 1, IDCT of
    type 2 is the DCT of type 3, IDCT of type 3 is the DCT of type 2, and
    IDCT of type 4 is the DCT of type 4.  For the definitions of these
    types, see `dct`.

    Examples
    --------
    The Type 1 DCT is equivalent to the DFT for real, even-symmetrical
    inputs.  The output is also real and even-symmetrical.  Half of the
    IFFT input is used to generate half of the IFFT output:

    >>> from scipy.fftpack import ifft, idct
    >>> import numpy as np
    >>> ifft(np.array([ 30.,  -8.,   6.,  -2.,   6.,  -8.])).real
    array([  4.,   3.,   5.,  10.,   5.,   3.])
    >>> idct(np.array([ 30.,  -8.,   6.,  -2.]), 1) / 6
    array([  4.,   3.,   5.,  10.])
    """
    # Invert by applying the DCT of the mapped type.
    inverse_type = _inverse_typemap[type]
    return _pocketfft.dct(x, inverse_type, n, axis, norm, overwrite_x)
def dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
r"""
Return the Discrete Sine Transform of arbitrary type sequence x.
Parameters
----------
x : array_like
The input array.
type : {1, 2, 3, 4}, optional
Type of the DST (see Notes). Default type is 2.
n : int, optional
Length of the transform. If ``n < x.shape[axis]``, `x` is
truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
default results in ``n = x.shape[axis]``.
axis : int, optional
Axis along which the dst is computed; the default is over the
last axis (i.e., ``axis=-1``).
norm : {None, 'ortho'}, optional
Normalization mode (see Notes). Default is None.
overwrite_x : bool, optional
If True, the contents of `x` can be destroyed; the default is False.
Returns
-------
dst : ndarray of reals
The transformed input array.
See Also
--------
idst : Inverse DST
Notes
-----
For a single dimension array ``x``.
There are, theoretically, 8 types of the DST for different combinations of
even/odd boundary conditions and boundary off sets [1]_, only the first
4 types are implemented in scipy.
**Type I**
There are several definitions of the DST-I; we use the following
for ``norm=None``. DST-I assumes the input is odd around `n=-1` and `n=N`.
.. math::
y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(n+1)}{N+1}\right)
Note that the DST-I is only supported for input size > 1.
The (unnormalized) DST-I is its own inverse, up to a factor `2(N+1)`.
The orthonormalized DST-I is exactly its own inverse.
**Type II**
There are several definitions of the DST-II; we use the following for
``norm=None``. DST-II assumes the input is odd around `n=-1/2` and
`n=N-1/2`; the output is odd around :math:`k=-1` and even around `k=N-1`
.. math::
y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(2n+1)}{2N}\right)
if ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor ``f``
.. math::
f = \begin{cases}
\sqrt{\frac{1}{4N}} & \text{if }k = 0, \\
\sqrt{\frac{1}{2N}} & \text{otherwise} \end{cases}
**Type III**
There are several definitions of the DST-III, we use the following (for
``norm=None``). DST-III assumes the input is odd around `n=-1` and even
around `n=N-1`
.. math::
y_k = (-1)^k x_{N-1} + 2 \sum_{n=0}^{N-2} x_n \sin\left(
\frac{\pi(2k+1)(n+1)}{2N}\right)
The (unnormalized) DST-III is the inverse of the (unnormalized) DST-II, up
to a factor `2N`. The orthonormalized DST-III is exactly the inverse of the
orthonormalized DST-II.
.. versionadded:: 0.11.0
**Type IV**
There are several definitions of the DST-IV, we use the following (for
``norm=None``). DST-IV assumes the input is odd around `n=-0.5` and even
around `n=N-0.5`
.. math::
y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(2k+1)(2n+1)}{4N}\right)
The (unnormalized) DST-IV is its own inverse, up to a factor `2N`. The
orthonormalized DST-IV is exactly its own inverse.
.. versionadded:: 1.2.0
Support for DST-IV.
References
----------
.. [1] Wikipedia, "Discrete sine transform",
https://en.wikipedia.org/wiki/Discrete_sine_transform
"""
return _pocketfft.dst(x, type, n, axis, norm, overwrite_x)
def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
"""
Return the Inverse Discrete Sine Transform of an arbitrary type sequence.
Parameters
----------
x : array_like
The input array.
type : {1, 2, 3, 4}, optional
Type of the DST (see Notes). Default type is 2.
n : int, optional
Length of the transform. If ``n < x.shape[axis]``, `x` is
truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
default results in ``n = x.shape[axis]``.
axis : int, optional
Axis along which the idst is computed; the default is over the
last axis (i.e., ``axis=-1``).
norm : {None, 'ortho'}, optional
Normalization mode (see Notes). Default is None.
overwrite_x : bool, optional
If True, the contents of `x` can be destroyed; the default is False.
Returns
-------
idst : ndarray of real
The transformed input array.
See Also
--------
dst : Forward DST
Notes
-----
'The' IDST is the IDST of type 2, which is the same as DST of type 3.
IDST of type 1 is the DST of type 1, IDST of type 2 is the DST of type
3, and IDST of type 3 is the DST of type 2. For the definition of these
types, see `dst`.
.. versionadded:: 0.11.0
"""
type = _inverse_typemap[type]
return _pocketfft.dst(x, type, n, axis, norm, overwrite_x)
| 19,214
| 31.078464
| 80
|
py
|
scipy
|
scipy-main/scipy/fftpack/setup.py
|
# Created by Pearu Peterson, August 2002
def configuration(parent_package='',top_path=None):
    """Return the numpy.distutils build configuration for scipy.fftpack.

    NOTE(review): numpy.distutils is deprecated and removed from recent
    NumPy releases; this file only works with legacy build setups.
    """
    from numpy.distutils.misc_util import Configuration
    config = Configuration('fftpack',parent_package, top_path)
    config.add_data_dir('tests')
    # 'convolve' is the only C extension module built here.
    config.add_extension('convolve', sources=['convolve.c'])
    return config
# Allow building this subpackage standalone with the legacy build system.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 449
| 25.470588
| 62
|
py
|
scipy
|
scipy-main/scipy/fftpack/_basic.py
|
"""
Discrete Fourier Transforms - _basic.py
"""
# Created by Pearu Peterson, August,September 2002
# Public FFT wrappers; all computation is delegated to scipy.fft's
# pocketfft backend.
__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft',
           'fft2','ifft2']
from scipy.fft import _pocketfft
from ._helper import _good_shape
def fft(x, n=None, axis=-1, overwrite_x=False):
    """
    Return discrete Fourier transform of real or complex sequence.

    The returned complex array contains ``y(0), y(1),..., y(n-1)``, where
    ``y(j) = (x * exp(-2*pi*sqrt(-1)*j*np.arange(n)/n)).sum()``.

    Parameters
    ----------
    x : array_like
        Array to Fourier transform.
    n : int, optional
        Length of the Fourier transform.  `x` is truncated or zero-padded
        to length `n` along `axis`; the default is ``x.shape[axis]``.
    axis : int, optional
        Axis along which the fft's are computed; default is the last axis.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; default is False.

    Returns
    -------
    z : complex ndarray
        The DFT in "standard" order: ``z[0]`` is the zero-frequency term,
        ``z[1:n/2]`` the positive-frequency terms, and ``z[n/2:]`` the
        negative-frequency terms in order of decreasingly negative
        frequency.  Use `fftshift` to center the zero frequency.

    See Also
    --------
    ifft : Inverse FFT
    rfft : FFT of a real sequence

    Notes
    -----
    Single and double precision routines are implemented; half precision
    is upcast to single and non-floating-point input to double.
    Long-double input is not supported.  The transform is fastest when
    `n` is a power of two and slowest when `n` is prime.

    For real-valued ``x``, ``z[j] == z[n-j].conjugate()`` and a faster
    "real FFT" path is used automatically; `rfft` additionally halves the
    output.  For real *and* symmetric data, `dct` can roughly double the
    efficiency again.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.fftpack import fft, ifft
    >>> x = np.arange(5)
    >>> np.allclose(fft(ifft(x)), x, atol=1e-15)  # within numerical accuracy.
    True
    """
    return _pocketfft.fft(x, n, axis, None, overwrite_x)
def ifft(x, n=None, axis=-1, overwrite_x=False):
    """
    Return discrete inverse Fourier transform of real or complex sequence.

    The returned complex array contains ``y(0), y(1),..., y(n-1)``, where
    ``y(j) = (x * exp(2*pi*sqrt(-1)*j*np.arange(n)/n)).mean()``.

    Parameters
    ----------
    x : array_like
        Transformed data to invert.
    n : int, optional
        Length of the inverse Fourier transform.  `x` is truncated or
        zero-padded to length `n` along `axis`; the default is
        ``x.shape[axis]``.
    axis : int, optional
        Axis along which the ifft's are computed; default is the last axis.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; default is False.

    Returns
    -------
    ifft : ndarray of floats
        The inverse discrete Fourier transform.

    See Also
    --------
    fft : Forward FFT

    Notes
    -----
    Single and double precision routines are implemented; half precision
    is upcast to single and non-floating-point input to double.
    Long-double input is not supported.  Fastest when `n` is a power of
    two; for real input a faster "real IFFT" path is used automatically.

    Examples
    --------
    >>> from scipy.fftpack import fft, ifft
    >>> import numpy as np
    >>> x = np.arange(5)
    >>> np.allclose(ifft(fft(x)), x, atol=1e-15)  # within numerical accuracy.
    True
    """
    return _pocketfft.ifft(x, n, axis, None, overwrite_x)
def rfft(x, n=None, axis=-1, overwrite_x=False):
    """
    Discrete Fourier transform of a real sequence.

    Parameters
    ----------
    x : array_like, real-valued
        The data to transform.
    n : int, optional
        Length of the Fourier transform.  `x` is truncated or zero-padded
        to length `n` along `axis`; the default is ``x.shape[axis]``.
    axis : int, optional
        The axis along which the transform is applied; default is the last
        axis.
    overwrite_x : bool, optional
        If set to true, the contents of `x` can be overwritten. Default is
        False.

    Returns
    -------
    z : real ndarray
        The packed half-spectrum::

          [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))]              if n is even
          [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))]   if n is odd

        where ``y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k*2*pi/n)``.

    See Also
    --------
    fft, irfft, scipy.fft.rfft

    Notes
    -----
    Within numerical accuracy, ``y == rfft(irfft(y))``.  Single and double
    precision routines are implemented; half precision is upcast to single
    and non-floating-point input to double.  Long-double input is not
    supported.  For an output with a complex datatype, use the newer
    `scipy.fft.rfft` instead.

    Examples
    --------
    >>> from scipy.fftpack import fft, rfft
    >>> a = [9, -9, 1, 3]
    >>> fft(a)
    array([  4. +0.j,   8.+12.j,  16. +0.j,   8.-12.j])
    >>> rfft(a)
    array([  4.,   8.,  12.,  16.])
    """
    return _pocketfft.rfft_fftpack(x, n, axis, None, overwrite_x)
def irfft(x, n=None, axis=-1, overwrite_x=False):
"""
Return inverse discrete Fourier transform of real sequence x.
The contents of `x` are interpreted as the output of the `rfft`
function.
Parameters
----------
x : array_like
Transformed data to invert.
n : int, optional
Length of the inverse Fourier transform.
If n < x.shape[axis], x is truncated.
If n > x.shape[axis], x is zero-padded.
The default results in n = x.shape[axis].
axis : int, optional
Axis along which the ifft's are computed; the default is over
the last axis (i.e., axis=-1).
overwrite_x : bool, optional
If True, the contents of `x` can be destroyed; the default is False.
Returns
-------
irfft : ndarray of floats
The inverse discrete Fourier transform.
See Also
--------
rfft, ifft, scipy.fft.irfft
Notes
-----
The returned real array contains::
[y(0),y(1),...,y(n-1)]
where for n is even::
y(j) = 1/n (sum[k=1..n/2-1] (x[2*k-1]+sqrt(-1)*x[2*k])
* exp(sqrt(-1)*j*k* 2*pi/n)
+ c.c. + x[0] + (-1)**(j) x[n-1])
and for n is odd::
y(j) = 1/n (sum[k=1..(n-1)/2] (x[2*k-1]+sqrt(-1)*x[2*k])
* exp(sqrt(-1)*j*k* 2*pi/n)
+ c.c. + x[0])
c.c. denotes complex conjugate of preceding expression.
For details on input parameters, see `rfft`.
To process (conjugate-symmetric) frequency-domain data with a complex
datatype, consider using the newer function `scipy.fft.irfft`.
Examples
--------
>>> from scipy.fftpack import rfft, irfft
>>> a = [1.0, 2.0, 3.0, 4.0, 5.0]
>>> irfft(a)
array([ 2.6 , -3.16405192, 1.24398433, -1.14955713, 1.46962473])
>>> irfft(rfft(a))
array([1., 2., 3., 4., 5.])
"""
return _pocketfft.irfft_fftpack(x, n, axis, None, overwrite_x)
def fftn(x, shape=None, axes=None, overwrite_x=False):
    """
    Return multidimensional discrete Fourier transform.

    The returned array contains::

      y[j_1,..,j_d] = sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
         x[k_1,..,k_d] * prod[i=1..d] exp(-sqrt(-1)*2*pi/n_i * j_i * k_i)

    where d = len(x.shape) and n = x.shape.

    Parameters
    ----------
    x : array_like
        The (N-D) array to transform.
    shape : int or array_like of ints or None, optional
        Shape of the result.  Defaults to ``x.shape`` (or
        ``numpy.take(x.shape, axes, axis=0)`` when `axes` is given).
        Larger entries zero-pad, smaller ones truncate, and a -1 entry
        keeps the corresponding dimension of `x`.
    axes : int or array_like of ints or None, optional
        The axes of `x` (`y` if `shape` is not None) along which the
        transform is applied; all axes by default.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed. Default is False.

    Returns
    -------
    y : complex-valued N-D NumPy array
        The (N-D) DFT of the input array.

    See Also
    --------
    ifftn

    Notes
    -----
    If ``x`` is real-valued, then
    ``y[..., j_i, ...] == y[..., n_i-j_i, ...].conjugate()``.
    Single and double precision routines are implemented; half precision
    is upcast to single and non-floating-point input to double.
    Long-double input is not supported.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.fftpack import fftn, ifftn
    >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16))
    >>> np.allclose(y, fftn(ifftn(y)))
    True
    """
    full_shape = _good_shape(x, shape, axes)
    return _pocketfft.fftn(x, full_shape, axes, None, overwrite_x)
def ifftn(x, shape=None, axes=None, overwrite_x=False):
    """
    Return inverse multidimensional discrete Fourier transform.

    The sequence can be of an arbitrary type.  The returned array
    contains::

      y[j_1,..,j_d] = 1/p * sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
         x[k_1,..,k_d] * prod[i=1..d] exp(sqrt(-1)*2*pi/n_i * j_i * k_i)

    where ``d = len(x.shape)``, ``n = x.shape``, and
    ``p = prod[i=1..d] n_i``.

    For description of parameters see `fftn`.

    See Also
    --------
    fftn : for detailed information.

    Examples
    --------
    >>> from scipy.fftpack import fftn, ifftn
    >>> import numpy as np
    >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16))
    >>> np.allclose(y, ifftn(fftn(y)))
    True
    """
    full_shape = _good_shape(x, shape, axes)
    return _pocketfft.ifftn(x, full_shape, axes, None, overwrite_x)
def fft2(x, shape=None, axes=(-2, -1), overwrite_x=False):
    """
    2-D discrete Fourier transform.

    Return the 2-D discrete Fourier transform of the 2-D argument `x`.
    This is `fftn` restricted (by default) to the last two axes.

    See Also
    --------
    fftn : for detailed information.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.fftpack import fft2, ifft2
    >>> y = np.mgrid[:5, :5][0]
    >>> np.allclose(y, ifft2(fft2(y)))
    True
    """
    return fftn(x, shape, axes, overwrite_x)
def ifft2(x, shape=None, axes=(-2,-1), overwrite_x=False):
    """
    2-D discrete inverse Fourier transform of real or complex sequence.

    Return the inverse 2-D discrete Fourier transform of an arbitrary
    type sequence x.  See `ifft` for more information.

    See Also
    --------
    fft2, ifft

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.fftpack import fft2, ifft2
    >>> y = np.mgrid[:5, :5][0]
    >>> np.allclose(y, fft2(ifft2(y)))
    True
    """
    # A 2-D inverse FFT is the N-D inverse FFT over the last two axes.
    return ifftn(x, shape, axes, overwrite_x)
| 13,098
| 29.5338
| 78
|
py
|
scipy
|
scipy-main/scipy/fftpack/helper.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.fftpack` namespace for importing the functions
# included below.
import warnings
from . import _helper
__all__ = [ # noqa: F822
'fftshift', 'ifftshift', 'fftfreq', 'rfftfreq', 'next_fast_len'
]
def __dir__():
    # Limit dir() on this deprecated shim module to its public re-exports.
    return __all__
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): runs only for names not found by
    # normal lookup, letting us warn on access to the deprecated namespace.
    if name not in __all__:
        raise AttributeError(
            "scipy.fftpack.helper is deprecated and has no attribute "
            f"{name}. Try looking in scipy.fftpack instead.")
    # Valid legacy attribute: warn, then forward to the private module.
    warnings.warn(f"Please use `{name}` from the `scipy.fftpack` namespace, "
                  "the `scipy.fftpack.helper` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_helper, name)
| 795
| 27.428571
| 77
|
py
|
scipy
|
scipy-main/scipy/fftpack/_helper.py
|
import operator
from numpy.fft.helper import fftshift, ifftshift, fftfreq
import scipy.fft._pocketfft.helper as _helper
import numpy as np
__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq', 'next_fast_len']
def rfftfreq(n, d=1.0):
    """DFT sample frequencies (for usage with rfft, irfft).

    The returned float array contains the frequency bins in
    cycles/unit (with zero at the start) given a window length `n` and a
    sample spacing `d`::

      f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2]/(d*n)   if n is even
      f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2,n/2]/(d*n)   if n is odd

    Parameters
    ----------
    n : int
        Window length.
    d : scalar, optional
        Sample spacing. Default is 1.

    Returns
    -------
    out : ndarray
        The array of length `n`, containing the sample frequencies.

    Raises
    ------
    ValueError
        If `n` is negative.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import fftpack
    >>> sig = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)
    >>> sig_fft = fftpack.rfft(sig)
    >>> n = sig_fft.size
    >>> timestep = 0.1
    >>> freq = fftpack.rfftfreq(n, d=timestep)
    >>> freq
    array([ 0.  ,  1.25,  1.25,  2.5 ,  2.5 ,  3.75,  3.75,  5.  ])
    """
    # Accept anything that implements __index__ (ints, numpy ints, ...).
    n = operator.index(n)
    if n < 0:
        # f-string instead of dated %-interpolation; message text unchanged.
        raise ValueError(f"n = {n} is not valid. "
                         "n must be a nonnegative integer.")
    # rfft packs Re/Im pairs, so each nonzero frequency appears twice:
    # floor-halving 1..n yields [0, 1, 1, 2, 2, ...].
    return (np.arange(1, n + 1, dtype=int) // 2) / float(n * d)
def next_fast_len(target):
    """
    Find the next fast size of input data to `fft`, for zero-padding, etc.

    SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this
    returns the next composite of the prime factors 2, 3, and 5 which is
    greater than or equal to `target`. (These are also known as 5-smooth
    numbers, regular numbers, or Hamming numbers.)

    Parameters
    ----------
    target : int
        Length to start searching from. Must be a positive integer.

    Returns
    -------
    out : int
        The first 5-smooth number greater than or equal to `target`.

    Notes
    -----
    .. versionadded:: 0.18.0

    Examples
    --------
    >>> from scipy import fftpack
    >>> fftpack.next_fast_len(10007)
    10125
    """
    # pocketfft's "real" sizes are restricted to 5-smooth composites, which
    # matches what legacy FFTPACK handled efficiently — hence real=True.
    return _helper.good_size(target, real=True)
def _good_shape(x, shape, axes):
    """Ensure that the shape argument is valid for scipy.fftpack.

    scipy.fftpack does not support ``len(shape) < x.ndim`` when `axes` is
    not given; enforce that here and normalize `shape` to a list of ints.
    """
    # Nothing to validate unless shape was given without axes.
    if shape is None or axes is not None:
        return shape
    shape = _helper._iterable_of_int(shape, 'shape')
    if len(shape) != np.ndim(x):
        raise ValueError("when given, axes and shape arguments"
                         " have to be of the same length")
    return shape
| 3,354
| 28.690265
| 78
|
py
|
scipy
|
scipy-main/scipy/fftpack/realtransforms.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.fftpack` namespace for importing the functions
# included below.
import warnings
from . import _realtransforms
__all__ = [ # noqa: F822
'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn'
]
def __dir__():
    # Limit dir() on this deprecated shim module to its public re-exports.
    return __all__
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): runs only for names not found by
    # normal lookup, letting us warn on access to the deprecated namespace.
    if name not in __all__:
        raise AttributeError(
            "scipy.fftpack.realtransforms is deprecated and has no attribute "
            f"{name}. Try looking in scipy.fftpack instead.")
    # Valid legacy attribute: warn, then forward to the private module.
    warnings.warn(f"Please use `{name}` from the `scipy.fftpack` namespace, "
                  "the `scipy.fftpack.realtransforms` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_realtransforms, name)
| 826
| 28.535714
| 80
|
py
|
scipy
|
scipy-main/scipy/fftpack/__init__.py
|
"""
=========================================================
Legacy discrete Fourier transforms (:mod:`scipy.fftpack`)
=========================================================
.. legacy::
New code should use :mod:`scipy.fft`.
Fast Fourier Transforms (FFTs)
==============================
.. autosummary::
:toctree: generated/
fft - Fast (discrete) Fourier Transform (FFT)
ifft - Inverse FFT
fft2 - 2-D FFT
ifft2 - 2-D inverse FFT
fftn - N-D FFT
ifftn - N-D inverse FFT
rfft - FFT of strictly real-valued sequence
irfft - Inverse of rfft
dct - Discrete cosine transform
idct - Inverse discrete cosine transform
dctn - N-D Discrete cosine transform
idctn - N-D Inverse discrete cosine transform
dst - Discrete sine transform
idst - Inverse discrete sine transform
dstn - N-D Discrete sine transform
idstn - N-D Inverse discrete sine transform
Differential and pseudo-differential operators
==============================================
.. autosummary::
:toctree: generated/
diff - Differentiation and integration of periodic sequences
tilbert - Tilbert transform: cs_diff(x,h,h)
itilbert - Inverse Tilbert transform: sc_diff(x,h,h)
hilbert - Hilbert transform: cs_diff(x,inf,inf)
ihilbert - Inverse Hilbert transform: sc_diff(x,inf,inf)
cs_diff - cosh/sinh pseudo-derivative of periodic sequences
sc_diff - sinh/cosh pseudo-derivative of periodic sequences
ss_diff - sinh/sinh pseudo-derivative of periodic sequences
cc_diff - cosh/cosh pseudo-derivative of periodic sequences
shift - Shift periodic sequences
Helper functions
================
.. autosummary::
:toctree: generated/
fftshift - Shift the zero-frequency component to the center of the spectrum
ifftshift - The inverse of `fftshift`
fftfreq - Return the Discrete Fourier Transform sample frequencies
rfftfreq - DFT sample frequencies (for usage with rfft, irfft)
next_fast_len - Find the optimal length to zero-pad an FFT for speed
Note that ``fftshift``, ``ifftshift`` and ``fftfreq`` are numpy functions
exposed by ``fftpack``; importing them from ``numpy`` should be preferred.
Convolutions (:mod:`scipy.fftpack.convolve`)
============================================
.. module:: scipy.fftpack.convolve
.. autosummary::
:toctree: generated/
convolve
convolve_z
init_convolution_kernel
destroy_convolve_cache
"""
# Public API of scipy.fftpack; every name here is re-exported from the
# private implementation submodules imported below.
__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft',
           'fft2','ifft2',
           'diff',
           'tilbert','itilbert','hilbert','ihilbert',
           'sc_diff','cs_diff','cc_diff','ss_diff',
           'shift',
           'fftfreq', 'rfftfreq',
           'fftshift', 'ifftshift',
           'next_fast_len',
           'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn'
           ]
from ._basic import *
from ._pseudo_diffs import *
from ._helper import *
from ._realtransforms import *
# Deprecated namespaces, to be removed in v2.0.0
from . import basic, helper, pseudo_diffs, realtransforms
# Expose a pytest-based runner as scipy.fftpack.test(); remove the helper
# class itself so it does not leak into the public namespace.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 3,155
| 29.346154
| 78
|
py
|
scipy
|
scipy-main/scipy/fftpack/basic.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.fftpack` namespace for importing the functions
# included below.
import warnings
from . import _basic
__all__ = [ # noqa: F822
'fft','ifft','fftn','ifftn','rfft','irfft',
'fft2','ifft2'
]
def __dir__():
    # Limit dir() on this deprecated shim module to its public re-exports.
    return __all__
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): runs only for names not found by
    # normal lookup, letting us warn on access to the deprecated namespace.
    if name not in __all__:
        raise AttributeError(
            "scipy.fftpack.basic is deprecated and has no attribute "
            f"{name}. Try looking in scipy.fftpack instead.")
    # Valid legacy attribute: warn, then forward to the private module.
    warnings.warn(f"Please use `{name}` from the `scipy.fftpack` namespace, "
                  "the `scipy.fftpack.basic` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_basic, name)
| 790
| 26.275862
| 77
|
py
|
scipy
|
scipy-main/scipy/fftpack/_pseudo_diffs.py
|
"""
Differential and pseudo-differential operators.
"""
# Created by Pearu Peterson, September 2002
__all__ = ['diff',
'tilbert','itilbert','hilbert','ihilbert',
'cs_diff','cc_diff','sc_diff','ss_diff',
'shift']
from numpy import pi, asarray, sin, cos, sinh, cosh, tanh, iscomplexobj
from . import convolve
from scipy.fft._pocketfft.helper import _datacopied
_cache = {}
def diff(x, order=1, period=None, _cache=_cache):
    """Return the k-th derivative (or integral) of a periodic sequence x.

    If x_j and y_j are Fourier coefficients of periodic functions x and y,
    then ``y_j = pow(sqrt(-1)*j*2*pi/period, order) * x_j`` and ``y_0 = 0``
    for nonzero `order`.

    Parameters
    ----------
    x : array_like
        Input array.
    order : int, optional
        Order of differentiation. Default is 1. A negative order integrates,
        assuming ``x_0 == 0``.
    period : float, optional
        Assumed period of the sequence. Default is ``2*pi``.

    Notes
    -----
    If ``sum(x, axis=0) = 0`` then ``diff(diff(x, k), -k) == x`` within
    numerical accuracy. For odd `order` and even ``len(x)``, the Nyquist
    mode is taken to be zero.
    """
    arr = asarray(x)
    if order == 0:
        # The zeroth derivative is the identity.
        return arr
    if iscomplexobj(arr):
        # Linearity: handle real and imaginary parts independently.
        return diff(arr.real, order, period) + 1j*diff(arr.imag, order, period)
    scale = 2*pi/period if period is not None else 1.0
    n = len(x)
    omega = _cache.get((n, order, scale))
    if omega is None:
        # Crude bound on the kernel memo: wipe it once it exceeds 20 entries.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()
        def kernel(k, order=order, c=scale):
            return pow(c*k, order) if k else 0
        omega = convolve.init_convolution_kernel(n, kernel, d=order,
                                                 zero_nyquist=1)
        _cache[(n, order, scale)] = omega
    # Work in place only when asarray() already made a private copy.
    return convolve.convolve(arr, omega, swap_real_imag=order % 2,
                             overwrite_x=_datacopied(arr, x))
del _cache
_cache = {}
def tilbert(x, h, period=None, _cache=_cache):
    """Return the h-Tilbert transform of a periodic sequence x.

    If x_j and y_j are Fourier coefficients of periodic functions x and y,
    then ``y_j = sqrt(-1)*coth(j*h*2*pi/period) * x_j`` and ``y_0 = 0``.

    Parameters
    ----------
    x : array_like
        The input array to transform.
    h : float
        Parameter of the Tilbert transform.
    period : float, optional
        Assumed period of the sequence. Default period is ``2*pi``.

    Returns
    -------
    tilbert : ndarray
        The result of the transform.

    Notes
    -----
    If ``sum(x, axis=0) == 0`` and ``n = len(x)`` is odd, then
    ``tilbert(itilbert(x)) == x``. For ``2*pi*h/period`` around 10 or more,
    this is numerically indistinguishable from `hilbert`. For even
    ``len(x)``, the Nyquist mode of ``x`` is taken to be zero.
    """
    arr = asarray(x)
    if iscomplexobj(arr):
        # Linearity: handle real and imaginary parts independently.
        return tilbert(arr.real, h, period) + 1j*tilbert(arr.imag, h, period)
    if period is not None:
        h = h * 2 * pi / period
    n = len(x)
    omega = _cache.get((n, h))
    if omega is None:
        # Crude bound on the kernel memo: wipe it once it exceeds 20 entries.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()
        def kernel(k, h=h):
            return 1.0/tanh(h*k) if k else 0
        omega = convolve.init_convolution_kernel(n, kernel, d=1)
        _cache[(n, h)] = omega
    return convolve.convolve(arr, omega, swap_real_imag=1,
                             overwrite_x=_datacopied(arr, x))
del _cache
_cache = {}
def itilbert(x, h, period=None, _cache=_cache):
    """Return the inverse h-Tilbert transform of a periodic sequence x.

    If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x
    and y, then ``y_j = -sqrt(-1)*tanh(j*h*2*pi/period) * x_j`` and
    ``y_0 = 0``. For more details, see `tilbert`.
    """
    arr = asarray(x)
    if iscomplexobj(arr):
        # Linearity: handle real and imaginary parts independently.
        return itilbert(arr.real, h, period) + 1j*itilbert(arr.imag, h, period)
    if period is not None:
        h = h*2*pi/period
    n = len(x)
    omega = _cache.get((n, h))
    if omega is None:
        # Crude bound on the kernel memo: wipe it once it exceeds 20 entries.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()
        def kernel(k, h=h):
            return -tanh(h*k) if k else 0
        omega = convolve.init_convolution_kernel(n, kernel, d=1)
        _cache[(n, h)] = omega
    return convolve.convolve(arr, omega, swap_real_imag=1,
                             overwrite_x=_datacopied(arr, x))
del _cache
_cache = {}
def hilbert(x, _cache=_cache):
    """Return the Hilbert transform of a periodic sequence x.

    If x_j and y_j are Fourier coefficients of periodic functions x and y,
    then ``y_j = sqrt(-1)*sign(j) * x_j`` and ``y_0 = 0``.

    Parameters
    ----------
    x : array_like
        The input array; assumed periodic.
    _cache : dict, optional
        Memo of previously computed convolution kernels.

    Returns
    -------
    y : ndarray
        The transformed input.

    See Also
    --------
    scipy.signal.hilbert : Compute the analytic signal, using the Hilbert
                           transform.

    Notes
    -----
    If ``sum(x, axis=0) == 0`` then ``hilbert(ihilbert(x)) == x``. For even
    ``len(x)`` the Nyquist mode of x is taken to be zero. Unlike the common
    textbook convention (and `scipy.signal.hilbert`), no factor of -1 is
    applied.
    """
    arr = asarray(x)
    if iscomplexobj(arr):
        # Linearity: handle real and imaginary parts independently.
        return hilbert(arr.real) + 1j*hilbert(arr.imag)
    n = len(x)
    omega = _cache.get(n)
    if omega is None:
        # Crude bound on the kernel memo: wipe it once it exceeds 20 entries.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()
        def kernel(k):
            # sign(k) with sign(0) == 0.
            return 1.0 if k > 0 else (-1.0 if k < 0 else 0.0)
        omega = convolve.init_convolution_kernel(n, kernel, d=1)
        _cache[n] = omega
    return convolve.convolve(arr, omega, swap_real_imag=1,
                             overwrite_x=_datacopied(arr, x))
del _cache
def ihilbert(x):
    """Return the inverse Hilbert transform of a periodic sequence x.

    If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x
    and y, then ``y_j = -sqrt(-1)*sign(j) * x_j`` and ``y_0 = 0``.
    """
    # The Hilbert kernel is its own inverse up to a sign flip.
    forward = hilbert(x)
    return -forward
_cache = {}
def cs_diff(x, a, b, period=None, _cache=_cache):
    """Return the (a,b)-cosh/sinh pseudo-derivative of a periodic sequence.

    If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x
    and y, then ``y_j = -sqrt(-1)*cosh(j*a*2*pi/period)/sinh(j*b*2*pi/period)
    * x_j`` and ``y_0 = 0``.

    Parameters
    ----------
    x : array_like
        The array to take the pseudo-derivative from.
    a, b : float
        Parameters of the cosh/sinh pseudo-differential operator.
    period : float, optional
        The period of the sequence. Default period is ``2*pi``.

    Returns
    -------
    cs_diff : ndarray
        Pseudo-derivative of periodic sequence `x`.

    Notes
    -----
    For even ``len(x)``, the Nyquist mode of `x` is taken as zero.
    """
    arr = asarray(x)
    if iscomplexobj(arr):
        # Linearity: handle real and imaginary parts independently.
        return cs_diff(arr.real, a, b, period) + 1j*cs_diff(arr.imag, a, b, period)
    if period is not None:
        a = a*2*pi/period
        b = b*2*pi/period
    n = len(x)
    omega = _cache.get((n, a, b))
    if omega is None:
        # Crude bound on the kernel memo: wipe it once it exceeds 20 entries.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()
        def kernel(k, a=a, b=b):
            return -cosh(a*k)/sinh(b*k) if k else 0
        omega = convolve.init_convolution_kernel(n, kernel, d=1)
        _cache[(n, a, b)] = omega
    return convolve.convolve(arr, omega, swap_real_imag=1,
                             overwrite_x=_datacopied(arr, x))
del _cache
_cache = {}
def sc_diff(x, a, b, period=None, _cache=_cache):
    """Return the (a,b)-sinh/cosh pseudo-derivative of a periodic sequence x.

    If x_j and y_j are Fourier coefficients of periodic functions x and y,
    then ``y_j = sqrt(-1)*sinh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j``
    and ``y_0 = 0``.

    Parameters
    ----------
    x : array_like
        Input array.
    a,b : float
        Parameters of the sinh/cosh pseudo-differential operator.
    period : float, optional
        The period of the sequence x. Default is ``2*pi``.

    Notes
    -----
    ``sc_diff(cs_diff(x,a,b),b,a) == x``. For even ``len(x)``, the Nyquist
    mode of x is taken as zero.
    """
    arr = asarray(x)
    if iscomplexobj(arr):
        # Linearity: handle real and imaginary parts independently.
        return sc_diff(arr.real, a, b, period) + 1j*sc_diff(arr.imag, a, b, period)
    if period is not None:
        a = a*2*pi/period
        b = b*2*pi/period
    n = len(x)
    omega = _cache.get((n, a, b))
    if omega is None:
        # Crude bound on the kernel memo: wipe it once it exceeds 20 entries.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()
        def kernel(k, a=a, b=b):
            return sinh(a*k)/cosh(b*k) if k else 0
        omega = convolve.init_convolution_kernel(n, kernel, d=1)
        _cache[(n, a, b)] = omega
    return convolve.convolve(arr, omega, swap_real_imag=1,
                             overwrite_x=_datacopied(arr, x))
del _cache
_cache = {}
def ss_diff(x, a, b, period=None, _cache=_cache):
    """Return the (a,b)-sinh/sinh pseudo-derivative of a periodic sequence x.

    If x_j and y_j are Fourier coefficients of periodic functions x and y,
    then ``y_j = sinh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j`` and
    ``y_0 = a/b * x_0``.

    Parameters
    ----------
    x : array_like
        The array to take the pseudo-derivative from.
    a,b
        Parameters of the sinh/sinh pseudo-differential operator.
    period : float, optional
        The period of the sequence x. Default is ``2*pi``.

    Notes
    -----
    ``ss_diff(ss_diff(x,a,b),b,a) == x``
    """
    arr = asarray(x)
    if iscomplexobj(arr):
        # Linearity: handle real and imaginary parts independently.
        return ss_diff(arr.real, a, b, period) + 1j*ss_diff(arr.imag, a, b, period)
    if period is not None:
        a = a*2*pi/period
        b = b*2*pi/period
    n = len(x)
    omega = _cache.get((n, a, b))
    if omega is None:
        # Crude bound on the kernel memo: wipe it once it exceeds 20 entries.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()
        def kernel(k, a=a, b=b):
            # Unlike the other operators the zero mode is scaled, not zeroed.
            return sinh(a*k)/sinh(b*k) if k else float(a)/b
        omega = convolve.init_convolution_kernel(n, kernel)
        _cache[(n, a, b)] = omega
    return convolve.convolve(arr, omega, overwrite_x=_datacopied(arr, x))
del _cache
_cache = {}
def cc_diff(x, a, b, period=None, _cache=_cache):
    """Return the (a,b)-cosh/cosh pseudo-derivative of a periodic sequence.

    If x_j and y_j are Fourier coefficients of periodic functions x and y,
    then ``y_j = cosh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j``.

    Parameters
    ----------
    x : array_like
        The array to take the pseudo-derivative from.
    a,b : float
        Parameters of the cosh/cosh pseudo-differential operator.
    period : float, optional
        The period of the sequence x. Default is ``2*pi``.

    Returns
    -------
    cc_diff : ndarray
        Pseudo-derivative of periodic sequence `x`.

    Notes
    -----
    ``cc_diff(cc_diff(x,a,b),b,a) == x``
    """
    arr = asarray(x)
    if iscomplexobj(arr):
        # Linearity: handle real and imaginary parts independently.
        return cc_diff(arr.real, a, b, period) + 1j*cc_diff(arr.imag, a, b, period)
    if period is not None:
        a = a*2*pi/period
        b = b*2*pi/period
    n = len(x)
    omega = _cache.get((n, a, b))
    if omega is None:
        # Crude bound on the kernel memo: wipe it once it exceeds 20 entries.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()
        def kernel(k, a=a, b=b):
            # cosh is nonzero everywhere, so no special case at k == 0.
            return cosh(a*k)/cosh(b*k)
        omega = convolve.init_convolution_kernel(n, kernel)
        _cache[(n, a, b)] = omega
    return convolve.convolve(arr, omega, overwrite_x=_datacopied(arr, x))
del _cache
_cache = {}
def shift(x, a, period=None, _cache=_cache):
    """Shift a periodic sequence x by a: y(u) = x(u+a).

    If x_j and y_j are Fourier coefficients of periodic functions x and y,
    then ``y_j = exp(j*a*2*pi/period*sqrt(-1)) * x_j``.

    Parameters
    ----------
    x : array_like
        The array to shift.
    a : float
        The shift amount.
    period : float, optional
        The period of the sequences x and y. Default period is ``2*pi``.
    """
    arr = asarray(x)
    if iscomplexobj(arr):
        # Linearity: handle real and imaginary parts independently.
        return shift(arr.real, a, period) + 1j*shift(arr.imag, a, period)
    if period is not None:
        a = a*2*pi/period
    n = len(x)
    kernels = _cache.get((n, a))
    if kernels is None:
        # Crude bound on the kernel memo: wipe it once it exceeds 20 entries.
        if len(_cache) > 20:
            while _cache:
                _cache.popitem()
        def kernel_real(k, a=a):
            return cos(a*k)
        def kernel_imag(k, a=a):
            return sin(a*k)
        # The complex exponential kernel is stored as separate cos/sin parts.
        omega_real = convolve.init_convolution_kernel(n, kernel_real, d=0,
                                                      zero_nyquist=0)
        omega_imag = convolve.init_convolution_kernel(n, kernel_imag, d=1,
                                                      zero_nyquist=0)
        kernels = (omega_real, omega_imag)
        _cache[(n, a)] = kernels
    omega_real, omega_imag = kernels
    return convolve.convolve_z(arr, omega_real, omega_imag,
                               overwrite_x=_datacopied(arr, x))
del _cache
| 14,200
| 24.726449
| 80
|
py
|
scipy
|
scipy-main/scipy/fftpack/tests/test_real_transforms.py
|
from os.path import join, dirname
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_equal
import pytest
from pytest import raises as assert_raises
from scipy.fftpack._realtransforms import (
dct, idct, dst, idst, dctn, idctn, dstn, idstn)
# Matlab reference data: inputs x0..x7 with their expected DCT-II outputs.
MDATA = np.load(join(dirname(__file__), 'test.npz'))
X = [MDATA['x%d' % i] for i in range(8)]
Y = [MDATA['y%d' % i] for i in range(8)]
# FFTW reference data: the data are organized as follows:
# * SIZES is an array containing all available sizes
# * for every type (1, 2, 3, 4) and every size, the array dct_type_size
#   contains the output of the DCT applied to the input
#   np.linspace(0, size-1, size)
FFTWDATA_DOUBLE = np.load(join(dirname(__file__), 'fftw_double_ref.npz'))
FFTWDATA_SINGLE = np.load(join(dirname(__file__), 'fftw_single_ref.npz'))
FFTWDATA_SIZES = FFTWDATA_DOUBLE['sizes']
def fftw_dct_ref(type, size, dt):
    """Return (input, expected DCT output, dtype) from the FFTW reference."""
    grid = np.linspace(0, size-1, size).astype(dt)
    # Half precision promotes to single; integers promote to double.
    dt = np.result_type(np.float32, dt)
    if dt == np.float32:
        table = FFTWDATA_SINGLE
    elif dt == np.double:
        table = FFTWDATA_DOUBLE
    else:
        raise ValueError()
    expected = table['dct_%d_%d' % (type, size)].astype(dt)
    return grid, expected, dt
def fftw_dst_ref(type, size, dt):
    """Return (input, expected DST output, dtype) from the FFTW reference."""
    grid = np.linspace(0, size-1, size).astype(dt)
    # Half precision promotes to single; integers promote to double.
    dt = np.result_type(np.float32, dt)
    if dt == np.float32:
        table = FFTWDATA_SINGLE
    elif dt == np.double:
        table = FFTWDATA_DOUBLE
    else:
        raise ValueError()
    expected = table['dst_%d_%d' % (type, size)].astype(dt)
    return grid, expected, dt
def dct_2d_ref(x, **kwargs):
    """Reference 2-D DCT: apply the 1-D transform along rows, then columns."""
    out = np.array(x, copy=True)
    for r in range(out.shape[0]):
        out[r, :] = dct(out[r, :], **kwargs)
    for c in range(out.shape[1]):
        out[:, c] = dct(out[:, c], **kwargs)
    return out
def idct_2d_ref(x, **kwargs):
    """Reference 2-D IDCT: apply the 1-D inverse along rows, then columns."""
    out = np.array(x, copy=True)
    for r in range(out.shape[0]):
        out[r, :] = idct(out[r, :], **kwargs)
    for c in range(out.shape[1]):
        out[:, c] = idct(out[:, c], **kwargs)
    return out
def dst_2d_ref(x, **kwargs):
    """Reference 2-D DST: apply the 1-D transform along rows, then columns."""
    out = np.array(x, copy=True)
    for r in range(out.shape[0]):
        out[r, :] = dst(out[r, :], **kwargs)
    for c in range(out.shape[1]):
        out[:, c] = dst(out[:, c], **kwargs)
    return out
def idst_2d_ref(x, **kwargs):
    """Reference 2-D IDST: apply the 1-D inverse along rows, then columns."""
    out = np.array(x, copy=True)
    for r in range(out.shape[0]):
        out[r, :] = idst(out[r, :], **kwargs)
    for c in range(out.shape[1]):
        out[:, c] = idst(out[:, c], **kwargs)
    return out
def naive_dct1(x, norm=None):
    """Textbook O(N^2) DCT-I, used as an independent reference."""
    vec = np.array(x, copy=True)
    N = len(vec)
    M = N-1
    out = np.zeros(N)
    edge_w, mid_w = 1, 2
    if norm == 'ortho':
        edge_w = np.sqrt(1.0/M)
        mid_w = np.sqrt(2.0/M)
    for k in range(N):
        # Interior samples, then the two endpoint terms with their weights.
        for j in range(1, N-1):
            out[k] += mid_w*vec[j]*np.cos(np.pi*j*k/M)
        out[k] += edge_w * vec[0]
        out[k] += edge_w * vec[N-1] * (1 if k % 2 == 0 else -1)
    if norm == 'ortho':
        # Endpoint outputs carry an extra 1/sqrt(2) in the orthonormal form.
        out[0] *= 1/np.sqrt(2)
        out[N-1] *= 1/np.sqrt(2)
    return out
def naive_dst1(x, norm=None):
    """Textbook O(N^2) DST-I, used as an independent reference."""
    vec = np.array(x, copy=True)
    N = len(vec)
    M = N+1
    out = np.zeros(N)
    for k in range(N):
        for j in range(N):
            out[k] += 2*vec[j]*np.sin(np.pi*(j+1.0)*(k+1.0)/M)
    if norm == 'ortho':
        out *= np.sqrt(0.5/M)
    return out
def naive_dct4(x, norm=None):
    """Textbook O(N^2) DCT-IV, used as an independent reference."""
    vec = np.array(x, copy=True)
    N = len(vec)
    out = np.zeros(N)
    for k in range(N):
        for j in range(N):
            out[k] += vec[j]*np.cos(np.pi*(j+0.5)*(k+0.5)/(N))
    # Orthonormal scaling sqrt(2/N); the raw definition carries a factor 2.
    if norm == 'ortho':
        out *= np.sqrt(2.0/N)
    else:
        out *= 2
    return out
def naive_dst4(x, norm=None):
    """Textbook O(N^2) DST-IV, used as an independent reference."""
    vec = np.array(x, copy=True)
    N = len(vec)
    out = np.zeros(N)
    for k in range(N):
        for j in range(N):
            out[k] += vec[j]*np.sin(np.pi*(j+0.5)*(k+0.5)/(N))
    # Orthonormal scaling sqrt(2/N); the raw definition carries a factor 2.
    if norm == 'ortho':
        out *= np.sqrt(2.0/N)
    else:
        out *= 2
    return out
class TestComplex:
    # The real transforms are linear, so for complex input they should act on
    # real and imaginary parts separately: f(1j*x) == 1j*f(x).
    def test_dct_complex64(self):
        y = dct(1j*np.arange(5, dtype=np.complex64))
        x = 1j*dct(np.arange(5))
        assert_array_almost_equal(x, y)
    def test_dct_complex(self):
        y = dct(np.arange(5)*1j)
        x = 1j*dct(np.arange(5))
        assert_array_almost_equal(x, y)
    def test_idct_complex(self):
        y = idct(np.arange(5)*1j)
        x = 1j*idct(np.arange(5))
        assert_array_almost_equal(x, y)
    def test_dst_complex64(self):
        y = dst(np.arange(5, dtype=np.complex64)*1j)
        x = 1j*dst(np.arange(5))
        assert_array_almost_equal(x, y)
    def test_dst_complex(self):
        y = dst(np.arange(5)*1j)
        x = 1j*dst(np.arange(5))
        assert_array_almost_equal(x, y)
    def test_idst_complex(self):
        y = idst(np.arange(5)*1j)
        x = 1j*idst(np.arange(5))
        assert_array_almost_equal(x, y)
class _TestDCTBase:
    # Shared DCT checks; concrete subclasses set dtype (rdt), the decimal
    # tolerance (dec), and the DCT type.
    def setup_method(self):
        self.rdt = None
        self.dec = 14
        self.type = None
    def test_definition(self):
        # Compare against precomputed FFTW reference outputs for every size.
        for i in FFTWDATA_SIZES:
            x, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
            y = dct(x, type=self.type)
            assert_equal(y.dtype, dt)
            # XXX: we divide by np.max(y) because the tests fail otherwise. We
            # should really use something like assert_array_approx_equal. The
            # difference is due to fftw using a better algorithm w.r.t error
            # propagation compared to the ones from fftpack.
            assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
                                      err_msg="Size %d failed" % i)
    def test_axis(self):
        # Transforming along the last axis (default) and along axis 0 must
        # match per-row / per-column 1-D transforms.
        nt = 2
        for i in [7, 8, 9, 16, 32, 64]:
            x = np.random.randn(nt, i)
            y = dct(x, type=self.type)
            for j in range(nt):
                assert_array_almost_equal(y[j], dct(x[j], type=self.type),
                                          decimal=self.dec)
            x = x.T
            y = dct(x, axis=0, type=self.type)
            for j in range(nt):
                assert_array_almost_equal(y[:,j], dct(x[:,j], type=self.type),
                                          decimal=self.dec)
class _TestDCTIBase(_TestDCTBase):
    def test_definition_ortho(self):
        # Test orthonormal mode against the textbook O(N^2) DCT-I.
        dt = np.result_type(np.float32, self.rdt)
        for xr in X:
            x = np.array(xr, dtype=self.rdt)
            y = dct(x, norm='ortho', type=1)
            y2 = naive_dct1(x, norm='ortho')
            assert_equal(y.dtype, dt)
            assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
class _TestDCTIIBase(_TestDCTBase):
    def test_definition_matlab(self):
        # Test correspondence with MATLAB's dct (orthonormal mode).
        dt = np.result_type(np.float32, self.rdt)
        for xr, yr in zip(X, Y):
            x = np.array(xr, dtype=dt)
            y = dct(x, norm="ortho", type=2)
            assert_equal(y.dtype, dt)
            assert_array_almost_equal(y, yr, decimal=self.dec)
class _TestDCTIIIBase(_TestDCTBase):
    def test_definition_ortho(self):
        # In orthonormal mode DCT-III is the inverse of DCT-II: round-trip
        # must recover the input.
        dt = np.result_type(np.float32, self.rdt)
        for xr in X:
            x = np.array(xr, dtype=self.rdt)
            y = dct(x, norm='ortho', type=2)
            xi = dct(y, norm="ortho", type=3)
            assert_equal(xi.dtype, dt)
            assert_array_almost_equal(xi, x, decimal=self.dec)
class _TestDCTIVBase(_TestDCTBase):
    def test_definition_ortho(self):
        # Test orthonormal mode against the textbook O(N^2) DCT-IV.
        dt = np.result_type(np.float32, self.rdt)
        for xr in X:
            x = np.array(xr, dtype=self.rdt)
            y = dct(x, norm='ortho', type=4)
            y2 = naive_dct4(x, norm='ortho')
            assert_equal(y.dtype, dt)
            assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
class TestDCTIDouble(_TestDCTIBase):
    # DCT-I, float64 input.
    def setup_method(self):
        self.rdt = np.double
        self.dec = 10
        self.type = 1
class TestDCTIFloat(_TestDCTIBase):
    # DCT-I, float32 input (looser tolerance).
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 4
        self.type = 1
class TestDCTIInt(_TestDCTIBase):
    # DCT-I, integer input (promoted to double internally).
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 1
class TestDCTIIDouble(_TestDCTIIBase):
    # DCT-II, float64 input.
    def setup_method(self):
        self.rdt = np.double
        self.dec = 10
        self.type = 2
class TestDCTIIFloat(_TestDCTIIBase):
    # DCT-II, float32 input.
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        self.type = 2
class TestDCTIIInt(_TestDCTIIBase):
    # DCT-II, integer input.
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 2
class TestDCTIIIDouble(_TestDCTIIIBase):
    # DCT-III, float64 input.
    def setup_method(self):
        self.rdt = np.double
        self.dec = 14
        self.type = 3
class TestDCTIIIFloat(_TestDCTIIIBase):
    # DCT-III, float32 input.
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        self.type = 3
class TestDCTIIIInt(_TestDCTIIIBase):
    # DCT-III, integer input.
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 3
class TestDCTIVDouble(_TestDCTIVBase):
    # DCT-IV, float64 input.
    def setup_method(self):
        self.rdt = np.double
        self.dec = 12
        # Fixed: was 3, which made the inherited test_definition re-test
        # DCT-III instead of DCT-IV against the FFTW type-4 reference
        # (compare TestIDCTIVDouble, which correctly uses 4).
        self.type = 4
class TestDCTIVFloat(_TestDCTIVBase):
    # DCT-IV, float32 input.
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        # Fixed: was 3 (copy-paste error); this class must exercise type 4.
        self.type = 4
class TestDCTIVInt(_TestDCTIVBase):
    # DCT-IV, integer input.
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        # Fixed: was 3 (copy-paste error); this class must exercise type 4.
        self.type = 4
class _TestIDCTBase:
    # Shared IDCT checks: applying idct to the FFTW forward reference output
    # and undoing the transform's overall scale must recover the input.
    def setup_method(self):
        self.rdt = None
        self.dec = 14
        self.type = None
    def test_definition(self):
        for i in FFTWDATA_SIZES:
            xr, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
            x = idct(yr, type=self.type)
            # Unnormalized round-trip scale: 2*(N-1) for type 1, 2*N otherwise.
            if self.type == 1:
                x /= 2 * (i-1)
            else:
                x /= 2 * i
            assert_equal(x.dtype, dt)
            # XXX: we divide by np.max(y) because the tests fail otherwise. We
            # should really use something like assert_array_approx_equal. The
            # difference is due to fftw using a better algorithm w.r.t error
            # propagation compared to the ones from fftpack.
            assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
                                      err_msg="Size %d failed" % i)
class TestIDCTIDouble(_TestIDCTBase):
    # IDCT-I, float64 input.
    def setup_method(self):
        self.rdt = np.double
        self.dec = 10
        self.type = 1
class TestIDCTIFloat(_TestIDCTBase):
    # IDCT-I, float32 input.
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 4
        self.type = 1
class TestIDCTIInt(_TestIDCTBase):
    # IDCT-I, integer input.
    def setup_method(self):
        self.rdt = int
        self.dec = 4
        self.type = 1
class TestIDCTIIDouble(_TestIDCTBase):
    # IDCT-II, float64 input.
    def setup_method(self):
        self.rdt = np.double
        self.dec = 10
        self.type = 2
class TestIDCTIIFloat(_TestIDCTBase):
    # IDCT-II, float32 input.
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        self.type = 2
class TestIDCTIIInt(_TestIDCTBase):
    # IDCT-II, integer input.
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 2
class TestIDCTIIIDouble(_TestIDCTBase):
    # IDCT-III, float64 input.
    def setup_method(self):
        self.rdt = np.double
        self.dec = 14
        self.type = 3
class TestIDCTIIIFloat(_TestIDCTBase):
    # IDCT-III, float32 input.
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        self.type = 3
class TestIDCTIIIInt(_TestIDCTBase):
    # IDCT-III, integer input.
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 3
class TestIDCTIVDouble(_TestIDCTBase):
    # IDCT-IV, float64 input.
    def setup_method(self):
        self.rdt = np.double
        self.dec = 12
        self.type = 4
class TestIDCTIVFloat(_TestIDCTBase):
    # IDCT-IV, float32 input.
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        self.type = 4
class TestIDCTIVInt(_TestIDCTBase):
    # IDCT-IV, integer input.
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 4
class _TestDSTBase:
    # Shared DST checks; concrete subclasses fill in the three knobs below.
    def setup_method(self):
        self.rdt = None  # dtype
        self.dec = None  # number of decimals to match
        self.type = None  # dst type
    def test_definition(self):
        # Compare against precomputed FFTW reference outputs for every size.
        for i in FFTWDATA_SIZES:
            xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt)
            y = dst(xr, type=self.type)
            assert_equal(y.dtype, dt)
            # XXX: we divide by np.max(y) because the tests fail otherwise. We
            # should really use something like assert_array_approx_equal. The
            # difference is due to fftw using a better algorithm w.r.t error
            # propagation compared to the ones from fftpack.
            assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
                                      err_msg="Size %d failed" % i)
class _TestDSTIBase(_TestDSTBase):
    def test_definition_ortho(self):
        # Test orthonormal mode against the textbook O(N^2) DST-I.
        dt = np.result_type(np.float32, self.rdt)
        for xr in X:
            x = np.array(xr, dtype=self.rdt)
            y = dst(x, norm='ortho', type=1)
            y2 = naive_dst1(x, norm='ortho')
            assert_equal(y.dtype, dt)
            assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
class _TestDSTIVBase(_TestDSTBase):
    def test_definition_ortho(self):
        # Test orthonormal mode against the textbook O(N^2) DST-IV.
        dt = np.result_type(np.float32, self.rdt)
        for xr in X:
            x = np.array(xr, dtype=self.rdt)
            y = dst(x, norm='ortho', type=4)
            y2 = naive_dst4(x, norm='ortho')
            assert_equal(y.dtype, dt)
            assert_array_almost_equal(y, y2, decimal=self.dec)
class TestDSTIDouble(_TestDSTIBase):
    # DST-I, float64 input.
    def setup_method(self):
        self.rdt = np.double
        self.dec = 12
        self.type = 1
class TestDSTIFloat(_TestDSTIBase):
    # DST-I, float32 input.
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 4
        self.type = 1
class TestDSTIInt(_TestDSTIBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 1
class TestDSTIIDouble(_TestDSTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 14
self.type = 2
class TestDSTIIFloat(_TestDSTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 6
self.type = 2
class TestDSTIIInt(_TestDSTBase):
def setup_method(self):
self.rdt = int
self.dec = 6
self.type = 2
class TestDSTIIIDouble(_TestDSTBase):
def setup_method(self):
self.rdt = np.double
self.dec = 14
self.type = 3
class TestDSTIIIFloat(_TestDSTBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 7
self.type = 3
class TestDSTIIIInt(_TestDSTBase):
def setup_method(self):
self.rdt = int
self.dec = 7
self.type = 3
class TestDSTIVDouble(_TestDSTIVBase):
def setup_method(self):
self.rdt = np.double
self.dec = 12
self.type = 4
class TestDSTIVFloat(_TestDSTIVBase):
def setup_method(self):
self.rdt = np.float32
self.dec = 4
self.type = 4
class TestDSTIVInt(_TestDSTIVBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 4
class _TestIDSTBase:
    """Base class checking that idst() inverts the stored FFTW forward DST."""
    def setup_method(self):
        self.rdt = None   # input dtype
        self.dec = None   # decimal precision required
        self.type = None  # DST type (1-4)
    def test_definition(self):
        for i in FFTWDATA_SIZES:
            xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt)
            x = idst(yr, type=self.type)
            # Undo fftpack's unnormalized-inverse scaling (DST-I uses 2*(n+1)).
            if self.type == 1:
                x /= 2 * (i+1)
            else:
                x /= 2 * i
            assert_equal(x.dtype, dt)
            # XXX: we divide by np.max(x) because the tests fail otherwise. We
            # should really use something like assert_array_approx_equal. The
            # difference is due to fftw using a better algorithm w.r.t error
            # propagation compared to the ones from fftpack.
            assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
                                      err_msg="Size %d failed" % i)
# Concrete parametrizations of _TestIDSTBase, one class per (dtype, type).
class TestIDSTIDouble(_TestIDSTBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 12
        self.type = 1
class TestIDSTIFloat(_TestIDSTBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 4
        self.type = 1
class TestIDSTIInt(_TestIDSTBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 4
        self.type = 1
class TestIDSTIIDouble(_TestIDSTBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 14
        self.type = 2
class TestIDSTIIFloat(_TestIDSTBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 6
        self.type = 2
class TestIDSTIIInt(_TestIDSTBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 6
        self.type = 2
class TestIDSTIIIDouble(_TestIDSTBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 14
        self.type = 3
class TestIDSTIIIFloat(_TestIDSTBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 6
        self.type = 3
class TestIDSTIIIInt(_TestIDSTBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 6
        self.type = 3
class TestIDSTIVDouble(_TestIDSTBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 12
        self.type = 4
class TestIDSTIVFloat(_TestIDSTBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 6
        self.type = 4
# Renamed from "TestIDSTIVnt": the original name was a typo; every sibling
# class follows the Test<transform><type><dtype> pattern (cf. TestDSTIVInt).
# Nothing in this file references the class by name, so the rename is safe.
class TestIDSTIVInt(_TestIDSTBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 6
        self.type = 4
class TestOverwrite:
    """Check input overwrite behavior."""
    # Only real dtypes: dct/dst reject complex input.
    real_dtypes = [np.float32, np.float64]
    def _check(self, x, routine, type, fftsize, axis, norm, overwrite_x, **kw):
        # Run the transform on a copy; unless overwriting was explicitly
        # requested, the copy must come back untouched.
        copied = x.copy()
        routine(copied, type, fftsize, axis, norm, overwrite_x=overwrite_x)
        sig = "{}({}{!r}, {!r}, axis={!r}, overwrite_x={!r})".format(
            routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
        if overwrite_x:
            return
        assert_equal(copied, x, err_msg="spurious overwrite in %s" % sig)
    def _check_1d(self, routine, dtype, shape, axis):
        # Deterministic random input of the requested dtype.
        np.random.seed(1234)
        if np.issubdtype(dtype, np.complexfloating):
            sample = np.random.randn(*shape) + 1j*np.random.randn(*shape)
        else:
            sample = np.random.randn(*shape)
        sample = sample.astype(dtype)
        # Exercise every (transform type, overwrite flag, norm) combination.
        for transform_type in [1, 2, 3, 4]:
            for overwrite_x in [True, False]:
                for norm in [None, 'ortho']:
                    self._check(sample, routine, transform_type, None, axis,
                                norm, overwrite_x)
    def test_dct(self):
        for dtype in self.real_dtypes:
            for shape, axis in [((16,), -1), ((16, 2), 0), ((2, 16), 1)]:
                self._check_1d(dct, dtype, shape, axis)
    def test_idct(self):
        for dtype in self.real_dtypes:
            for shape, axis in [((16,), -1), ((16, 2), 0), ((2, 16), 1)]:
                self._check_1d(idct, dtype, shape, axis)
    def test_dst(self):
        for dtype in self.real_dtypes:
            for shape, axis in [((16,), -1), ((16, 2), 0), ((2, 16), 1)]:
                self._check_1d(dst, dtype, shape, axis)
    def test_idst(self):
        for dtype in self.real_dtypes:
            for shape, axis in [((16,), -1), ((16, 2), 0), ((2, 16), 1)]:
                self._check_1d(idst, dtype, shape, axis)
class Test_DCTN_IDCTN:
    """Tests for the n-dimensional transforms dctn/idctn and dstn/idstn."""
    dec = 14                    # decimal precision for round-trip checks
    dct_type = [1, 2, 3, 4]     # all supported transform types
    norms = [None, 'ortho']
    rstate = np.random.RandomState(1234)  # fixed seed: deterministic data
    shape = (32, 16)
    data = rstate.randn(*shape)
    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
                                                   (dstn, idstn)])
    @pytest.mark.parametrize('axes', [None,
                                      1, (1,), [1],
                                      0, (0,), [0],
                                      (0, 1), [0, 1],
                                      (-2, -1), [-2, -1]])
    @pytest.mark.parametrize('dct_type', dct_type)
    @pytest.mark.parametrize('norm', ['ortho'])
    def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm):
        # forward followed by inverse must reproduce the input for every
        # accepted spelling of the axes argument (int, tuple, list, None).
        tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm)
        tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm)
        assert_array_almost_equal(self.data, tmp, decimal=12)
    @pytest.mark.parametrize('fforward,fforward_ref', [(dctn, dct_2d_ref),
                                                       (dstn, dst_2d_ref)])
    @pytest.mark.parametrize('dct_type', dct_type)
    @pytest.mark.parametrize('norm', norms)
    def test_dctn_vs_2d_reference(self, fforward, fforward_ref,
                                  dct_type, norm):
        # compare the n-D implementation against a 2-D reference built from
        # successive 1-D transforms.
        y1 = fforward(self.data, type=dct_type, axes=None, norm=norm)
        y2 = fforward_ref(self.data, type=dct_type, norm=norm)
        assert_array_almost_equal(y1, y2, decimal=11)
    @pytest.mark.parametrize('finverse,finverse_ref', [(idctn, idct_2d_ref),
                                                       (idstn, idst_2d_ref)])
    @pytest.mark.parametrize('dct_type', dct_type)
    @pytest.mark.parametrize('norm', [None, 'ortho'])
    def test_idctn_vs_2d_reference(self, finverse, finverse_ref,
                                   dct_type, norm):
        fdata = dctn(self.data, type=dct_type, norm=norm)
        y1 = finverse(fdata, type=dct_type, norm=norm)
        y2 = finverse_ref(fdata, type=dct_type, norm=norm)
        assert_array_almost_equal(y1, y2, decimal=11)
    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
                                                   (dstn, idstn)])
    def test_axes_and_shape(self, fforward, finverse):
        # mismatched lengths of the shape and axes arguments must raise.
        with assert_raises(ValueError,
                           match="when given, axes and shape arguments"
                           " have to be of the same length"):
            fforward(self.data, shape=self.data.shape[0], axes=(0, 1))
        with assert_raises(ValueError,
                           match="when given, axes and shape arguments"
                           " have to be of the same length"):
            fforward(self.data, shape=self.data.shape[0], axes=None)
        with assert_raises(ValueError,
                           match="when given, axes and shape arguments"
                           " have to be of the same length"):
            fforward(self.data, shape=self.data.shape, axes=0)
    @pytest.mark.parametrize('fforward', [dctn, dstn])
    def test_shape(self, fforward):
        # shape argument zero-pads/truncates the transform size.
        tmp = fforward(self.data, shape=(128, 128), axes=None)
        assert_equal(tmp.shape, (128, 128))
    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
                                                   (dstn, idstn)])
    @pytest.mark.parametrize('axes', [1, (1,), [1],
                                      0, (0,), [0]])
    def test_shape_is_none_with_axes(self, fforward, finverse, axes):
        tmp = fforward(self.data, shape=None, axes=axes, norm='ortho')
        tmp = finverse(tmp, shape=None, axes=axes, norm='ortho')
        assert_array_almost_equal(self.data, tmp, decimal=self.dec)
| 23,953
| 28.355392
| 86
|
py
|
scipy
|
scipy-main/scipy/fftpack/tests/gen_fftw_ref.py
|
from subprocess import Popen, PIPE, STDOUT
import numpy as np
# Transform sizes for which FFTW reference data is generated.
SZ = [2, 3, 4, 8, 12, 15, 16, 17, 32, 64, 128, 256, 512, 1024]
def gen_data(dt):
    """Run the FFTW helper binary for dtype ``dt`` and collect its output.

    Returns a dict mapping transform type (1-8) to a dict mapping size to a
    1-D array of reference values; types 1-4 are DCTs, 5-8 are DSTs.
    Raises ValueError for an unsupported dtype.
    """
    arrays = {}
    if dt == np.float128:
        pg = './fftw_longdouble'
    elif dt == np.double:
        pg = './fftw_double'
    elif dt == np.float32:
        pg = './fftw_single'
    else:
        raise ValueError("unknown: %s" % dt)
    # Generate test data using FFTW for reference
    for type in [1, 2, 3, 4, 5, 6, 7, 8]:
        arrays[type] = {}
        for sz in SZ:
            a = Popen([pg, str(type), str(sz)], stdout=PIPE, stderr=STDOUT)
            st = [i.decode('ascii').strip() for i in a.stdout.readlines()]
            a.wait()  # reap the child so no zombie processes accumulate
            # np.fromstring(..., sep=',') is deprecated; split the joined
            # text fields and let np.array parse them with dtype ``dt``
            # (this keeps full precision for float128 as well).
            arrays[type][sz] = np.array(",".join(st).split(','), dtype=dt)
    return arrays
# Generate reference data for each precision and save it in npz format.
# The helper binary emits DCT data for transform types 1-4 and DST data for
# types 5-8 (stored under dst types 1-4).  The three precisions were
# previously three copy-pasted sections; a single loop does the same work.
for dt, filename in [(np.float32, 'fftw_single_ref'),
                     (np.double, 'fftw_double_ref'),
                     (np.float128, 'fftw_longdouble_ref')]:
    data = gen_data(dt)
    d = {'sizes': SZ}
    for type in [1, 2, 3, 4]:
        for sz in SZ:
            d['dct_%d_%d' % (type, sz)] = data[type][sz]
    for type in [5, 6, 7, 8]:
        for sz in SZ:
            d['dst_%d_%d' % (type-4, sz)] = data[type][sz]
    np.savez(filename, **d)
| 1,923
| 24.653333
| 77
|
py
|
scipy
|
scipy-main/scipy/fftpack/tests/test_basic.py
|
# Created by Pearu Peterson, September 2002
from numpy.testing import (assert_, assert_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_array_less)
import pytest
from pytest import raises as assert_raises
from scipy.fftpack import ifft, fft, fftn, ifftn, rfft, irfft, fft2
from numpy import (arange, array, asarray, zeros, dot, exp, pi,
swapaxes, double, cdouble)
import numpy as np
import numpy.fft
from numpy.random import rand
# "large" composite numbers supported by FFTPACK
# Transform lengths used in the accuracy tests below: composites exercise
# FFTPACK's fast mixed-radix paths, primes its slow generic path.
LARGE_COMPOSITE_SIZES = [
    2**13,
    2**5 * 3**5,
    2**3 * 3**3 * 5**2,
]
SMALL_COMPOSITE_SIZES = [
    2,
    2*3*5,
    2*2*3*3,
]
# prime
LARGE_PRIME_SIZES = [
    2011
]
SMALL_PRIME_SIZES = [
    29
]
def _assert_close_in_norm(x, y, rtol, size, rdt):
# helper function for testing
err_msg = f"size: {size} rdt: {rdt}"
assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg)
def random(size):
    """Return uniform random samples; ``size`` is a shape tuple/list."""
    return rand(*size)
def direct_dft(x):
    """Naive O(n^2) reference implementation of the forward DFT."""
    x = asarray(x)
    n = len(x)
    # k-th output is the dot product of x with the k-th DFT basis vector.
    base = -arange(n) * (2j * pi / n)
    return array([dot(exp(k * base), x) for k in range(n)], dtype=cdouble)
def direct_idft(x):
    """Naive O(n^2) reference implementation of the inverse DFT."""
    x = asarray(x)
    n = len(x)
    # Same as direct_dft but with the conjugate basis and a 1/n factor.
    base = arange(n) * (2j * pi / n)
    return array([dot(exp(k * base), x) / n for k in range(n)], dtype=cdouble)
def direct_dftn(x):
    """Reference n-D DFT: apply the 1-D fft along every axis in turn."""
    out = asarray(x)
    for ax in range(out.ndim):
        out = fft(out, axis=ax)
    return out
def direct_idftn(x):
    """Reference n-D inverse DFT: apply the 1-D ifft along every axis."""
    out = asarray(x)
    for ax in range(out.ndim):
        out = ifft(out, axis=ax)
    return out
def direct_rdft(x):
    """Reference real DFT in FFTPACK's packed format.

    Output layout for input length n:
      r[0]      = Re(Y_0)
      r[2i-1]   = Re(Y_i)   for i = 1 .. n//2
      r[2i]     = Im(Y_i)   for i = 1 .. n//2, only while 2i < n
    (for even n the Nyquist term Y_{n/2} is real, so its imaginary part
    is omitted and the packed array has exactly n entries).
    """
    x = asarray(x)
    n = len(x)
    w = -arange(n)*(2j*pi/n)
    r = zeros(n, dtype=double)
    for i in range(n//2+1):
        y = dot(exp(i*w), x)
        if i:
            r[2*i-1] = y.real
            if 2*i < n:
                r[2*i] = y.imag
        else:
            # DC term: purely real for real input.
            r[0] = y.real
    return r
def direct_irdft(x):
    """Inverse of direct_rdft: unpack the FFTPACK real format and invert.

    Rebuilds the full Hermitian-symmetric complex spectrum from the packed
    real array (see direct_rdft for the layout), then applies the naive
    inverse DFT and keeps the (real) result.
    """
    x = asarray(x)
    n = len(x)
    x1 = zeros(n, dtype=cdouble)
    for i in range(n//2+1):
        if i:
            if 2*i < n:
                x1[i] = x[2*i-1] + 1j*x[2*i]
                # Hermitian symmetry: Y_{n-i} = conj(Y_i).
                x1[n-i] = x[2*i-1] - 1j*x[2*i]
            else:
                # Even n: the Nyquist bin is real.
                x1[i] = x[2*i-1]
        else:
            x1[0] = x[0]
    return direct_idft(x1).real
class _TestFFTBase:
    """Base class for forward-fft tests; subclasses set cdt/rdt dtypes."""
    def setup_method(self):
        self.cdt = None  # complex dtype expected in output
        self.rdt = None  # real input dtype
        np.random.seed(1234)
    def test_definition(self):
        # fft must agree with the naive reference DFT.
        x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt)
        y = fft(x)
        assert_equal(y.dtype, self.cdt)
        y1 = direct_dft(x)
        assert_array_almost_equal(y,y1)
        x = np.array([1,2,3,4+0j,5], dtype=self.cdt)
        assert_array_almost_equal(fft(x),direct_dft(x))
    def test_n_argument_real(self):
        x1 = np.array([1,2,3,4], dtype=self.rdt)
        x2 = np.array([1,2,3,4], dtype=self.rdt)
        y = fft([x1,x2],n=4)
        assert_equal(y.dtype, self.cdt)
        assert_equal(y.shape,(2,4))
        assert_array_almost_equal(y[0],direct_dft(x1))
        assert_array_almost_equal(y[1],direct_dft(x2))
    # NOTE: leading underscore means pytest does not collect this test;
    # it appears to be deliberately disabled.
    def _test_n_argument_complex(self):
        x1 = np.array([1,2,3,4+1j], dtype=self.cdt)
        x2 = np.array([1,2,3,4+1j], dtype=self.cdt)
        y = fft([x1,x2],n=4)
        assert_equal(y.dtype, self.cdt)
        assert_equal(y.shape,(2,4))
        assert_array_almost_equal(y[0],direct_dft(x1))
        assert_array_almost_equal(y[1],direct_dft(x2))
    def test_invalid_sizes(self):
        assert_raises(ValueError, fft, [])
        assert_raises(ValueError, fft, [[1,1],[2,2]], -5)
# Concrete precision variants of _TestFFTBase.
class TestDoubleFFT(_TestFFTBase):
    def setup_method(self):
        self.cdt = np.cdouble
        self.rdt = np.double
class TestSingleFFT(_TestFFTBase):
    def setup_method(self):
        self.cdt = np.complex64
        self.rdt = np.float32
    @pytest.mark.xfail(run=False, reason="single-precision FFT implementation is partially disabled, until accuracy issues with large prime powers are resolved")
    def test_notice(self):
        pass
class TestFloat16FFT:
    """float16 input is upcast: the fft result dtype must be complex64."""
    def test_1_argument_real(self):
        x1 = np.array([1, 2, 3, 4], dtype=np.float16)
        y = fft(x1, n=4)
        assert_equal(y.dtype, np.complex64)
        assert_equal(y.shape, (4, ))
        # Reference is computed in float32, matching the upcast path.
        assert_array_almost_equal(y, direct_dft(x1.astype(np.float32)))
    def test_n_argument_real(self):
        x1 = np.array([1, 2, 3, 4], dtype=np.float16)
        x2 = np.array([1, 2, 3, 4], dtype=np.float16)
        y = fft([x1, x2], n=4)
        assert_equal(y.dtype, np.complex64)
        assert_equal(y.shape, (2, 4))
        assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32)))
        assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32)))
class _TestIFFTBase:
    """Base class for ifft tests; subclasses set ``cdt``/``rdt`` dtypes."""
    def setup_method(self):
        np.random.seed(1234)
    def test_definition(self):
        # ifft must agree with the naive reference inverse DFT.
        x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)
        y = ifft(x)
        y1 = direct_idft(x)
        assert_equal(y.dtype, self.cdt)
        assert_array_almost_equal(y,y1)
        x = np.array([1,2,3,4+0j,5], self.cdt)
        assert_array_almost_equal(ifft(x),direct_idft(x))
    def test_definition_real(self):
        x = np.array([1,2,3,4,1,2,3,4], self.rdt)
        y = ifft(x)
        assert_equal(y.dtype, self.cdt)
        y1 = direct_idft(x)
        assert_array_almost_equal(y,y1)
        x = np.array([1,2,3,4,5], dtype=self.rdt)
        # BUG FIX: the original re-asserted the dtype of the stale ``y``
        # from the first input; recompute the transform for the new input
        # so both the dtype and the values of ifft(x) are actually checked.
        y = ifft(x)
        assert_equal(y.dtype, self.cdt)
        assert_array_almost_equal(y, direct_idft(x))
    def test_random_complex(self):
        # Round trips ifft(fft(x)) and fft(ifft(x)) must be identities.
        for size in [1,51,111,100,200,64,128,256,1024]:
            x = random([size]).astype(self.cdt)
            x = random([size]).astype(self.cdt) + 1j*x
            y1 = ifft(fft(x))
            y2 = fft(ifft(x))
            assert_equal(y1.dtype, self.cdt)
            assert_equal(y2.dtype, self.cdt)
            assert_array_almost_equal(y1, x)
            assert_array_almost_equal(y2, x)
    def test_random_real(self):
        for size in [1,51,111,100,200,64,128,256,1024]:
            x = random([size]).astype(self.rdt)
            y1 = ifft(fft(x))
            y2 = fft(ifft(x))
            assert_equal(y1.dtype, self.cdt)
            assert_equal(y2.dtype, self.cdt)
            assert_array_almost_equal(y1, x)
            assert_array_almost_equal(y2, x)
    def test_size_accuracy(self):
        # Sanity check for the accuracy for prime and non-prime sized inputs
        if self.rdt == np.float32:
            rtol = 1e-5
        elif self.rdt == np.float64:
            rtol = 1e-10
        else:
            # Previously rtol was left unbound here, producing a confusing
            # NameError; fail explicitly for unexpected dtypes instead.
            raise ValueError(f"unsupported real dtype: {self.rdt}")
        for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
            np.random.seed(1234)
            x = np.random.rand(size).astype(self.rdt)
            y = ifft(fft(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
            y = fft(ifft(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
            x = (x + 1j*np.random.rand(size)).astype(self.cdt)
            y = ifft(fft(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
            y = fft(ifft(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
    def test_invalid_sizes(self):
        assert_raises(ValueError, ifft, [])
        assert_raises(ValueError, ifft, [[1,1],[2,2]], -5)
# Concrete precision variants of _TestIFFTBase.
class TestDoubleIFFT(_TestIFFTBase):
    def setup_method(self):
        self.cdt = np.cdouble
        self.rdt = np.double
class TestSingleIFFT(_TestIFFTBase):
    def setup_method(self):
        self.cdt = np.complex64
        self.rdt = np.float32
class _TestRFFTBase:
    """Base class for real-input rfft tests; subclasses set cdt/rdt."""
    def setup_method(self):
        np.random.seed(1234)
    def test_definition(self):
        # Check both an even- and an odd-length input against the packed
        # reference implementation.
        for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:
            x = np.array(t, dtype=self.rdt)
            y = rfft(x)
            y1 = direct_rdft(x)
            assert_array_almost_equal(y,y1)
            assert_equal(y.dtype, self.rdt)
    def test_invalid_sizes(self):
        assert_raises(ValueError, rfft, [])
        assert_raises(ValueError, rfft, [[1,1],[2,2]], -5)
    # See gh-5790
    class MockSeries:
        # Minimal pandas-Series-like wrapper: delegates attribute access
        # to the underlying ndarray.
        def __init__(self, data):
            self.data = np.asarray(data)
        def __getattr__(self, item):
            try:
                return getattr(self.data, item)
            except AttributeError as e:
                raise AttributeError("'MockSeries' object "
                                     "has no attribute '{attr}'".
                                     format(attr=item)) from e
    def test_non_ndarray_with_dtype(self):
        # rfft must not mutate array-like inputs (regression for gh-5790).
        x = np.array([1., 2., 3., 4., 5.])
        xs = _TestRFFTBase.MockSeries(x)
        expected = [1, 2, 3, 4, 5]
        rfft(xs)
        # Data should not have been overwritten
        assert_equal(x, expected)
        assert_equal(xs.data, expected)
    def test_complex_input(self):
        assert_raises(TypeError, rfft, np.arange(4, dtype=np.complex64))
# Concrete precision variants of _TestRFFTBase.
class TestRFFTDouble(_TestRFFTBase):
    def setup_method(self):
        self.cdt = np.cdouble
        self.rdt = np.double
class TestRFFTSingle(_TestRFFTBase):
    def setup_method(self):
        self.cdt = np.complex64
        self.rdt = np.float32
class _TestIRFFTBase:
    """Base class for irfft tests; subclasses set cdt/rdt/ndec."""
    def setup_method(self):
        np.random.seed(1234)
    def test_definition(self):
        # x*: packed real spectra; x*_1: the equivalent full complex
        # spectra, so irfft can be compared against a plain ifft.
        x1 = [1,2,3,4,1,2,3,4]
        x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]
        x2 = [1,2,3,4,1,2,3,4,5]
        x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]
        def _test(x, xr):
            y = irfft(np.array(x, dtype=self.rdt))
            y1 = direct_irdft(x)
            assert_equal(y.dtype, self.rdt)
            assert_array_almost_equal(y,y1, decimal=self.ndec)
            assert_array_almost_equal(y,ifft(xr), decimal=self.ndec)
        _test(x1, x1_1)
        _test(x2, x2_1)
    def test_random_real(self):
        for size in [1,51,111,100,200,64,128,256,1024]:
            x = random([size]).astype(self.rdt)
            y1 = irfft(rfft(x))
            y2 = rfft(irfft(x))
            assert_equal(y1.dtype, self.rdt)
            assert_equal(y2.dtype, self.rdt)
            assert_array_almost_equal(y1, x, decimal=self.ndec,
                                      err_msg="size=%d" % size)
            assert_array_almost_equal(y2, x, decimal=self.ndec,
                                      err_msg="size=%d" % size)
    def test_size_accuracy(self):
        # Sanity check for the accuracy for prime and non-prime sized inputs
        # NOTE(review): rtol stays unbound (NameError) if rdt is neither
        # float32 nor float64; current subclasses only use those two.
        if self.rdt == np.float32:
            rtol = 1e-5
        elif self.rdt == np.float64:
            rtol = 1e-10
        for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
            np.random.seed(1234)
            x = np.random.rand(size).astype(self.rdt)
            y = irfft(rfft(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
            y = rfft(irfft(x))
            _assert_close_in_norm(x, y, rtol, size, self.rdt)
    def test_invalid_sizes(self):
        assert_raises(ValueError, irfft, [])
        assert_raises(ValueError, irfft, [[1,1],[2,2]], -5)
    def test_complex_input(self):
        assert_raises(TypeError, irfft, np.arange(4, dtype=np.complex64))
# self.ndec is bogus; we should have a assert_array_approx_equal for number of
# significant digits
# Concrete precision variants of _TestIRFFTBase.
class TestIRFFTDouble(_TestIRFFTBase):
    def setup_method(self):
        self.cdt = np.cdouble
        self.rdt = np.double
        self.ndec = 14
class TestIRFFTSingle(_TestIRFFTBase):
    def setup_method(self):
        self.cdt = np.complex64
        self.rdt = np.float32
        self.ndec = 5
class Testfft2:
    """Tests for the two-dimensional FFT wrapper ``fft2``."""
    def setup_method(self):
        np.random.seed(1234)
    def test_regression_244(self):
        """FFT returns wrong result with axes parameter."""
        # fftn (and hence fft2) used to break when both axes and shape were
        # used
        data = numpy.ones((4, 4, 2))
        got = fft2(data, shape=(8, 8), axes=(-3, -2))
        expected = numpy.fft.fftn(data, s=(8, 8), axes=(-3, -2))
        assert_array_almost_equal(got, expected)
    def test_invalid_sizes(self):
        # Empty input and negative transform lengths must be rejected.
        assert_raises(ValueError, fft2, [[]])
        assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3))
class TestFftnSingle:
    """fftn with single/half precision input: dtype and accuracy checks."""
    def setup_method(self):
        np.random.seed(1234)
    def test_definition(self):
        x = [[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]]
        y = fftn(np.array(x, np.float32))
        assert_(y.dtype == np.complex64,
                msg="double precision output with single precision")
        y_r = np.array(fftn(x), np.complex64)
        assert_array_almost_equal_nulp(y, y_r)
    @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
    def test_size_accuracy_small(self, size):
        # single-precision result must stay within a few thousand ulp of
        # the double-precision result.
        x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
        y1 = fftn(x.real.astype(np.float32))
        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
        assert_equal(y1.dtype, np.complex64)
        assert_array_almost_equal_nulp(y1, y2, 2000)
    @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
    def test_size_accuracy_large(self, size):
        x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
        y1 = fftn(x.real.astype(np.float32))
        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
        assert_equal(y1.dtype, np.complex64)
        assert_array_almost_equal_nulp(y1, y2, 2000)
    def test_definition_float16(self):
        # float16 input is upcast; output must be complex64.
        x = [[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]]
        y = fftn(np.array(x, np.float16))
        assert_equal(y.dtype, np.complex64)
        y_r = np.array(fftn(x), np.complex64)
        assert_array_almost_equal_nulp(y, y_r)
    @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
    def test_float16_input_small(self, size):
        x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
        y1 = fftn(x.real.astype(np.float16))
        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
        assert_equal(y1.dtype, np.complex64)
        assert_array_almost_equal_nulp(y1, y2, 5e5)
    @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
    def test_float16_input_large(self, size):
        x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
        y1 = fftn(x.real.astype(np.float16))
        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
        assert_equal(y1.dtype, np.complex64)
        assert_array_almost_equal_nulp(y1, y2, 2e6)
class TestFftn:
    """Tests for the n-dimensional fftn: definition, axes and shape args."""
    def setup_method(self):
        np.random.seed(1234)
    def test_definition(self):
        x = [[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]]
        y = fftn(x)
        assert_array_almost_equal(y, direct_dftn(x))
        x = random((20, 26))
        assert_array_almost_equal(fftn(x), direct_dftn(x))
        x = random((5, 4, 3, 20))
        assert_array_almost_equal(fftn(x), direct_dftn(x))
    def test_axes_argument(self):
        # plane == ji_plane, x== kji_space
        # The literal tables below are the same 3x3x3 data laid out for
        # every axis permutation, so each axes= ordering can be checked
        # against an explicitly transposed reference.
        plane1 = [[1, 2, 3],
                  [4, 5, 6],
                  [7, 8, 9]]
        plane2 = [[10, 11, 12],
                  [13, 14, 15],
                  [16, 17, 18]]
        plane3 = [[19, 20, 21],
                  [22, 23, 24],
                  [25, 26, 27]]
        ki_plane1 = [[1, 2, 3],
                     [10, 11, 12],
                     [19, 20, 21]]
        ki_plane2 = [[4, 5, 6],
                     [13, 14, 15],
                     [22, 23, 24]]
        ki_plane3 = [[7, 8, 9],
                     [16, 17, 18],
                     [25, 26, 27]]
        jk_plane1 = [[1, 10, 19],
                     [4, 13, 22],
                     [7, 16, 25]]
        jk_plane2 = [[2, 11, 20],
                     [5, 14, 23],
                     [8, 17, 26]]
        jk_plane3 = [[3, 12, 21],
                     [6, 15, 24],
                     [9, 18, 27]]
        kj_plane1 = [[1, 4, 7],
                     [10, 13, 16], [19, 22, 25]]
        kj_plane2 = [[2, 5, 8],
                     [11, 14, 17], [20, 23, 26]]
        kj_plane3 = [[3, 6, 9],
                     [12, 15, 18], [21, 24, 27]]
        ij_plane1 = [[1, 4, 7],
                     [2, 5, 8],
                     [3, 6, 9]]
        ij_plane2 = [[10, 13, 16],
                     [11, 14, 17],
                     [12, 15, 18]]
        ij_plane3 = [[19, 22, 25],
                     [20, 23, 26],
                     [21, 24, 27]]
        ik_plane1 = [[1, 10, 19],
                     [2, 11, 20],
                     [3, 12, 21]]
        ik_plane2 = [[4, 13, 22],
                     [5, 14, 23],
                     [6, 15, 24]]
        ik_plane3 = [[7, 16, 25],
                     [8, 17, 26],
                     [9, 18, 27]]
        ijk_space = [jk_plane1, jk_plane2, jk_plane3]
        ikj_space = [kj_plane1, kj_plane2, kj_plane3]
        jik_space = [ik_plane1, ik_plane2, ik_plane3]
        jki_space = [ki_plane1, ki_plane2, ki_plane3]
        kij_space = [ij_plane1, ij_plane2, ij_plane3]
        x = array([plane1, plane2, plane3])
        # Default axes must equal the explicit full-axes spellings.
        assert_array_almost_equal(fftn(x),
                                  fftn(x, axes=(-3, -2, -1)))  # kji_space
        assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2)))
        assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1)))
        # Transform along permuted axes, compare against swapaxes'd spaces.
        y = fftn(x, axes=(2, 1, 0))  # ijk_space
        assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space))
        y = fftn(x, axes=(2, 0, 1))  # ikj_space
        assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2),
                                  fftn(ikj_space))
        y = fftn(x, axes=(1, 2, 0))  # jik_space
        assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2),
                                  fftn(jik_space))
        y = fftn(x, axes=(1, 0, 2))  # jki_space
        assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space))
        y = fftn(x, axes=(0, 2, 1))  # kij_space
        assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space))
        # Two-axes (plane) transforms.
        y = fftn(x, axes=(-2, -1))  # ji_plane
        assert_array_almost_equal(fftn(plane1), y[0])
        assert_array_almost_equal(fftn(plane2), y[1])
        assert_array_almost_equal(fftn(plane3), y[2])
        y = fftn(x, axes=(1, 2))  # ji_plane
        assert_array_almost_equal(fftn(plane1), y[0])
        assert_array_almost_equal(fftn(plane2), y[1])
        assert_array_almost_equal(fftn(plane3), y[2])
        y = fftn(x, axes=(-3, -2))  # kj_plane
        assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0])
        assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1])
        assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2])
        y = fftn(x, axes=(-3, -1))  # ki_plane
        assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :])
        assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :])
        assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :])
        y = fftn(x, axes=(-1, -2))  # ij_plane
        assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1))
        assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1))
        assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1))
        y = fftn(x, axes=(-1, -3))  # ik_plane
        assert_array_almost_equal(fftn(ik_plane1),
                                  swapaxes(y[:, 0, :], -1, -2))
        assert_array_almost_equal(fftn(ik_plane2),
                                  swapaxes(y[:, 1, :], -1, -2))
        assert_array_almost_equal(fftn(ik_plane3),
                                  swapaxes(y[:, 2, :], -1, -2))
        y = fftn(x, axes=(-2, -3))  # jk_plane
        assert_array_almost_equal(fftn(jk_plane1),
                                  swapaxes(y[:, :, 0], -1, -2))
        assert_array_almost_equal(fftn(jk_plane2),
                                  swapaxes(y[:, :, 1], -1, -2))
        assert_array_almost_equal(fftn(jk_plane3),
                                  swapaxes(y[:, :, 2], -1, -2))
        # Single-axis transforms reduce to 1-D ffts.
        y = fftn(x, axes=(-1,))  # i_line
        for i in range(3):
            for j in range(3):
                assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :])
        y = fftn(x, axes=(-2,))  # j_line
        for i in range(3):
            for j in range(3):
                assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j])
        y = fftn(x, axes=(0,))  # k_line
        for i in range(3):
            for j in range(3):
                assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j])
        # Empty axes: identity transform.
        y = fftn(x, axes=())  # point
        assert_array_almost_equal(y, x)
    def test_shape_argument(self):
        # shape= zero-pads (or truncates) the input before transforming.
        small_x = [[1, 2, 3],
                   [4, 5, 6]]
        large_x1 = [[1, 2, 3, 0],
                    [4, 5, 6, 0],
                    [0, 0, 0, 0],
                    [0, 0, 0, 0]]
        y = fftn(small_x, shape=(4, 4))
        assert_array_almost_equal(y, fftn(large_x1))
        y = fftn(small_x, shape=(3, 4))
        assert_array_almost_equal(y, fftn(large_x1[:-1]))
    def test_shape_axes_argument(self):
        small_x = [[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]]
        large_x1 = array([[1, 2, 3, 0],
                          [4, 5, 6, 0],
                          [7, 8, 9, 0],
                          [0, 0, 0, 0]])
        y = fftn(small_x, shape=(4, 4), axes=(-2, -1))
        assert_array_almost_equal(y, fftn(large_x1))
        y = fftn(small_x, shape=(4, 4), axes=(-1, -2))
        assert_array_almost_equal(y, swapaxes(
            fftn(swapaxes(large_x1, -1, -2)), -1, -2))
    def test_shape_axes_argument2(self):
        # Change shape of the last axis
        x = numpy.random.random((10, 5, 3, 7))
        y = fftn(x, axes=(-1,), shape=(8,))
        assert_array_almost_equal(y, fft(x, axis=-1, n=8))
        # Change shape of an arbitrary axis which is not the last one
        x = numpy.random.random((10, 5, 3, 7))
        y = fftn(x, axes=(-2,), shape=(8,))
        assert_array_almost_equal(y, fft(x, axis=-2, n=8))
        # Change shape of axes: cf #244, where shape and axes were mixed up
        x = numpy.random.random((4, 4, 2))
        y = fftn(x, axes=(-3, -2), shape=(8, 8))
        assert_array_almost_equal(y,
                                  numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))
    def test_shape_argument_more(self):
        x = zeros((4, 4, 2))
        with assert_raises(ValueError,
                           match="when given, axes and shape arguments"
                           " have to be of the same length"):
            fftn(x, shape=(8, 8, 2, 1))
    def test_invalid_sizes(self):
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[1, 0\]\) specified"):
            fftn([[]])
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[4, -3\]\) specified"):
            fftn([[1, 1], [2, 2]], (4, -3))
class TestIfftn:
    """Tests for the n-dimensional inverse transform ifftn."""
    dtype = None
    cdtype = None
    def setup_method(self):
        np.random.seed(1234)
    @pytest.mark.parametrize('dtype,cdtype,maxnlp',
                             [(np.float64, np.complex128, 2000),
                              (np.float32, np.complex64, 3500)])
    def test_definition(self, dtype, cdtype, maxnlp):
        # ifftn must match the naive axis-by-axis inverse DFT to within
        # maxnlp units in the last place.
        x = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]], dtype=dtype)
        y = ifftn(x)
        assert_equal(y.dtype, cdtype)
        assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp)
        x = random((20, 26))
        assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
        x = random((5, 4, 3, 20))
        assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
    @pytest.mark.parametrize('maxnlp', [2000, 3500])
    @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
    def test_random_complex(self, maxnlp, size):
        # Round trips in both orders must be identities.
        x = random([size, size]) + 1j*random([size, size])
        assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp)
        assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp)
    def test_invalid_sizes(self):
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[1, 0\]\) specified"):
            ifftn([[]])
        with assert_raises(ValueError,
                           match="invalid number of data points"
                           r" \(\[4, -3\]\) specified"):
            ifftn([[1, 1], [2, 2]], (4, -3))
class FakeArray:
    # Minimal ndarray stand-in: exposes only __array_interface__, so
    # np.asarray() can wrap the underlying buffer without copying.
    def __init__(self, data):
        self._data = data
        self.__array_interface__ = data.__array_interface__
class FakeArray2:
    # Minimal ndarray stand-in: exposes only the __array__ protocol.
    def __init__(self, data):
        self._data = data
    def __array__(self):
        return self._data
class TestOverwrite:
    """Check input overwrite behavior of the FFT functions."""
    real_dtypes = (np.float32, np.float64)
    dtypes = real_dtypes + (np.complex64, np.complex128)
    fftsizes = [8, 16, 32]
    def _check(self, x, routine, fftsize, axis, overwrite_x):
        # Run the routine on a copy, wrapped as ndarray and as both fake
        # array-likes; unless overwrite_x is set, the copy must be intact.
        x2 = x.copy()
        for fake in [lambda x: x, FakeArray, FakeArray2]:
            routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)
            sig = "{}({}{!r}, {!r}, axis={!r}, overwrite_x={!r})".format(
                routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
            if not overwrite_x:
                assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
    def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes,
                  fftsize, overwrite_x):
        np.random.seed(1234)
        if np.issubdtype(dtype, np.complexfloating):
            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
        else:
            data = np.random.randn(*shape)
        data = data.astype(dtype)
        self._check(data, routine, fftsize, axis,
                    overwrite_x=overwrite_x)
    @pytest.mark.parametrize('dtype', dtypes)
    @pytest.mark.parametrize('fftsize', fftsizes)
    @pytest.mark.parametrize('overwrite_x', [True, False])
    @pytest.mark.parametrize('shape,axes', [((16,), -1),
                                            ((16, 2), 0),
                                            ((2, 16), 1)])
    def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes):
        overwritable = (np.complex128, np.complex64)
        self._check_1d(fft, dtype, shape, axes, overwritable,
                       fftsize, overwrite_x)
        self._check_1d(ifft, dtype, shape, axes, overwritable,
                       fftsize, overwrite_x)
    @pytest.mark.parametrize('dtype', real_dtypes)
    @pytest.mark.parametrize('fftsize', fftsizes)
    @pytest.mark.parametrize('overwrite_x', [True, False])
    @pytest.mark.parametrize('shape,axes', [((16,), -1),
                                            ((16, 2), 0),
                                            ((2, 16), 1)])
    def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes):
        overwritable = self.real_dtypes
        self._check_1d(irfft, dtype, shape, axes, overwritable,
                       fftsize, overwrite_x)
        self._check_1d(rfft, dtype, shape, axes, overwritable,
                       fftsize, overwrite_x)
    def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes,
                      overwrite_x):
        np.random.seed(1234)
        if np.issubdtype(dtype, np.complexfloating):
            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
        else:
            data = np.random.randn(*shape)
        data = data.astype(dtype)
        def fftshape_iter(shp):
            # Yield every combination of halved/unchanged/doubled lengths
            # for the transformed axes.
            if len(shp) <= 0:
                yield ()
            else:
                for j in (shp[0]//2, shp[0], shp[0]*2):
                    for rest in fftshape_iter(shp[1:]):
                        yield (j,) + rest
        if axes is None:
            part_shape = shape
        else:
            part_shape = tuple(np.take(shape, axes))
        for fftshape in fftshape_iter(part_shape):
            self._check(data, routine, fftshape, axes,
                        overwrite_x=overwrite_x)
            if data.ndim > 1:
                # Also exercise a non-contiguous (transposed) input.
                self._check(data.T, routine, fftshape, axes,
                            overwrite_x=overwrite_x)
    @pytest.mark.parametrize('dtype', dtypes)
    @pytest.mark.parametrize('overwrite_x', [True, False])
    @pytest.mark.parametrize('shape,axes', [((16,), None),
                                            ((16,), (0,)),
                                            ((16, 2), (0,)),
                                            ((2, 16), (1,)),
                                            ((8, 16), None),
                                            ((8, 16), (0, 1)),
                                            ((8, 16, 2), (0, 1)),
                                            ((8, 16, 2), (1, 2)),
                                            ((8, 16, 2), (0,)),
                                            ((8, 16, 2), (1,)),
                                            ((8, 16, 2), (2,)),
                                            ((8, 16, 2), None),
                                            ((8, 16, 2), (0, 1, 2))])
    def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes):
        overwritable = (np.complex128, np.complex64)
        self._check_nd_one(fftn, dtype, shape, axes, overwritable,
                           overwrite_x)
        self._check_nd_one(ifftn, dtype, shape, axes, overwritable,
                           overwrite_x)
@pytest.mark.parametrize('func', [fftn, ifftn, fft2])
def test_shape_axes_ndarray(func):
    # Test fftn and ifftn work with NumPy arrays for shape and axes arguments
    # Regression test for gh-13342
    data = np.random.rand(10, 10)
    assert_equal(func(data, shape=(5, 5)),
                 func(data, shape=np.array([5, 5])))
    assert_equal(func(data, axes=(-1,)),
                 func(data, axes=np.array([-1])))
    assert_equal(func(data, shape=(4, 7), axes=(1, 0)),
                 func(data, shape=np.array([4, 7]), axes=np.array([1, 0])))
| 30,284
| 33.730505
| 161
|
py
|
scipy
|
scipy-main/scipy/fftpack/tests/test_import.py
|
"""Test possibility of patching fftpack with pyfftw.
No module source outside of scipy.fftpack should contain an import of
the form `from scipy.fftpack import ...`, so that a simple replacement
of scipy.fftpack by the corresponding fftw interface completely swaps
the two FFT implementations.
Because this simply inspects source files, we only need to run the test
on one version of Python.
"""
from pathlib import Path
import re
import tokenize
from numpy.testing import assert_
import scipy
class TestFFTPackImport:
    """Ensure no scipy module outside fftpack imports from scipy.fftpack."""

    def test_fftpack_import(self):
        """Scan every .py file under scipy/, skipping fftpack itself."""
        pattern = re.compile(r"\s*from.+\.fftpack import .*\n")
        scipy_root = Path(scipy.__file__).parent
        fftpack_dir = scipy_root / "fftpack"
        for source in scipy_root.rglob("*.py"):
            if fftpack_dir in source.parents:
                continue
            # tokenize.open honours PEP 263 coding cookies, so this works
            # even on systems with no default encoding (e.g. LANG='C').
            with tokenize.open(str(source)) as fh:
                offending = any(pattern.fullmatch(line) for line in fh)
            assert_(not offending,
                    f"{source} contains an import from fftpack")
| 1,120
| 34.03125
| 71
|
py
|
scipy
|
scipy-main/scipy/fftpack/tests/gendata.py
|
# Convert the MATLAB fixture ``test.mat`` into NumPy ``.npz`` format.
import numpy as np
from scipy.io import loadmat

# squeeze_me drops singleton MATLAB dimensions; mat_dtype preserves the
# original MATLAB storage dtypes instead of promoting to float64.
m = loadmat('test.mat', squeeze_me=True, struct_as_record=True,
            mat_dtype=True)
np.savez('test.npz', **m)
| 163
| 22.428571
| 63
|
py
|
scipy
|
scipy-main/scipy/fftpack/tests/test_pseudo_diffs.py
|
# Created by Pearu Peterson, September 2002
__usage__ = """
Build fftpack:
python setup_fftpack.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.fftpack.test(<level>)'
Run tests if fftpack is not installed:
python tests/test_pseudo_diffs.py [<level>]
"""
from numpy.testing import (assert_equal, assert_almost_equal,
assert_array_almost_equal)
from scipy.fftpack import (diff, fft, ifft, tilbert, itilbert, hilbert,
ihilbert, shift, fftfreq, cs_diff, sc_diff,
ss_diff, cc_diff)
import numpy as np
from numpy import arange, sin, cos, pi, exp, tanh, sum, sign
from numpy.random import random
def direct_diff(x, k=1, period=None):
    """Reference k-th pseudo-derivative of *x*, computed directly via FFT.

    Multiplies the FFT of ``x`` by ``(2j*pi*f*n/period)**k`` and
    transforms back; ``k < 0`` therefore performs |k|-fold integration
    (the singular DC mode is dropped, so the result has zero mean).
    """
    fx = fft(x)
    n = len(fx)
    if period is None:
        period = 2*pi
    w = fftfreq(n)*2j*pi/period*n
    if k < 0:
        # Integration: the multiplier is w**k.  Compute it as 1/w**(-k)
        # so the exponent is positive, then zero the singular DC term.
        # (The previous form ``1 / w**k`` equalled w**(-k), i.e. it
        # differentiated instead of integrating.)
        w = 1 / w**(-k)
        w[0] = 0.0
    else:
        w = w**k
    if n > 2000:
        # Damp high frequencies for very long signals to keep the
        # reference numerically stable.
        w[250:n-250] = 0.0
    return ifft(w*fx).real
def direct_tilbert(x, h=1, period=None):
    """Reference Tilbert transform: multiply the FFT by 1j/tanh(h*w)."""
    spectrum = fft(x)
    n = len(spectrum)
    period = 2*pi if period is None else period
    freqs = fftfreq(n) * h * 2 * pi / period * n
    freqs[0] = 1  # placeholder so tanh(0) does not divide by zero
    multiplier = 1j / tanh(freqs)
    multiplier[0] = 0j  # the DC component is dropped
    return ifft(multiplier * spectrum)
def direct_itilbert(x, h=1, period=None):
    """Reference inverse Tilbert: multiply the FFT by -1j*tanh(h*w)."""
    spectrum = fft(x)
    n = len(spectrum)
    if period is None:
        period = 2 * pi
    multiplier = -1j * tanh(fftfreq(n) * h * 2 * pi / period * n)
    return ifft(multiplier * spectrum)
def direct_hilbert(x):
    """Reference Hilbert transform: multiply the FFT by 1j*sign(freq)."""
    spectrum = fft(x)
    num = len(spectrum)
    multiplier = 1j * sign(fftfreq(num) * num)
    return ifft(multiplier * spectrum)
def direct_ihilbert(x):
    # The inverse Hilbert transform is the negated forward transform.
    return -direct_hilbert(x)
def direct_shift(x, a, period=None):
    """Reference spectral shift: multiply the FFT of x by exp(1j*w*a)."""
    n = len(x)
    if period is None:
        # Default period is 2*pi, giving integer angular frequencies.
        phase_rate = fftfreq(n) * 1j * n
    else:
        phase_rate = fftfreq(n) * 2j * pi / period * n
    return ifft(fft(x) * exp(phase_rate * a)).real
class TestDiff:
    """Tests for fftpack.diff against the direct FFT reference."""

    def test_definition(self):
        """diff of sin/cos at several orders matches direct_diff."""
        for n in [16,17,64,127,32]:
            x = arange(n)*2*pi/n
            assert_array_almost_equal(diff(sin(x)),direct_diff(sin(x)))
            assert_array_almost_equal(diff(sin(x),2),direct_diff(sin(x),2))
            assert_array_almost_equal(diff(sin(x),3),direct_diff(sin(x),3))
            assert_array_almost_equal(diff(sin(x),4),direct_diff(sin(x),4))
            assert_array_almost_equal(diff(sin(x),5),direct_diff(sin(x),5))
            assert_array_almost_equal(diff(sin(2*x),3),direct_diff(sin(2*x),3))
            assert_array_almost_equal(diff(sin(2*x),4),direct_diff(sin(2*x),4))
            assert_array_almost_equal(diff(cos(x)),direct_diff(cos(x)))
            assert_array_almost_equal(diff(cos(x),2),direct_diff(cos(x),2))
            assert_array_almost_equal(diff(cos(x),3),direct_diff(cos(x),3))
            assert_array_almost_equal(diff(cos(x),4),direct_diff(cos(x),4))
            assert_array_almost_equal(diff(cos(2*x)),direct_diff(cos(2*x)))
            assert_array_almost_equal(diff(sin(x*n/8)),direct_diff(sin(x*n/8)))
            assert_array_almost_equal(diff(cos(x*n/8)),direct_diff(cos(x*n/8)))
            for k in range(5):
                assert_array_almost_equal(diff(sin(4*x),k),direct_diff(sin(4*x),k))
                assert_array_almost_equal(diff(cos(4*x),k),direct_diff(cos(4*x),k))

    def test_period(self):
        """A non-default period rescales the derivative accordingly."""
        for n in [17,64]:
            x = arange(n)/float(n)
            assert_array_almost_equal(diff(sin(2*pi*x),period=1),
                                      2*pi*cos(2*pi*x))
            assert_array_almost_equal(diff(sin(2*pi*x),3,period=1),
                                      -(2*pi)**3*cos(2*pi*x))

    def test_sin(self):
        """Analytic derivatives of sine/cosine expressions."""
        for n in [32,64,77]:
            x = arange(n)*2*pi/n
            assert_array_almost_equal(diff(sin(x)),cos(x))
            assert_array_almost_equal(diff(cos(x)),-sin(x))
            assert_array_almost_equal(diff(sin(x),2),-sin(x))
            assert_array_almost_equal(diff(sin(x),4),sin(x))
            assert_array_almost_equal(diff(sin(4*x)),4*cos(4*x))
            assert_array_almost_equal(diff(sin(sin(x))),cos(x)*cos(sin(x)))

    def test_expr(self):
        """First/second derivatives of a composite expression; also checks
        that integrating the second derivative recovers the first."""
        for n in [64,77,100,128,256,512,1024,2048,4096,8192][:5]:
            x = arange(n)*2*pi/n
            f = sin(x)*cos(4*x)+exp(sin(3*x))
            df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x))
            ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\
                - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x))
            d1 = diff(f)
            assert_array_almost_equal(d1,df)
            assert_array_almost_equal(diff(df),ddf)
            assert_array_almost_equal(diff(f,2),ddf)
            assert_array_almost_equal(diff(ddf,-1),df)

    def test_expr_large(self):
        """Same as test_expr for sizes that trigger spectral truncation."""
        for n in [2048,4096]:
            x = arange(n)*2*pi/n
            f = sin(x)*cos(4*x)+exp(sin(3*x))
            df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x))
            ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\
                - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x))
            assert_array_almost_equal(diff(f),df)
            assert_array_almost_equal(diff(df),ddf)
            assert_array_almost_equal(diff(ddf,-1),df)
            assert_array_almost_equal(diff(f,2),ddf)

    def test_int(self):
        """A negative order performs integration."""
        n = 64
        x = arange(n)*2*pi/n
        assert_array_almost_equal(diff(sin(x),-1),-cos(x))
        assert_array_almost_equal(diff(sin(x),-2),-sin(x))
        assert_array_almost_equal(diff(sin(x),-4),sin(x))
        assert_array_almost_equal(diff(2*cos(2*x),-1),sin(2*x))

    def test_random_even(self):
        """diff and its inverse round-trip zero-mean even-length data."""
        for k in [0,2,4,6]:
            for n in [60,32,64,56,55]:
                f = random((n,))
                af = sum(f,axis=0)/n
                f = f-af
                # zeroing Nyquist mode:
                f = diff(diff(f,1),-1)
                assert_almost_equal(sum(f,axis=0),0.0)
                assert_array_almost_equal(diff(diff(f,k),-k),f)
                assert_array_almost_equal(diff(diff(f,-k),k),f)

    def test_random_odd(self):
        """Round-trip for odd lengths (no Nyquist mode exists)."""
        for k in [0,1,2,3,4,5,6]:
            for n in [33,65,55]:
                f = random((n,))
                af = sum(f,axis=0)/n
                f = f-af
                assert_almost_equal(sum(f,axis=0),0.0)
                assert_array_almost_equal(diff(diff(f,k),-k),f)
                assert_array_almost_equal(diff(diff(f,-k),k),f)

    def test_zero_nyquist(self):
        """Round-trip after explicitly zeroing the Nyquist mode."""
        for k in [0,1,2,3,4,5,6]:
            for n in [32,33,64,56,55]:
                f = random((n,))
                af = sum(f,axis=0)/n
                f = f-af
                # zeroing Nyquist mode:
                f = diff(diff(f,1),-1)
                assert_almost_equal(sum(f,axis=0),0.0)
                assert_array_almost_equal(diff(diff(f,k),-k),f)
                assert_array_almost_equal(diff(diff(f,-k),k),f)
class TestTilbert:
    """Tests for fftpack.tilbert against the direct reference."""

    def test_definition(self):
        for h in [0.1,0.5,1,5.5,10]:
            for n in [16,17,64,127]:
                x = arange(n)*2*pi/n
                y = tilbert(sin(x),h)
                y1 = direct_tilbert(sin(x),h)
                assert_array_almost_equal(y,y1)
                assert_array_almost_equal(tilbert(sin(x),h),
                                          direct_tilbert(sin(x),h))
                assert_array_almost_equal(tilbert(sin(2*x),h),
                                          direct_tilbert(sin(2*x),h))

    def test_random_even(self):
        # Even lengths: round-trip through the direct transforms.
        for h in [0.1,0.5,1,5.5,10]:
            for n in [32,64,56]:
                f = random((n,))
                af = sum(f,axis=0)/n
                f = f-af
                assert_almost_equal(sum(f,axis=0),0.0)
                assert_array_almost_equal(direct_tilbert(direct_itilbert(f,h),h),f)

    def test_random_odd(self):
        # Odd lengths: tilbert and itilbert must invert each other.
        for h in [0.1,0.5,1,5.5,10]:
            for n in [33,65,55]:
                f = random((n,))
                af = sum(f,axis=0)/n
                f = f-af
                assert_almost_equal(sum(f,axis=0),0.0)
                assert_array_almost_equal(itilbert(tilbert(f,h),h),f)
                assert_array_almost_equal(tilbert(itilbert(f,h),h),f)
class TestITilbert:
    """itilbert must agree with the direct FFT-based reference."""

    def test_definition(self):
        for bandwidth in [0.1, 0.5, 1, 5.5, 10]:
            for size in [16, 17, 64, 127]:
                grid = arange(size) * 2 * pi / size
                expected = direct_itilbert(sin(grid), bandwidth)
                assert_array_almost_equal(itilbert(sin(grid), bandwidth),
                                          expected)
                assert_array_almost_equal(itilbert(sin(grid), bandwidth),
                                          direct_itilbert(sin(grid), bandwidth))
                assert_array_almost_equal(itilbert(sin(2*grid), bandwidth),
                                          direct_itilbert(sin(2*grid), bandwidth))
class TestHilbert:
    """Tests for fftpack.hilbert against the direct reference."""

    def test_definition(self):
        for n in [16,17,64,127]:
            x = arange(n)*2*pi/n
            y = hilbert(sin(x))
            y1 = direct_hilbert(sin(x))
            assert_array_almost_equal(y,y1)
            assert_array_almost_equal(hilbert(sin(2*x)),
                                      direct_hilbert(sin(2*x)))

    def test_tilbert_relation(self):
        # For large h the Tilbert transform approaches the Hilbert one.
        for n in [16,17,64,127]:
            x = arange(n)*2*pi/n
            f = sin(x)+cos(2*x)*sin(x)
            y = hilbert(f)
            y1 = direct_hilbert(f)
            assert_array_almost_equal(y,y1)
            y2 = tilbert(f,h=10)
            assert_array_almost_equal(y,y2)

    def test_random_odd(self):
        for n in [33,65,55]:
            f = random((n,))
            af = sum(f,axis=0)/n
            f = f-af
            assert_almost_equal(sum(f,axis=0),0.0)
            assert_array_almost_equal(ihilbert(hilbert(f)),f)
            assert_array_almost_equal(hilbert(ihilbert(f)),f)

    def test_random_even(self):
        for n in [32,64,56]:
            f = random((n,))
            af = sum(f,axis=0)/n
            f = f-af
            # zeroing Nyquist mode:
            f = diff(diff(f,1),-1)
            assert_almost_equal(sum(f,axis=0),0.0)
            assert_array_almost_equal(direct_hilbert(direct_ihilbert(f)),f)
            assert_array_almost_equal(hilbert(ihilbert(f)),f)
class TestIHilbert:
    """ihilbert must agree with the direct reference and with itilbert."""

    def test_definition(self):
        for size in [16, 17, 64, 127]:
            grid = arange(size) * 2 * pi / size
            expected = direct_ihilbert(sin(grid))
            assert_array_almost_equal(ihilbert(sin(grid)), expected)
            assert_array_almost_equal(ihilbert(sin(2*grid)),
                                      direct_ihilbert(sin(2*grid)))

    def test_itilbert_relation(self):
        # For large h, itilbert approaches the inverse Hilbert transform.
        for size in [16, 17, 64, 127]:
            grid = arange(size) * 2 * pi / size
            signal = sin(grid) + cos(2*grid) * sin(grid)
            result = ihilbert(signal)
            assert_array_almost_equal(result, direct_ihilbert(signal))
            assert_array_almost_equal(result, itilbert(signal, h=10))
class TestShift:
    """shift(f, a) must translate a periodic signal by a."""

    def test_definition(self):
        for size in [18, 17, 64, 127, 32, 2048, 256]:
            grid = arange(size) * 2 * pi / size
            for offset in [0.1, 3]:
                assert_array_almost_equal(shift(sin(grid), offset),
                                          direct_shift(sin(grid), offset))
                assert_array_almost_equal(shift(sin(grid), offset),
                                          sin(grid + offset))
                assert_array_almost_equal(shift(cos(grid), offset),
                                          cos(grid + offset))
                assert_array_almost_equal(
                    shift(cos(2*grid) + sin(grid), offset),
                    cos(2*(grid + offset)) + sin(grid + offset))
                assert_array_almost_equal(shift(exp(sin(grid)), offset),
                                          exp(sin(grid + offset)))
            # Special offsets with exact closed forms.
            assert_array_almost_equal(shift(sin(grid), 2*pi), sin(grid))
            assert_array_almost_equal(shift(sin(grid), pi), -sin(grid))
            assert_array_almost_equal(shift(sin(grid), pi/2), cos(grid))
class TestOverwrite:
    """Check that the pseudo-diff routines never overwrite their input."""

    # Complex dtypes are included to check they are also left untouched.
    real_dtypes = (np.float32, np.float64)
    dtypes = real_dtypes + (np.complex64, np.complex128)

    def _check(self, x, routine, *args, **kwargs):
        """Call routine on a copy of x; the copy must remain unchanged."""
        x2 = x.copy()
        routine(x2, *args, **kwargs)
        sig = routine.__name__
        if args:
            sig += repr(args)
        if kwargs:
            sig += repr(kwargs)
        assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)

    def _check_1d(self, routine, dtype, shape, *args, **kwargs):
        """Build deterministic random data of dtype/shape and run _check."""
        np.random.seed(1234)
        if np.issubdtype(dtype, np.complexfloating):
            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
        else:
            data = np.random.randn(*shape)
        data = data.astype(dtype)
        self._check(data, routine, *args, **kwargs)

    def test_diff(self):
        for dtype in self.dtypes:
            self._check_1d(diff, dtype, (16,))

    def test_tilbert(self):
        for dtype in self.dtypes:
            self._check_1d(tilbert, dtype, (16,), 1.6)

    def test_itilbert(self):
        for dtype in self.dtypes:
            self._check_1d(itilbert, dtype, (16,), 1.6)

    def test_hilbert(self):
        for dtype in self.dtypes:
            self._check_1d(hilbert, dtype, (16,))

    def test_cs_diff(self):
        for dtype in self.dtypes:
            self._check_1d(cs_diff, dtype, (16,), 1.0, 4.0)

    def test_sc_diff(self):
        for dtype in self.dtypes:
            self._check_1d(sc_diff, dtype, (16,), 1.0, 4.0)

    def test_ss_diff(self):
        for dtype in self.dtypes:
            self._check_1d(ss_diff, dtype, (16,), 1.0, 4.0)

    def test_cc_diff(self):
        for dtype in self.dtypes:
            self._check_1d(cc_diff, dtype, (16,), 1.0, 4.0)

    def test_shift(self):
        for dtype in self.dtypes:
            self._check_1d(shift, dtype, (16,), 1.0)
| 13,389
| 34.144357
| 83
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.